1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
20 from netaddr import IPAddress
22 from vpplib.VPPUtil import VPPUtil
23 from vpplib.VppPCIUtil import VppPCIUtil
24 from vpplib.VppHugePageUtil import VppHugePageUtil
25 from vpplib.CpuUtils import CpuUtils
26 from vpplib.VppGrubUtil import VppGrubUtil
27 from vpplib.QemuUtils import QemuUtils
__all__ = ["AutoConfig"]

# Huge-page sizing policy: VPP needs at least this many huge pages, and
# huge pages may consume no more than this percentage of free memory.
MIN_TOTAL_HUGE_PAGES = 1024
MAX_PERCENT_FOR_HUGE_PAGES = 70
37 class AutoConfig(object):
38 """Auto Configuration Tools"""
40 def __init__(self, rootdir, filename, clean=False):
42 The Auto Configure class.
44 :param rootdir: The root directory for all the auto configuration files
45 :param filename: The autoconfiguration file
46 :param clean: When set initialize the nodes from the auto-config file
51 self._autoconfig_filename = rootdir + filename
52 self._rootdir = rootdir
55 self._vpp_devices_node = {}
56 self._hugepage_config = ""
62 Returns the nodes dictionary.
71 def _autoconfig_backup_file(filename):
75 :param filename: The file to backup
79 # Does a copy of the file exist, if not create one
80 ofile = filename + '.orig'
81 (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
84 if stdout.strip('\n') != ofile:
85 cmd = 'sudo cp {} {}'.format(filename, ofile)
86 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
90 # noinspection PyBroadException
94 Asks the user for a number within a range.
95 default is returned if return is entered.
97 :returns: IP address with cidr
102 answer = raw_input("Please enter the IPv4 Address [n.n.n.n/n]: ")
104 ipinput = answer.split('/')
105 ipaddr = IPAddress(ipinput[0])
107 plen = answer.split('/')[1]
109 answer = raw_input("Please enter the netmask [n.n.n.n]: ")
110 plen = IPAddress(answer).netmask_bits()
111 return '{}/{}'.format(ipaddr, plen)
113 print "Please enter a valid IPv4 address."
117 def _ask_user_range(question, first, last, default):
119 Asks the user for a number within a range.
120 default is returned if return is entered.
122 :param question: Text of a question.
123 :param first: First number in the range
124 :param last: Last number in the range
125 :param default: The value returned when return is entered
126 :type question: string
130 :returns: The answer to the question
135 answer = raw_input(question)
139 if re.findall(r'[0-9+]', answer):
140 if int(answer) in range(first, last + 1):
143 print "Please a value between {} and {} or Return.". \
146 print "Please a number between {} and {} or Return.". \
152 def _ask_user_yn(question, default):
154 Asks the user for a yes or no question.
156 :param question: Text of a question.
157 :param default: The value returned when return is entered
158 :type question: string
159 :type default: string
160 :returns: The answer to the question
165 default = default.lower()
167 while not input_valid:
168 answer = raw_input(question)
171 if re.findall(r'[YyNn]', answer):
173 answer = answer[0].lower()
175 print "Please answer Y, N or Return."
179 def _loadconfig(self):
181 Load the testbed configuration, given the auto configuration file.
185 # Get the Topology, from the topology layout file
187 with open(self._autoconfig_filename, 'r') as stream:
189 topo = yaml.load(stream)
190 if 'metadata' in topo:
191 self._metadata = topo['metadata']
192 except yaml.YAMLError as exc:
193 raise RuntimeError("Couldn't read the Auto config file {}.".format(self._autoconfig_filename, exc))
195 systemfile = self._rootdir + self._metadata['system_config_file']
196 if self._clean is False and os.path.isfile(systemfile):
197 with open(systemfile, 'r') as sysstream:
199 systopo = yaml.load(sysstream)
200 if 'nodes' in systopo:
201 self._nodes = systopo['nodes']
202 except yaml.YAMLError as sysexc:
203 raise RuntimeError("Couldn't read the System config file {}.".format(systemfile, sysexc))
205 # Get the nodes from Auto Config
207 self._nodes = topo['nodes']
209 # Set the root directory in all the nodes
210 for i in self._nodes.items():
212 node['rootdir'] = self._rootdir
214 def updateconfig(self):
216 Update the testbed configuration, given the auto configuration file.
217 We will write the system configuration file with the current node
222 # Initialize the yaml data
223 ydata = {'metadata': self._metadata, 'nodes': self._nodes}
225 # Write the system config file
226 filename = self._rootdir + self._metadata['system_config_file']
227 with open(filename, 'w') as yamlfile:
228 yaml.dump(ydata, yamlfile)
230 def _update_auto_config(self):
232 Write the auto configuration file with the new configuration data,
237 # Initialize the yaml data
239 with open(self._autoconfig_filename, 'r') as stream:
241 ydata = yaml.load(stream)
243 nodes = ydata['nodes']
244 except yaml.YAMLError as exc:
248 for i in nodes.items():
253 node['interfaces'] = {}
254 for item in self._nodes[key]['interfaces'].items():
258 node['interfaces'][port] = {}
259 addr = '{}'.format(interface['pci_address'])
260 node['interfaces'][port]['pci_address'] = addr
261 if 'mac_address' in interface:
262 node['interfaces'][port]['mac_address'] = \
263 interface['mac_address']
265 if 'total_other_cpus' in self._nodes[key]['cpu']:
266 node['cpu']['total_other_cpus'] = \
267 self._nodes[key]['cpu']['total_other_cpus']
268 if 'total_vpp_cpus' in self._nodes[key]['cpu']:
269 node['cpu']['total_vpp_cpus'] = \
270 self._nodes[key]['cpu']['total_vpp_cpus']
271 if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
272 node['cpu']['reserve_vpp_main_core'] = \
273 self._nodes[key]['cpu']['reserve_vpp_main_core']
276 if 'active_open_sessions' in self._nodes[key]['tcp']:
277 node['tcp']['active_open_sessions'] = \
278 self._nodes[key]['tcp']['active_open_sessions']
279 if 'passive_open_sessions' in self._nodes[key]['tcp']:
280 node['tcp']['passive_open_sessions'] = \
281 self._nodes[key]['tcp']['passive_open_sessions']
284 node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
286 # Write the auto config config file
287 with open(self._autoconfig_filename, 'w') as yamlfile:
288 yaml.dump(ydata, yamlfile)
290 def apply_huge_pages(self):
292 Apply the huge page config
296 for i in self._nodes.items():
299 hpg = VppHugePageUtil(node)
300 hpg.hugepages_dryrun_apply()
303 def _apply_vpp_unix(node):
305 Apply the VPP Unix config
307 :param node: Node dictionary with cpuinfo.
312 if 'unix' not in node['vpp']:
315 unixv = node['vpp']['unix']
316 if 'interactive' in unixv:
317 interactive = unixv['interactive']
318 if interactive is True:
319 unix = ' interactive\n'
321 return unix.rstrip('\n')
324 def _apply_vpp_cpu(node):
326 Apply the VPP cpu config
328 :param node: Node dictionary with cpuinfo.
334 if 'vpp_main_core' in node['cpu']:
335 vpp_main_core = node['cpu']['vpp_main_core']
338 if vpp_main_core is not 0:
339 cpu += ' main-core {}\n'.format(vpp_main_core)
342 vpp_workers = node['cpu']['vpp_workers']
343 vpp_worker_len = len(vpp_workers)
344 if vpp_worker_len > 0:
346 for i, worker in enumerate(vpp_workers):
348 vpp_worker_str += ','
349 if worker[0] == worker[1]:
350 vpp_worker_str += "{}".format(worker[0])
352 vpp_worker_str += "{}-{}".format(worker[0], worker[1])
354 cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
359 def _apply_vpp_devices(node):
361 Apply VPP PCI Device configuration to vpp startup.
363 :param node: Node dictionary with cpuinfo.
368 ports_per_numa = node['cpu']['ports_per_numa']
369 total_mbufs = node['cpu']['total_mbufs']
371 for item in ports_per_numa.items():
373 interfaces = value['interfaces']
375 # if 0 was specified for the number of vpp workers, use 1 queue
378 if 'rx_queues' in value:
379 num_rx_queues = value['rx_queues']
380 if 'tx_queues' in value:
381 num_tx_queues = value['tx_queues']
386 # Create the devices string
387 for interface in interfaces:
388 pci_address = interface['pci_address']
389 pci_address = pci_address.lstrip("'").rstrip("'")
391 devices += ' dev {} {{ \n'.format(pci_address)
393 devices += ' num-rx-queues {}\n'.format(num_rx_queues)
395 devices += ' num-rx-queues {}\n'.format(1)
397 devices += ' num-tx-queues {}\n'.format(num_tx_queues)
399 devices += ' num-rx-desc {}\n'.format(num_rx_desc)
401 devices += ' num-tx-desc {}\n'.format(num_tx_desc)
404 if total_mbufs is not 0:
405 devices += '\n num-mbufs {}'.format(total_mbufs)
410 def _calc_vpp_workers(node, vpp_workers, numa_node,
411 other_cpus_end, total_vpp_workers,
412 reserve_vpp_main_core):
414 Calculate the VPP worker information
416 :param node: Node dictionary
417 :param vpp_workers: List of VPP workers
418 :param numa_node: Numa node
419 :param other_cpus_end: The end of the cpus allocated for cores
421 :param total_vpp_workers: The number of vpp workers needed
422 :param reserve_vpp_main_core: Is there a core needed for
426 :type other_cpus_end: int
427 :type total_vpp_workers: int
428 :type reserve_vpp_main_core: bool
429 :returns: Is a core still needed for the vpp main core
433 # Can we fit the workers in one of these slices
434 cpus = node['cpu']['cpus_per_node'][numa_node]
438 if start <= other_cpus_end:
439 start = other_cpus_end + 1
441 if reserve_vpp_main_core:
444 workers_end = start + total_vpp_workers - 1
445 if workers_end <= end:
446 if reserve_vpp_main_core:
447 node['cpu']['vpp_main_core'] = start - 1
448 reserve_vpp_main_core = False
449 if total_vpp_workers:
450 vpp_workers.append((start, workers_end))
453 # We still need to reserve the main core
454 if reserve_vpp_main_core:
455 node['cpu']['vpp_main_core'] = other_cpus_end + 1
457 return reserve_vpp_main_core
460 def _calc_desc_and_queues(total_numa_nodes,
461 total_ports_per_numa,
463 ports_per_numa_value):
465 Calculate the number of descriptors and queues
467 :param total_numa_nodes: The total number of numa nodes
468 :param total_ports_per_numa: The total number of ports for this
470 :param total_vpp_cpus: The total number of cpus to allocate for vpp
471 :param ports_per_numa_value: The value from the ports_per_numa
473 :type total_numa_nodes: int
474 :type total_ports_per_numa: int
475 :type total_vpp_cpus: int
476 :type ports_per_numa_value: dict
477 :returns The total number of message buffers
478 :returns: The total number of vpp workers
483 # Get the total vpp workers
484 total_vpp_workers = total_vpp_cpus
485 ports_per_numa_value['total_vpp_workers'] = total_vpp_workers
487 # Get the number of rx queues
488 rx_queues = max(1, total_vpp_workers)
489 tx_queues = total_vpp_workers * total_numa_nodes + 1
491 # Get the descriptor entries
493 ports_per_numa_value['rx_queues'] = rx_queues
494 total_mbufs = (((rx_queues * desc_entries) +
495 (tx_queues * desc_entries)) *
496 total_ports_per_numa)
497 total_mbufs = total_mbufs
499 return total_mbufs, total_vpp_workers
502 def _create_ports_per_numa(node, interfaces):
504 Create a dictionary or ports per numa node
505 :param node: Node dictionary
506 :param interfaces: All the interfaces to be used by vpp
508 :type interfaces: dict
509 :returns: The ports per numa dictionary
513 # Make a list of ports by numa node
515 for item in interfaces.items():
517 if i['numa_node'] not in ports_per_numa:
518 ports_per_numa[i['numa_node']] = {'interfaces': []}
519 ports_per_numa[i['numa_node']]['interfaces'].append(i)
521 ports_per_numa[i['numa_node']]['interfaces'].append(i)
522 node['cpu']['ports_per_numa'] = ports_per_numa
524 return ports_per_numa
526 def calculate_cpu_parameters(self):
528 Calculate the cpu configuration.
532 # Calculate the cpu parameters, needed for the
533 # vpp_startup and grub configuration
534 for i in self._nodes.items():
537 # get total number of nic ports
538 interfaces = node['interfaces']
540 # Make a list of ports by numa node
541 ports_per_numa = self._create_ports_per_numa(node, interfaces)
543 # Get the number of cpus to skip, we never use the first cpu
545 other_cpus_end = other_cpus_start + \
546 node['cpu']['total_other_cpus'] - 1
548 if other_cpus_end is not 0:
549 other_workers = (other_cpus_start, other_cpus_end)
550 node['cpu']['other_workers'] = other_workers
552 # Allocate the VPP main core and workers
554 reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
555 total_vpp_cpus = node['cpu']['total_vpp_cpus']
557 # If total_vpp_cpus is 0 or is less than the numa nodes with ports
558 # then we shouldn't get workers
559 total_with_main = total_vpp_cpus
560 if reserve_vpp_main_core:
563 if total_with_main is not 0:
564 for item in ports_per_numa.items():
568 # Get the number of descriptors and queues
569 mbufs, total_vpp_workers = self._calc_desc_and_queues(
571 len(value['interfaces']), total_vpp_cpus, value)
574 # Get the VPP workers
575 reserve_vpp_main_core = self._calc_vpp_workers(
576 node, vpp_workers, numa_node, other_cpus_end,
577 total_vpp_workers, reserve_vpp_main_core)
580 total_mbufs = int(total_mbufs)
585 node['cpu']['vpp_workers'] = vpp_workers
586 node['cpu']['total_mbufs'] = total_mbufs
592 def _apply_vpp_tcp(node):
594 Apply the VPP Unix config
596 :param node: Node dictionary with cpuinfo.
600 active_open_sessions = node['tcp']['active_open_sessions']
601 aos = int(active_open_sessions)
603 passive_open_sessions = node['tcp']['passive_open_sessions']
604 pos = int(passive_open_sessions)
606 # Generate the api-segment gid vpp sheit in any case
608 tcp = "api-segment {\n"
609 tcp = tcp + " gid vpp\n"
611 return tcp.rstrip('\n')
613 tcp = "# TCP stack-related configuration parameters\n"
614 tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
615 tcp = tcp + "heapsize 4g\n\n"
616 tcp = tcp + "api-segment {\n"
617 tcp = tcp + " global-size 2000M\n"
618 tcp = tcp + " api-size 1G\n"
621 tcp = tcp + "session {\n"
622 tcp = tcp + " event-queue-length " + "{:d}".format(aos + pos) + "\n"
623 tcp = tcp + " preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
624 tcp = tcp + " v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
625 tcp = tcp + " v4-session-table-memory 3g\n"
627 tcp = tcp + " v4-halfopen-table-buckets " + \
628 "{:d}".format((aos + pos) / 4) + "\n"
629 tcp = tcp + " v4-halfopen-table-memory 3g\n"
632 tcp = tcp + "tcp {\n"
633 tcp = tcp + " preallocated-connections " + "{:d}".format(aos + pos) + "\n"
635 tcp = tcp + " preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
636 tcp = tcp + " local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
637 tcp = tcp + " local-endpoints-table-memory 3g\n"
640 return tcp.rstrip('\n')
642 def apply_vpp_startup(self):
644 Apply the vpp startup configration
648 # Apply the VPP startup configruation
649 for i in self._nodes.items():
652 # Get the startup file
653 rootdir = node['rootdir']
654 sfile = rootdir + node['vpp']['startup_config_file']
657 devices = self._apply_vpp_devices(node)
660 cpu = self._apply_vpp_cpu(node)
662 # Get the unix config
663 unix = self._apply_vpp_unix(node)
665 # Get the TCP configuration, if any
666 tcp = self._apply_vpp_tcp(node)
668 # Make a backup if needed
669 self._autoconfig_backup_file(sfile)
672 tfile = sfile + '.template'
673 (ret, stdout, stderr) = \
674 VPPUtil.exec_command('cat {}'.format(tfile))
676 raise RuntimeError('Executing cat command failed to node {}'.
677 format(node['host']))
678 startup = stdout.format(unix=unix,
683 (ret, stdout, stderr) = \
684 VPPUtil.exec_command('rm {}'.format(sfile))
686 logging.debug(stderr)
688 cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
689 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
691 raise RuntimeError('Writing config failed node {}'.
692 format(node['host']))
694 def apply_grub_cmdline(self):
696 Apply the grub cmdline
700 for i in self._nodes.items():
703 # Get the isolated CPUs
704 other_workers = node['cpu']['other_workers']
705 vpp_workers = node['cpu']['vpp_workers']
706 if 'vpp_main_core' in node['cpu']:
707 vpp_main_core = node['cpu']['vpp_main_core']
711 if other_workers is not None:
712 all_workers = [other_workers]
713 if vpp_main_core is not 0:
714 all_workers += [(vpp_main_core, vpp_main_core)]
715 all_workers += vpp_workers
717 for idx, worker in enumerate(all_workers):
722 if worker[0] == worker[1]:
723 isolated_cpus += "{}".format(worker[0])
725 isolated_cpus += "{}-{}".format(worker[0], worker[1])
727 vppgrb = VppGrubUtil(node)
728 current_cmdline = vppgrb.get_current_cmdline()
729 if 'grub' not in node:
731 node['grub']['current_cmdline'] = current_cmdline
732 node['grub']['default_cmdline'] = \
733 vppgrb.apply_cmdline(node, isolated_cpus)
737 def get_hugepages(self):
739 Get the hugepage configuration
743 for i in self._nodes.items():
746 hpg = VppHugePageUtil(node)
747 max_map_count, shmmax = hpg.get_huge_page_config()
748 node['hugepages']['max_map_count'] = max_map_count
749 node['hugepages']['shmax'] = shmmax
750 total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
751 node['hugepages']['actual_total'] = total
752 node['hugepages']['free'] = free
753 node['hugepages']['size'] = size
754 node['hugepages']['memtotal'] = memtotal
755 node['hugepages']['memfree'] = memfree
761 Get the grub configuration
765 for i in self._nodes.items():
768 vppgrb = VppGrubUtil(node)
769 current_cmdline = vppgrb.get_current_cmdline()
770 default_cmdline = vppgrb.get_default_cmdline()
772 # Get the total number of isolated CPUs
774 iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
775 iso_cpurl = len(iso_cpur)
777 iso_cpu_str = iso_cpur[0]
778 iso_cpu_str = iso_cpu_str.split('=')[1]
779 iso_cpul = iso_cpu_str.split(',')
780 for iso_cpu in iso_cpul:
781 isocpuspl = iso_cpu.split('-')
782 if len(isocpuspl) is 1:
783 current_iso_cpus += 1
785 first = int(isocpuspl[0])
786 second = int(isocpuspl[1])
788 current_iso_cpus += 1
790 current_iso_cpus += second - first
792 if 'grub' not in node:
794 node['grub']['current_cmdline'] = current_cmdline
795 node['grub']['default_cmdline'] = default_cmdline
796 node['grub']['current_iso_cpus'] = current_iso_cpus
801 def _get_device(node):
803 Get the device configuration for a single node
805 :param node: Node dictionary with cpuinfo.
810 vpp = VppPCIUtil(node)
811 vpp.get_all_devices()
813 # Save the device information
815 node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
816 node['devices']['kernel_devices'] = vpp.get_kernel_devices()
817 node['devices']['other_devices'] = vpp.get_other_devices()
818 node['devices']['linkup_devices'] = vpp.get_link_up_devices()
820 def get_devices_per_node(self):
822 Get the device configuration for all the nodes
826 for i in self._nodes.items():
828 # Update the interface data
830 self._get_device(node)
835 def get_cpu_layout(node):
839 using lscpu -p get the cpu layout.
840 Returns a list with each item representing a single cpu.
842 :param node: Node dictionary.
844 :returns: The cpu layout
849 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
851 raise RuntimeError('{} failed on node {} {}'.
852 format(cmd, node['host'], stderr))
855 lines = stdout.split('\n')
857 if line == '' or line[0] == '#':
859 linesplit = line.split(',')
860 layout = {'cpu': linesplit[0], 'core': linesplit[1],
861 'socket': linesplit[2], 'node': linesplit[3]}
863 # cpu, core, socket, node
870 Get the cpu configuration
875 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
877 for i in self._nodes.items():
881 layout = self.get_cpu_layout(node)
882 node['cpu']['layout'] = layout
884 cpuinfo = node['cpuinfo']
885 smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
886 node['cpu']['smt_enabled'] = smt_enabled
888 # We don't want to write the cpuinfo
896 Get the current system configuration.
900 # Get the Huge Page configuration
903 # Get the device configuration
904 self.get_devices_per_node()
906 # Get the CPU configuration
909 # Get the current grub cmdline
912 def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
914 Ask the user questions related to the cpu configuration.
916 :param node: Node dictionary
917 :param total_cpus: The total number of cpus in the system
918 :param numa_nodes: The list of numa nodes in the system
920 :type total_cpus: int
921 :type numa_nodes: list
924 print "\nYour system has {} core(s) and {} Numa Nodes.". \
925 format(total_cpus, len(numa_nodes))
926 print "To begin, we suggest not reserving any cores for VPP",
927 print "or other processes."
928 print "Then to improve performance try reserving cores as needed. "
930 max_other_cores = total_cpus / 2
931 question = '\nHow many core(s) do you want to reserve for processes \
932 other than VPP? [0-{}][0]? '.format(str(max_other_cores))
933 total_other_cpus = self._ask_user_range(question, 0, max_other_cores,
935 node['cpu']['total_other_cpus'] = total_other_cpus
940 question = "How many core(s) shall we reserve for VPP workers[0-{}][0]? ". \
942 total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
943 node['cpu']['total_vpp_cpus'] = total_vpp_cpus
945 max_main_cpus = max_vpp_cpus - total_vpp_cpus
946 reserve_vpp_main_core = False
947 if max_main_cpus > 0:
948 question = "Should we reserve 1 core for the VPP Main thread? "
949 question += "[y/N]? "
950 answer = self._ask_user_yn(question, 'n')
952 reserve_vpp_main_core = True
953 node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
954 node['cpu']['vpp_main_core'] = 0
956 def modify_cpu(self, ask_questions=True):
958 Modify the cpu configuration, asking for the user for the values.
960 :param ask_questions: When true ask the user for config parameters
965 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
967 for i in self._nodes.items():
970 total_cpus_per_slice = 0
974 cpu_layout = self.get_cpu_layout(node)
976 # Assume the number of cpus per slice is always the same as the
979 for cpu in cpu_layout:
980 if cpu['node'] != first_node:
982 total_cpus_per_slice += 1
984 # Get the total number of cpus, cores, and numa nodes from the
986 for cpul in cpu_layout:
987 numa_node = cpul['node']
992 if numa_node not in cpus_per_node:
993 cpus_per_node[numa_node] = []
994 cpuperslice = int(cpu) % total_cpus_per_slice
996 cpus_per_node[numa_node].append((int(cpu), int(cpu) +
997 total_cpus_per_slice - 1))
998 if numa_node not in numa_nodes:
999 numa_nodes.append(numa_node)
1000 if core not in cores:
1002 node['cpu']['cpus_per_node'] = cpus_per_node
1004 # Ask the user some questions
1006 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1008 # Populate the interfaces with the numa node
1009 if 'interfaces' in node:
1010 ikeys = node['interfaces'].keys()
1011 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1013 # We don't want to write the cpuinfo
1014 node['cpuinfo'] = ""
1017 self._update_auto_config()
1020 def _modify_other_devices(self, node,
1021 other_devices, kernel_devices, dpdk_devices):
1023 Modify the devices configuration, asking for the user for the values.
1027 odevices_len = len(other_devices)
1028 if odevices_len > 0:
1029 print "\nThese device(s) are currently NOT being used",
1030 print "by VPP or the OS.\n"
1031 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1032 question = "\nWould you like to give any of these devices"
1033 question += " back to the OS [Y/n]? "
1034 answer = self._ask_user_yn(question, 'Y')
1037 for dit in other_devices.items():
1040 question = "Would you like to use device {} for". \
1042 question += " the OS [y/N]? "
1043 answer = self._ask_user_yn(question, 'n')
1045 driver = device['unused'][0]
1046 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1048 for dit in vppd.items():
1051 kernel_devices[dvid] = device
1052 del other_devices[dvid]
1054 odevices_len = len(other_devices)
1055 if odevices_len > 0:
1056 print "\nThese device(s) are still NOT being used ",
1057 print "by VPP or the OS.\n"
1058 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1059 question = "\nWould you like use any of these for VPP [y/N]? "
1060 answer = self._ask_user_yn(question, 'N')
1063 for dit in other_devices.items():
1066 question = "Would you like to use device {} ".format(dvid)
1067 question += "for VPP [y/N]? "
1068 answer = self._ask_user_yn(question, 'n')
1071 for dit in vppd.items():
1074 dpdk_devices[dvid] = device
1075 del other_devices[dvid]
1077 def update_interfaces_config(self):
1079 Modify the interfaces directly from the config file.
1083 for i in self._nodes.items():
1085 devices = node['devices']
1086 all_devices = devices['other_devices']
1087 all_devices.update(devices['dpdk_devices'])
1088 all_devices.update(devices['kernel_devices'])
1092 if 'interfaces' in node:
1093 current_ifcs = node['interfaces']
1095 for ifc in current_ifcs.values():
1096 dvid = ifc['pci_address']
1097 if dvid in all_devices:
1098 VppPCIUtil.vpp_create_interface(interfaces, dvid,
1100 node['interfaces'] = interfaces
1104 def modify_devices(self):
1106 Modify the devices configuration, asking for the user for the values.
1110 for i in self._nodes.items():
1112 devices = node['devices']
1113 other_devices = devices['other_devices']
1114 kernel_devices = devices['kernel_devices']
1115 dpdk_devices = devices['dpdk_devices']
1118 self._modify_other_devices(node, other_devices,
1119 kernel_devices, dpdk_devices)
1121 # Get the devices again for this node
1122 self._get_device(node)
1123 devices = node['devices']
1124 kernel_devices = devices['kernel_devices']
1125 dpdk_devices = devices['dpdk_devices']
1127 klen = len(kernel_devices)
1129 print "\nThese devices have kernel interfaces, but",
1130 print "appear to be safe to use with VPP.\n"
1131 VppPCIUtil.show_vpp_devices(kernel_devices)
1132 question = "\nWould you like to use any of these "
1133 question += "device(s) for VPP [y/N]? "
1134 answer = self._ask_user_yn(question, 'n')
1137 for dit in kernel_devices.items():
1140 question = "Would you like to use device {} ". \
1142 question += "for VPP [y/N]? "
1143 answer = self._ask_user_yn(question, 'n')
1146 for dit in vppd.items():
1149 dpdk_devices[dvid] = device
1150 del kernel_devices[dvid]
1152 dlen = len(dpdk_devices)
1154 print "\nThese device(s) will be used by VPP.\n"
1155 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1156 question = "\nWould you like to remove any of "
1157 question += "these device(s) [y/N]? "
1158 answer = self._ask_user_yn(question, 'n')
1161 for dit in dpdk_devices.items():
1164 question = "Would you like to remove {} [y/N]? ". \
1166 answer = self._ask_user_yn(question, 'n')
1169 for dit in vppd.items():
1172 driver = device['unused'][0]
1173 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1174 kernel_devices[dvid] = device
1175 del dpdk_devices[dvid]
1178 for dit in dpdk_devices.items():
1181 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1182 node['interfaces'] = interfaces
1184 print "\nThese device(s) will be used by VPP, please",
1185 print "rerun this option if this is incorrect.\n"
1186 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1188 self._update_auto_config()
1191 def modify_huge_pages(self):
1193 Modify the huge page configuration, asking for the user for the values.
1197 for i in self._nodes.items():
1200 total = node['hugepages']['actual_total']
1201 free = node['hugepages']['free']
1202 size = node['hugepages']['size']
1203 memfree = node['hugepages']['memfree'].split(' ')[0]
1204 hugesize = int(size.split(' ')[0])
1205 # The max number of huge pages should be no more than
1206 # 70% of total free memory
1207 maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
1208 print "\nThere currently {} {} huge pages free.". \
1210 question = "Do you want to reconfigure the number of "
1211 question += "huge pages [y/N]? "
1212 answer = self._ask_user_yn(question, 'n')
1214 node['hugepages']['total'] = total
1217 print "\nThere currently a total of {} huge pages.". \
1220 "How many huge pages do you want [{} - {}][{}]? ". \
1221 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1222 answer = self._ask_user_range(question, 1024, maxpages, 1024)
1223 node['hugepages']['total'] = str(answer)
1225 # Update auto-config.yaml
1226 self._update_auto_config()
1228 # Rediscover just the hugepages
1229 self.get_hugepages()
1231 def get_tcp_params(self):
1233 Get the tcp configuration
1236 # maybe nothing to do here?
1239 def acquire_tcp_params(self):
1241 Ask the user for TCP stack configuration parameters
1245 for i in self._nodes.items():
1248 question = "\nHow many active-open / tcp client sessions are expected "
1249 question = question + "[0-10000000][0]? "
1250 answer = self._ask_user_range(question, 0, 10000000, 0)
1251 # Less than 10K is equivalent to 0
1252 if int(answer) < 10000:
1254 node['tcp']['active_open_sessions'] = answer
1256 question = "How many passive-open / tcp server sessions are expected "
1257 question = question + "[0-10000000][0]? "
1258 answer = self._ask_user_range(question, 0, 10000000, 0)
1259 # Less than 10K is equivalent to 0
1260 if int(answer) < 10000:
1262 node['tcp']['passive_open_sessions'] = answer
1264 # Update auto-config.yaml
1265 self._update_auto_config()
1267 # Rediscover tcp parameters
1268 self.get_tcp_params()
1271 def patch_qemu(node):
1273 Patch qemu with the correct patches.
1275 :param node: Node dictionary
1279 print '\nWe are patching the node "{}":\n'.format(node['host'])
1280 QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
1285 print the CPU information
1289 cpu = CpuUtils.get_cpu_info_per_node(node)
1293 print "{:>20}: {}".format(item, cpu[item])
1296 print "{:>20}: {}".format(item, cpu[item])
1297 item = 'Thread(s) per core'
1299 print "{:>20}: {}".format(item, cpu[item])
1300 item = 'Core(s) per socket'
1302 print "{:>20}: {}".format(item, cpu[item])
1305 print "{:>20}: {}".format(item, cpu[item])
1306 item = 'NUMA node(s)'
1309 numa_nodes = int(cpu[item])
1310 for i in xrange(0, numa_nodes):
1311 item = "NUMA node{} CPU(s)".format(i)
1312 print "{:>20}: {}".format(item, cpu[item])
1313 item = 'CPU max MHz'
1315 print "{:>20}: {}".format(item, cpu[item])
1316 item = 'CPU min MHz'
1318 print "{:>20}: {}".format(item, cpu[item])
1320 if node['cpu']['smt_enabled']:
1324 print "{:>20}: {}".format('SMT', smt)
1327 print "\nVPP Threads: (Name: Cpu Number)"
1328 vpp_processes = cpu['vpp_processes']
1329 for i in vpp_processes.items():
1330 print " {:10}: {:4}".format(i[0], i[1])
1333 def device_info(node):
1335 Show the device information.
1339 if 'cpu' in node and 'total_mbufs' in node['cpu']:
1340 total_mbufs = node['cpu']['total_mbufs']
1341 if total_mbufs is not 0:
1342 print "Total Number of Buffers: {}".format(total_mbufs)
1344 vpp = VppPCIUtil(node)
1345 vpp.get_all_devices()
1346 linkup_devs = vpp.get_link_up_devices()
1347 if len(linkup_devs):
1348 print ("\nDevices with link up (can not be used with VPP):")
1349 vpp.show_vpp_devices(linkup_devs, show_header=False)
1350 # for dev in linkup_devs:
1352 kernel_devs = vpp.get_kernel_devices()
1353 if len(kernel_devs):
1354 print ("\nDevices bound to kernel drivers:")
1355 vpp.show_vpp_devices(kernel_devs, show_header=False)
1357 print ("\nNo devices bound to kernel drivers")
1359 dpdk_devs = vpp.get_dpdk_devices()
1361 print ("\nDevices bound to DPDK drivers:")
1362 vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1365 print ("\nNo devices bound to DPDK drivers")
1368 interfaces = vpputl.get_hardware(node)
1369 if interfaces == {}:
1372 print ("\nDevices in use by VPP:")
1374 if len(interfaces.items()) < 2:
1378 print "{:30} {:6} {:4} {:7} {:4} {:7}". \
1379 format('Name', 'Socket', 'RXQs',
1380 'RXDescs', 'TXQs', 'TXDescs')
1381 for intf in sorted(interfaces.items()):
1384 if name == 'local0':
1386 socket = rx_qs = rx_ds = tx_qs = tx_ds = ''
1387 if 'cpu socket' in value:
1388 socket = int(value['cpu socket'])
1389 if 'rx queues' in value:
1390 rx_qs = int(value['rx queues'])
1391 if 'rx descs' in value:
1392 rx_ds = int(value['rx descs'])
1393 if 'tx queues' in value:
1394 tx_qs = int(value['tx queues'])
1395 if 'tx descs' in value:
1396 tx_ds = int(value['tx descs'])
1398 print ("{:30} {:>6} {:>4} {:>7} {:>4} {:>7}".
1399 format(name, socket, rx_qs, rx_ds, tx_qs, tx_ds))
1402 def hugepage_info(node):
1404 Show the huge page information.
1408 hpg = VppHugePageUtil(node)
1409 hpg.show_huge_pages()
1412 def min_system_resources(node):
1414 Check the system for basic minimum resources, return true if
1424 if 'layout' in node['cpu']:
1425 total_cpus = len(node['cpu']['layout'])
1427 print "\nThere is only {} CPU(s) available on this system.".format(total_cpus)
1428 print "This is not enough to run VPP."
1432 if 'free' in node['hugepages'] and \
1433 'memfree' in node['hugepages'] and \
1434 'size' in node['hugepages']:
1435 free = node['hugepages']['free']
1436 memfree = float(node['hugepages']['memfree'].split(' ')[0])
1437 hugesize = float(node['hugepages']['size'].split(' ')[0])
1439 memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1440 percentmemhugepages = (memhugepages / memfree) * 100
1441 if free is '0' and \
1442 percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1443 print "\nThe System has only {} of free memory.".format(int(memfree))
1444 print "You will not be able to allocate enough Huge Pages for VPP."
1451 Print the system information
1455 for i in self._nodes.items():
1456 print "\n=============================="
1460 print "NODE: {}\n".format(name)
1467 print "\nGrub Command Line:"
1470 " Current: {}".format(
1471 node['grub']['current_cmdline'])
1473 " Configured: {}".format(
1474 node['grub']['default_cmdline'])
1477 print "\nHuge Pages:"
1478 self.hugepage_info(node)
1482 self.device_info(node)
1485 print "\nVPP Service Status:"
1486 state, errors = VPPUtil.status(node)
1487 print " {}".format(state)
1489 print " {}".format(e)
1491 # Minimum system resources
1492 self.min_system_resources(node)
1494 print "\n=============================="
1496 def _ipv4_interface_setup_questions(self, node):
1498 Ask the user some questions and get a list of interfaces
1499 and IPv4 addresses associated with those interfaces
1501 :param node: Node dictionary.
1503 :returns: A list or interfaces with ip addresses
1508 interfaces = vpputl.get_hardware(node)
1509 if interfaces == {}:
1512 interfaces_with_ip = []
1513 for intf in sorted(interfaces.items()):
1515 if name == 'local0':
1518 question = "Would you like add address to interface {} [Y/n]? ".format(name)
1519 answer = self._ask_user_yn(question, 'y')
1522 addr = self._ask_user_ipv4()
1523 address['name'] = name
1524 address['addr'] = addr
1525 interfaces_with_ip.append(address)
1527 return interfaces_with_ip
1529 def ipv4_interface_setup(self):
1531 After asking the user some questions, get a list of interfaces
1532 and IPv4 addresses associated with those interfaces
1536 for i in self._nodes.items():
1539 # Show the current interfaces with IP addresses
1540 current_ints = VPPUtil.get_int_ip(node)
1541 if current_ints is not {}:
1542 print ("\nThese are the current interfaces with IP addresses:")
1543 for items in sorted(current_ints.items()):
1546 if 'address' not in value:
1549 address = value['address']
1550 print ("{:30} {:20} {:10}".format(name, address, value['state']))
1551 question = "\nWould you like to keep this configuration [Y/n]? "
1552 answer = self._ask_user_yn(question, 'y')
1556 print ("\nThere are currently no interfaces with IP addresses.")
1558 # Create a script that add the ip addresses to the interfaces
1559 # and brings the interfaces up
1560 ints_with_addrs = self._ipv4_interface_setup_questions(node)
1562 for ints in ints_with_addrs:
1565 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1566 setintupstr = 'set int state {} up\n'.format(name)
1567 content += setipstr + setintupstr
1569 # Write the content to the script
1570 rootdir = node['rootdir']
1571 filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1572 with open(filename, 'w+') as sfile:
1573 sfile.write(content)
1575 # Execute the script
1576 cmd = 'vppctl exec {}'.format(filename)
1577 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1579 logging.debug(stderr)
1581 print("\nA script as been created at {}".format(filename))
1582 print("This script can be run using the following:")
1583 print("vppctl exec {}\n".format(filename))
1585 def _create_vints_questions(self, node):
1587 Ask the user some questions and get a list of interfaces
1588 and IPv4 addresses associated with those interfaces
1590 :param node: Node dictionary.
1592 :returns: A list or interfaces with ip addresses
1597 interfaces = vpputl.get_hardware(node)
1598 if interfaces == {}:
1601 # First delete all the Virtual interfaces
1602 for intf in sorted(interfaces.items()):
1604 if name[:7] == 'Virtual':
1605 cmd = 'vppctl delete vhost-user {}'.format(name)
1606 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1608 logging.debug('{} failed on node {} {}'.format(
1609 cmd, node['host'], stderr))
1611 # Create a virtual interface, for each interface the user wants to use
1612 interfaces = vpputl.get_hardware(node)
1613 if interfaces == {}:
1615 interfaces_with_virtual_interfaces = []
1617 for intf in sorted(interfaces.items()):
1619 if name == 'local0':
1622 question = "Would you like connect this interface {} to the VM [Y/n]? ".format(name)
1623 answer = self._ask_user_yn(question, 'y')
1625 sockfilename = '/tmp/sock{}.sock'.format(inum)
1626 if os.path.exists(sockfilename):
1627 os.remove(sockfilename)
1628 cmd = 'vppctl create vhost-user socket {} server'.format(sockfilename)
1629 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1631 raise RuntimeError("Create vhost failed on node {} {}."
1632 .format(node['host'], stderr))
1633 vintname = stdout.rstrip('\r\n')
1635 interface = {'name': name, 'virtualinterface': '{}'.format(vintname),
1636 'bridge': '{}'.format(inum)}
1638 interfaces_with_virtual_interfaces.append(interface)
1640 return interfaces_with_virtual_interfaces
1642 def create_and_bridge_virtual_interfaces(self):
1644 After asking the user some questions, create a VM and connect the interfaces
1649 for i in self._nodes.items():
1652 # Show the current bridge and interface configuration
1653 print "\nThis the current bridge configuration:"
1654 VPPUtil.show_bridge(node)
1655 question = "\nWould you like to keep this configuration [Y/n]? "
1656 answer = self._ask_user_yn(question, 'y')
1660 # Create a script that builds a bridge configuration with physical interfaces
1661 # and virtual interfaces
1662 ints_with_vints = self._create_vints_questions(node)
1664 for intf in ints_with_vints:
1665 vhoststr = 'comment { The following command creates the socket }\n'
1666 vhoststr += 'comment { and returns a virtual interface }\n'
1667 vhoststr += 'comment {{ create vhost-user socket /tmp/sock{}.sock server }}\n'. \
1668 format(intf['bridge'])
1670 setintdnstr = 'set interface state {} down\n'.format(intf['name'])
1672 setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
1673 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])
1675 # set interface state VirtualEthernet/0/0/0 up
1676 setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])
1678 # set interface state VirtualEthernet/0/0/0 down
1679 setintupstr = 'set interface state {} up\n'.format(intf['name'])
1681 content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
1683 # Write the content to the script
1684 rootdir = node['rootdir']
1685 filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1686 with open(filename, 'w+') as sfile:
1687 sfile.write(content)
1689 # Execute the script
1690 cmd = 'vppctl exec {}'.format(filename)
1691 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1693 logging.debug(stderr)
1695 print("\nA script as been created at {}".format(filename))
1696 print("This script can be run using the following:")
1697 print("vppctl exec {}\n".format(filename))