1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
22 from vpplib.VPPUtil import VPPUtil
23 from vpplib.VppPCIUtil import VppPCIUtil
24 from vpplib.VppHugePageUtil import VppHugePageUtil
25 from vpplib.CpuUtils import CpuUtils
26 from vpplib.VppGrubUtil import VppGrubUtil
27 from vpplib.QemuUtils import QemuUtils
__all__ = ["AutoConfig"]

# Lower bound offered to the user when resizing huge pages; also used by
# min_system_resources() to estimate the minimum memory VPP needs.
MIN_TOTAL_HUGE_PAGES = 1024
# Huge pages are capped at this percentage of free memory
# (see modify_huge_pages).
MAX_PERCENT_FOR_HUGE_PAGES = 70
class AutoConfig(object):
    """Auto Configuration Tools"""
40 def __init__(self, rootdir, filename):
42 The Auto Configure class.
44 :param rootdir: The root directory for all the auto configuration files
45 :param filename: The autoconfiguration file
49 self._autoconfig_filename = rootdir + filename
50 self._rootdir = rootdir
53 self._vpp_devices_node = {}
54 self._hugepage_config = ""
59 Returns the nodes dictionary.
68 def _autoconfig_backup_file(filename):
72 :param filename: The file to backup
76 # Does a copy of the file exist, if not create one
77 ofile = filename + '.orig'
78 (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
81 if stdout.strip('\n') != ofile:
82 cmd = 'sudo cp {} {}'.format(filename, ofile)
83 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
90 Asks the user for a number within a range.
91 default is returned if return is entered.
93 :returns: IP address and prefix len
98 answer = raw_input("Please enter the IPv4 Address [n.n.n.n]: ")
100 ipaddr = ipaddress.ip_address(u'{}'.format(answer))
102 print "Please enter a valid IPv4 address."
105 answer = raw_input("Please enter the netmask [n.n.n.n]: ")
107 netmask = ipaddress.ip_address(u'{}'.format(answer))
108 pl = ipaddress.ip_network(u'0.0.0.0/{}'.format(netmask))
109 plen = pl.exploded.split('/')[1]
112 print "Please enter a valid IPv4 address and netmask."
118 def _ask_user_range(question, first, last, default):
120 Asks the user for a number within a range.
121 default is returned if return is entered.
123 :param question: Text of a question.
124 :param first: First number in the range
125 :param last: Last number in the range
126 :param default: The value returned when return is entered
127 :type question: string
131 :returns: The answer to the question
136 answer = raw_input(question)
140 if re.findall(r'[0-9+]', answer):
141 if int(answer) in range(first, last + 1):
144 print "Please a value between {} and {} or Return.". \
147 print "Please a number between {} and {} or Return.". \
153 def _ask_user_yn(question, default):
155 Asks the user for a yes or no question.
157 :param question: Text of a question.
158 :param default: The value returned when return is entered
159 :type question: string
160 :type default: string
161 :returns: The answer to the question
166 default = default.lower()
168 while not input_valid:
169 answer = raw_input(question)
172 if re.findall(r'[YyNn]', answer):
174 answer = answer[0].lower()
176 print "Please answer Y, N or Return."
180 def _loadconfig(self):
182 Load the testbed configuration, given the auto configuration file.
186 # Get the Topology, from the topology layout file
188 with open(self._autoconfig_filename, 'r') as stream:
190 topo = yaml.load(stream)
191 if 'metadata' in topo:
192 self._metadata = topo['metadata']
193 except yaml.YAMLError as exc:
194 raise RuntimeError("Couldn't read the Auto config file {}.".format(self._autoconfig_filename, exc))
196 systemfile = self._rootdir + self._metadata['system_config_file']
197 if os.path.isfile(systemfile):
198 with open(systemfile, 'r') as sysstream:
200 systopo = yaml.load(sysstream)
201 if 'nodes' in systopo:
202 self._nodes = systopo['nodes']
203 except yaml.YAMLError as sysexc:
204 raise RuntimeError("Couldn't read the System config file {}.".format(systemfile, sysexc))
206 # Get the nodes from Auto Config
208 self._nodes = topo['nodes']
210 # Set the root directory in all the nodes
211 for i in self._nodes.items():
213 node['rootdir'] = self._rootdir
215 def updateconfig(self):
217 Update the testbed configuration, given the auto configuration file.
218 We will write the system configuration file with the current node
223 # Initialize the yaml data
224 ydata = {'metadata': self._metadata, 'nodes': self._nodes}
226 # Write the system config file
227 filename = self._rootdir + self._metadata['system_config_file']
228 with open(filename, 'w') as yamlfile:
229 yaml.dump(ydata, yamlfile, default_flow_style=False)
231 def _update_auto_config(self):
233 Write the auto configuration file with the new configuration data,
238 # Initialize the yaml data
240 with open(self._autoconfig_filename, 'r') as stream:
242 ydata = yaml.load(stream)
244 nodes = ydata['nodes']
245 except yaml.YAMLError as exc:
249 for i in nodes.items():
254 node['interfaces'] = {}
255 for item in self._nodes[key]['interfaces'].items():
259 node['interfaces'][port] = {}
260 node['interfaces'][port]['pci_address'] = \
261 interface['pci_address']
262 if 'mac_address' in interface:
263 node['interfaces'][port]['mac_address'] = \
264 interface['mac_address']
266 if 'total_other_cpus' in self._nodes[key]['cpu']:
267 node['cpu']['total_other_cpus'] = \
268 self._nodes[key]['cpu']['total_other_cpus']
269 if 'total_vpp_cpus' in self._nodes[key]['cpu']:
270 node['cpu']['total_vpp_cpus'] = \
271 self._nodes[key]['cpu']['total_vpp_cpus']
272 if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
273 node['cpu']['reserve_vpp_main_core'] = \
274 self._nodes[key]['cpu']['reserve_vpp_main_core']
277 if 'active_open_sessions' in self._nodes[key]['tcp']:
278 node['tcp']['active_open_sessions'] = \
279 self._nodes[key]['tcp']['active_open_sessions']
280 if 'passive_open_sessions' in self._nodes[key]['tcp']:
281 node['tcp']['passive_open_sessions'] = \
282 self._nodes[key]['tcp']['passive_open_sessions']
285 node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
287 # Write the auto config config file
288 with open(self._autoconfig_filename, 'w') as yamlfile:
289 yaml.dump(ydata, yamlfile, default_flow_style=False)
291 def apply_huge_pages(self):
293 Apply the huge page config
297 for i in self._nodes.items():
300 hpg = VppHugePageUtil(node)
301 hpg.hugepages_dryrun_apply()
304 def _apply_vpp_unix(node):
306 Apply the VPP Unix config
308 :param node: Node dictionary with cpuinfo.
313 if 'unix' not in node['vpp']:
316 unixv = node['vpp']['unix']
317 if 'interactive' in unixv:
318 interactive = unixv['interactive']
319 if interactive is True:
320 unix = ' interactive\n'
322 return unix.rstrip('\n')
325 def _apply_vpp_cpu(node):
327 Apply the VPP cpu config
329 :param node: Node dictionary with cpuinfo.
335 vpp_main_core = node['cpu']['vpp_main_core']
336 if vpp_main_core is not 0:
337 cpu += ' main-core {}\n'.format(vpp_main_core)
340 vpp_workers = node['cpu']['vpp_workers']
341 vpp_worker_len = len(vpp_workers)
342 if vpp_worker_len > 0:
344 for i, worker in enumerate(vpp_workers):
346 vpp_worker_str += ','
347 if worker[0] == worker[1]:
348 vpp_worker_str += "{}".format(worker[0])
350 vpp_worker_str += "{}-{}".format(worker[0], worker[1])
352 cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
357 def _apply_vpp_devices(node):
359 Apply VPP PCI Device configuration to vpp startup.
361 :param node: Node dictionary with cpuinfo.
366 ports_per_numa = node['cpu']['ports_per_numa']
367 total_mbufs = node['cpu']['total_mbufs']
369 for item in ports_per_numa.items():
371 interfaces = value['interfaces']
373 # if 0 was specified for the number of vpp workers, use 1 queue
376 if 'rx_queues' in value:
377 num_rx_queues = value['rx_queues']
378 if 'tx_queues' in value:
379 num_tx_queues = value['tx_queues']
384 # Create the devices string
385 for interface in interfaces:
386 pci_address = interface['pci_address']
387 pci_address = pci_address.lstrip("'").rstrip("'")
389 devices += ' dev {} {{ \n'.format(pci_address)
391 devices += ' num-rx-queues {}\n'.format(num_rx_queues)
393 devices += ' num-rx-queues {}\n'.format(1)
395 devices += ' num-tx-queues {}\n'.format(num_tx_queues)
397 devices += ' num-rx-desc {}\n'.format(num_rx_desc)
399 devices += ' num-tx-desc {}\n'.format(num_tx_desc)
402 if total_mbufs is not 0:
403 devices += '\n num-mbufs {}'.format(total_mbufs)
408 def _calc_vpp_workers(node, vpp_workers, numa_node,
409 other_cpus_end, total_vpp_workers,
410 reserve_vpp_main_core):
412 Calculate the VPP worker information
414 :param node: Node dictionary
415 :param vpp_workers: List of VPP workers
416 :param numa_node: Numa node
417 :param other_cpus_end: The end of the cpus allocated for cores
419 :param total_vpp_workers: The number of vpp workers needed
420 :param reserve_vpp_main_core: Is there a core needed for
424 :type other_cpus_end: int
425 :type total_vpp_workers: int
426 :type reserve_vpp_main_core: bool
427 :returns: Is a core still needed for the vpp main core
431 # Can we fit the workers in one of these slices
432 cpus = node['cpu']['cpus_per_node'][numa_node]
436 if start <= other_cpus_end:
437 start = other_cpus_end + 1
439 if reserve_vpp_main_core:
442 workers_end = start + total_vpp_workers - 1
443 if workers_end <= end:
444 if reserve_vpp_main_core:
445 node['cpu']['vpp_main_core'] = start - 1
446 reserve_vpp_main_core = False
447 if total_vpp_workers:
448 vpp_workers.append((start, workers_end))
451 # We still need to reserve the main core
452 if reserve_vpp_main_core:
453 node['cpu']['vpp_main_core'] = other_cpus_end + 1
455 return reserve_vpp_main_core
458 def _calc_desc_and_queues(total_numa_nodes,
459 total_ports_per_numa,
461 ports_per_numa_value):
463 Calculate the number of descriptors and queues
465 :param total_numa_nodes: The total number of numa nodes
466 :param total_ports_per_numa: The total number of ports for this
468 :param total_vpp_cpus: The total number of cpus to allocate for vpp
469 :param ports_per_numa_value: The value from the ports_per_numa
471 :type total_numa_nodes: int
472 :type total_ports_per_numa: int
473 :type total_vpp_cpus: int
474 :type ports_per_numa_value: dict
475 :returns The total number of message buffers
476 :returns: The total number of vpp workers
481 # Get the total vpp workers
482 total_vpp_workers = total_vpp_cpus
483 ports_per_numa_value['total_vpp_workers'] = total_vpp_workers
485 # Get the number of rx queues
486 rx_queues = max(1, total_vpp_workers)
487 tx_queues = total_vpp_workers * total_numa_nodes + 1
489 # Get the descriptor entries
491 ports_per_numa_value['rx_queues'] = rx_queues
492 total_mbufs = (((rx_queues * desc_entries) +
493 (tx_queues * desc_entries)) *
494 total_ports_per_numa)
495 total_mbufs = total_mbufs
497 return total_mbufs, total_vpp_workers
500 def _create_ports_per_numa(node, interfaces):
502 Create a dictionary or ports per numa node
503 :param node: Node dictionary
504 :param interfaces: All the interfaces to be used by vpp
506 :type interfaces: dict
507 :returns: The ports per numa dictionary
511 # Make a list of ports by numa node
513 for item in interfaces.items():
515 if i['numa_node'] not in ports_per_numa:
516 ports_per_numa[i['numa_node']] = {'interfaces': []}
517 ports_per_numa[i['numa_node']]['interfaces'].append(i)
519 ports_per_numa[i['numa_node']]['interfaces'].append(i)
520 node['cpu']['ports_per_numa'] = ports_per_numa
522 return ports_per_numa
524 def calculate_cpu_parameters(self):
526 Calculate the cpu configuration.
530 # Calculate the cpu parameters, needed for the
531 # vpp_startup and grub configuration
532 for i in self._nodes.items():
535 # get total number of nic ports
536 interfaces = node['interfaces']
538 # Make a list of ports by numa node
539 ports_per_numa = self._create_ports_per_numa(node, interfaces)
541 # Get the number of cpus to skip, we never use the first cpu
543 other_cpus_end = other_cpus_start + \
544 node['cpu']['total_other_cpus'] - 1
546 if other_cpus_end is not 0:
547 other_workers = (other_cpus_start, other_cpus_end)
548 node['cpu']['other_workers'] = other_workers
550 # Allocate the VPP main core and workers
552 reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
553 total_vpp_cpus = node['cpu']['total_vpp_cpus']
555 # If total_vpp_cpus is 0 or is less than the numa nodes with ports
556 # then we shouldn't get workers
557 total_with_main = total_vpp_cpus
558 if reserve_vpp_main_core:
561 if total_with_main is not 0:
562 for item in ports_per_numa.items():
566 # Get the number of descriptors and queues
567 mbufs, total_vpp_workers = self._calc_desc_and_queues(
569 len(value['interfaces']), total_vpp_cpus, value)
572 # Get the VPP workers
573 reserve_vpp_main_core = self._calc_vpp_workers(
574 node, vpp_workers, numa_node, other_cpus_end,
575 total_vpp_workers, reserve_vpp_main_core)
578 total_mbufs = int(total_mbufs)
583 node['cpu']['vpp_workers'] = vpp_workers
584 node['cpu']['total_mbufs'] = total_mbufs
590 def _apply_vpp_tcp(node):
592 Apply the VPP Unix config
594 :param node: Node dictionary with cpuinfo.
598 active_open_sessions = node['tcp']['active_open_sessions']
599 aos = int(active_open_sessions)
601 passive_open_sessions = node['tcp']['passive_open_sessions']
602 pos = int(passive_open_sessions)
604 # Generate the api-segment gid vpp sheit in any case
606 tcp = "api-segment {\n"
607 tcp = tcp + " gid vpp\n"
609 return tcp.rstrip('\n')
611 tcp = "# TCP stack-related configuration parameters\n"
612 tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
613 tcp = tcp + "heapsize 4g\n\n"
614 tcp = tcp + "api-segment {\n"
615 tcp = tcp + " global-size 2000M\n"
616 tcp = tcp + " api-size 1G\n"
619 tcp = tcp + "session {\n"
620 tcp = tcp + " event-queue-length " + "{:d}".format(aos + pos) + "\n"
621 tcp = tcp + " preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
622 tcp = tcp + " v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
623 tcp = tcp + " v4-session-table-memory 3g\n"
625 tcp = tcp + " v4-halfopen-table-buckets " + \
626 "{:d}".format((aos + pos) / 4) + "\n"
627 tcp = tcp + " v4-halfopen-table-memory 3g\n"
630 tcp = tcp + "tcp {\n"
631 tcp = tcp + " preallocated-connections " + "{:d}".format(aos + pos) + "\n"
633 tcp = tcp + " preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
634 tcp = tcp + " local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
635 tcp = tcp + " local-endpoints-table-memory 3g\n"
638 return tcp.rstrip('\n')
640 def apply_vpp_startup(self):
642 Apply the vpp startup configration
646 # Apply the VPP startup configruation
647 for i in self._nodes.items():
650 # Get the startup file
651 rootdir = node['rootdir']
652 sfile = rootdir + node['vpp']['startup_config_file']
655 devices = self._apply_vpp_devices(node)
658 cpu = self._apply_vpp_cpu(node)
660 # Get the unix config
661 unix = self._apply_vpp_unix(node)
663 # Get the TCP configuration, if any
664 tcp = self._apply_vpp_tcp(node)
666 # Make a backup if needed
667 self._autoconfig_backup_file(sfile)
670 tfile = sfile + '.template'
671 (ret, stdout, stderr) = \
672 VPPUtil.exec_command('cat {}'.format(tfile))
674 raise RuntimeError('Executing cat command failed to node {}'.
675 format(node['host']))
676 startup = stdout.format(unix=unix,
681 (ret, stdout, stderr) = \
682 VPPUtil.exec_command('rm {}'.format(sfile))
684 logging.debug(stderr)
686 cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
687 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
689 raise RuntimeError('Writing config failed node {}'.
690 format(node['host']))
692 def apply_grub_cmdline(self):
694 Apply the grub cmdline
698 for i in self._nodes.items():
701 # Get the isolated CPUs
702 other_workers = node['cpu']['other_workers']
703 vpp_workers = node['cpu']['vpp_workers']
704 vpp_main_core = node['cpu']['vpp_main_core']
706 if other_workers is not None:
707 all_workers = [other_workers]
708 if vpp_main_core is not 0:
709 all_workers += [(vpp_main_core, vpp_main_core)]
710 all_workers += vpp_workers
712 for idx, worker in enumerate(all_workers):
717 if worker[0] == worker[1]:
718 isolated_cpus += "{}".format(worker[0])
720 isolated_cpus += "{}-{}".format(worker[0], worker[1])
722 vppgrb = VppGrubUtil(node)
723 current_cmdline = vppgrb.get_current_cmdline()
724 if 'grub' not in node:
726 node['grub']['current_cmdline'] = current_cmdline
727 node['grub']['default_cmdline'] = \
728 vppgrb.apply_cmdline(node, isolated_cpus)
732 def get_hugepages(self):
734 Get the hugepage configuration
738 for i in self._nodes.items():
741 hpg = VppHugePageUtil(node)
742 max_map_count, shmmax = hpg.get_huge_page_config()
743 node['hugepages']['max_map_count'] = max_map_count
744 node['hugepages']['shmax'] = shmmax
745 total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
746 node['hugepages']['actual_total'] = total
747 node['hugepages']['free'] = free
748 node['hugepages']['size'] = size
749 node['hugepages']['memtotal'] = memtotal
750 node['hugepages']['memfree'] = memfree
756 Get the grub configuration
760 for i in self._nodes.items():
763 vppgrb = VppGrubUtil(node)
764 current_cmdline = vppgrb.get_current_cmdline()
765 default_cmdline = vppgrb.get_default_cmdline()
767 # Get the total number of isolated CPUs
769 iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
770 iso_cpurl = len(iso_cpur)
772 iso_cpu_str = iso_cpur[0]
773 iso_cpu_str = iso_cpu_str.split('=')[1]
774 iso_cpul = iso_cpu_str.split(',')
775 for iso_cpu in iso_cpul:
776 isocpuspl = iso_cpu.split('-')
777 if len(isocpuspl) is 1:
778 current_iso_cpus += 1
780 first = int(isocpuspl[0])
781 second = int(isocpuspl[1])
783 current_iso_cpus += 1
785 current_iso_cpus += second - first
787 if 'grub' not in node:
789 node['grub']['current_cmdline'] = current_cmdline
790 node['grub']['default_cmdline'] = default_cmdline
791 node['grub']['current_iso_cpus'] = current_iso_cpus
796 def _get_device(node):
798 Get the device configuration for a single node
800 :param node: Node dictionary with cpuinfo.
805 vpp = VppPCIUtil(node)
806 vpp.get_all_devices()
808 # Save the device information
810 node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
811 node['devices']['kernel_devices'] = vpp.get_kernel_devices()
812 node['devices']['other_devices'] = vpp.get_other_devices()
813 node['devices']['linkup_devices'] = vpp.get_link_up_devices()
815 def get_devices_per_node(self):
817 Get the device configuration for all the nodes
821 for i in self._nodes.items():
823 # Update the interface data
825 self._get_device(node)
830 def get_cpu_layout(node):
834 using lscpu -p get the cpu layout.
835 Returns a list with each item representing a single cpu.
837 :param node: Node dictionary.
839 :returns: The cpu layout
844 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
846 raise RuntimeError('{} failed on node {} {}'.
847 format(cmd, node['host'], stderr))
850 lines = stdout.split('\n')
852 if line == '' or line[0] == '#':
854 linesplit = line.split(',')
855 layout = {'cpu': linesplit[0], 'core': linesplit[1],
856 'socket': linesplit[2], 'node': linesplit[3]}
858 # cpu, core, socket, node
865 Get the cpu configuration
870 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
872 for i in self._nodes.items():
876 layout = self.get_cpu_layout(node)
877 node['cpu']['layout'] = layout
879 cpuinfo = node['cpuinfo']
880 smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
881 node['cpu']['smt_enabled'] = smt_enabled
883 # We don't want to write the cpuinfo
891 Get the current system configuration.
895 # Get the Huge Page configuration
898 # Get the device configuration
899 self.get_devices_per_node()
901 # Get the CPU configuration
904 # Get the current grub cmdline
907 def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
909 Ask the user questions related to the cpu configuration.
911 :param node: Node dictionary
912 :param total_cpus: The total number of cpus in the system
913 :param numa_nodes: The list of numa nodes in the system
915 :type total_cpus: int
916 :type numa_nodes: list
919 print "\nYour system has {} core(s) and {} Numa Nodes.". \
920 format(total_cpus, len(numa_nodes))
921 print "To begin, we suggest not reserving any cores for VPP",
922 print "or other processes."
923 print "Then to improve performance try reserving cores as needed. "
925 max_other_cores = total_cpus / 2
926 question = '\nHow many core(s) do you want to reserve for processes \
927 other than VPP? [0-{}][0]? '.format(str(max_other_cores))
928 total_other_cpus = self._ask_user_range(question, 0, max_other_cores,
930 node['cpu']['total_other_cpus'] = total_other_cpus
935 question = "How many core(s) shall we reserve for VPP workers[0-{}][0]? ". \
937 total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
938 node['cpu']['total_vpp_cpus'] = total_vpp_cpus
940 max_main_cpus = max_vpp_cpus - total_vpp_cpus
941 reserve_vpp_main_core = False
942 if max_main_cpus > 0:
943 question = "Should we reserve 1 core for the VPP Main thread? "
944 question += "[y/N]? "
945 answer = self._ask_user_yn(question, 'n')
947 reserve_vpp_main_core = True
948 node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
949 node['cpu']['vpp_main_core'] = 0
951 def modify_cpu(self):
953 Modify the cpu configuration, asking for the user for the values.
958 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
960 for i in self._nodes.items():
963 total_cpus_per_slice = 0
967 cpu_layout = self.get_cpu_layout(node)
969 # Assume the number of cpus per slice is always the same as the
972 for cpu in cpu_layout:
973 if cpu['node'] != first_node:
975 total_cpus_per_slice += 1
977 # Get the total number of cpus, cores, and numa nodes from the
979 for cpul in cpu_layout:
980 numa_node = cpul['node']
985 if numa_node not in cpus_per_node:
986 cpus_per_node[numa_node] = []
987 cpuperslice = int(cpu) % total_cpus_per_slice
989 cpus_per_node[numa_node].append((int(cpu), int(cpu) +
990 total_cpus_per_slice - 1))
991 if numa_node not in numa_nodes:
992 numa_nodes.append(numa_node)
993 if core not in cores:
995 node['cpu']['cpus_per_node'] = cpus_per_node
997 # Ask the user some questions
998 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1000 # Populate the interfaces with the numa node
1001 ikeys = node['interfaces'].keys()
1002 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1004 # We don't want to write the cpuinfo
1005 node['cpuinfo'] = ""
1008 self._update_auto_config()
1011 def _modify_other_devices(self, node,
1012 other_devices, kernel_devices, dpdk_devices):
1014 Modify the devices configuration, asking for the user for the values.
1018 odevices_len = len(other_devices)
1019 if odevices_len > 0:
1020 print "\nThese device(s) are currently NOT being used",
1021 print "by VPP or the OS.\n"
1022 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1023 question = "\nWould you like to give any of these devices"
1024 question += " back to the OS [Y/n]? "
1025 answer = self._ask_user_yn(question, 'Y')
1028 for dit in other_devices.items():
1031 question = "Would you like to use device {} for". \
1033 question += " the OS [y/N]? "
1034 answer = self._ask_user_yn(question, 'n')
1036 driver = device['unused'][0]
1037 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1039 for dit in vppd.items():
1042 kernel_devices[dvid] = device
1043 del other_devices[dvid]
1045 odevices_len = len(other_devices)
1046 if odevices_len > 0:
1047 print "\nThese device(s) are still NOT being used ",
1048 print "by VPP or the OS.\n"
1049 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1050 question = "\nWould you like use any of these for VPP [y/N]? "
1051 answer = self._ask_user_yn(question, 'N')
1054 for dit in other_devices.items():
1057 question = "Would you like to use device {} ".format(dvid)
1058 question += "for VPP [y/N]? "
1059 answer = self._ask_user_yn(question, 'n')
1062 for dit in vppd.items():
1065 dpdk_devices[dvid] = device
1066 del other_devices[dvid]
1068 def modify_devices(self):
1070 Modify the devices configuration, asking for the user for the values.
1074 for i in self._nodes.items():
1076 devices = node['devices']
1077 other_devices = devices['other_devices']
1078 kernel_devices = devices['kernel_devices']
1079 dpdk_devices = devices['dpdk_devices']
1082 self._modify_other_devices(node, other_devices,
1083 kernel_devices, dpdk_devices)
1085 # Get the devices again for this node
1086 self._get_device(node)
1087 devices = node['devices']
1088 kernel_devices = devices['kernel_devices']
1089 dpdk_devices = devices['dpdk_devices']
1091 klen = len(kernel_devices)
1093 print "\nThese devices have kernel interfaces, but",
1094 print "appear to be safe to use with VPP.\n"
1095 VppPCIUtil.show_vpp_devices(kernel_devices)
1096 question = "\nWould you like to use any of these "
1097 question += "device(s) for VPP [y/N]? "
1098 answer = self._ask_user_yn(question, 'n')
1101 for dit in kernel_devices.items():
1104 question = "Would you like to use device {} ". \
1106 question += "for VPP [y/N]? "
1107 answer = self._ask_user_yn(question, 'n')
1110 for dit in vppd.items():
1113 dpdk_devices[dvid] = device
1114 del kernel_devices[dvid]
1116 dlen = len(dpdk_devices)
1118 print "\nThese device(s) will be used by VPP.\n"
1119 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1120 question = "\nWould you like to remove any of "
1121 question += "these device(s) [y/N]? "
1122 answer = self._ask_user_yn(question, 'n')
1125 for dit in dpdk_devices.items():
1128 question = "Would you like to remove {} [y/N]? ". \
1130 answer = self._ask_user_yn(question, 'n')
1133 for dit in vppd.items():
1136 driver = device['unused'][0]
1137 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1138 kernel_devices[dvid] = device
1139 del dpdk_devices[dvid]
1142 for dit in dpdk_devices.items():
1145 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1146 node['interfaces'] = interfaces
1148 print "\nThese device(s) will be used by VPP, please",
1149 print "rerun this option if this is incorrect.\n"
1150 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1152 self._update_auto_config()
1155 def modify_huge_pages(self):
1157 Modify the huge page configuration, asking for the user for the values.
1161 for i in self._nodes.items():
1164 total = node['hugepages']['actual_total']
1165 free = node['hugepages']['free']
1166 size = node['hugepages']['size']
1167 memfree = node['hugepages']['memfree'].split(' ')[0]
1168 hugesize = int(size.split(' ')[0])
1169 # The max number of huge pages should be no more than
1170 # 70% of total free memory
1171 maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
1172 print "\nThere currently {} {} huge pages free.". \
1174 question = "Do you want to reconfigure the number of "
1175 question += "huge pages [y/N]? "
1176 answer = self._ask_user_yn(question, 'n')
1178 node['hugepages']['total'] = total
1181 print "\nThere currently a total of {} huge pages.". \
1184 "How many huge pages do you want [{} - {}][{}]? ". \
1185 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1186 answer = self._ask_user_range(question, 1024, maxpages, 1024)
1187 node['hugepages']['total'] = str(answer)
1189 # Update auto-config.yaml
1190 self._update_auto_config()
1192 # Rediscover just the hugepages
1193 self.get_hugepages()
1195 def get_tcp_params(self):
1197 Get the tcp configuration
1200 # maybe nothing to do here?
1203 def acquire_tcp_params(self):
1205 Ask the user for TCP stack configuration parameters
1209 for i in self._nodes.items():
1212 question = "\nHow many active-open / tcp client sessions are expected "
1213 question = question + "[0-10000000][0]? "
1214 answer = self._ask_user_range(question, 0, 10000000, 0)
1215 # Less than 10K is equivalent to 0
1216 if int(answer) < 10000:
1218 node['tcp']['active_open_sessions'] = answer
1220 question = "How many passive-open / tcp server sessions are expected "
1221 question = question + "[0-10000000][0]? "
1222 answer = self._ask_user_range(question, 0, 10000000, 0)
1223 # Less than 10K is equivalent to 0
1224 if int(answer) < 10000:
1226 node['tcp']['passive_open_sessions'] = answer
1228 # Update auto-config.yaml
1229 self._update_auto_config()
1231 # Rediscover tcp parameters
1232 self.get_tcp_params()
1235 def patch_qemu(node):
1237 Patch qemu with the correct patches.
1239 :param node: Node dictionary
1243 print '\nWe are patching the node "{}":\n'.format(node['host'])
1244 QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
1249 print the CPU information
1253 cpu = CpuUtils.get_cpu_info_per_node(node)
1257 print "{:>20}: {}".format(item, cpu[item])
1260 print "{:>20}: {}".format(item, cpu[item])
1261 item = 'Thread(s) per core'
1263 print "{:>20}: {}".format(item, cpu[item])
1264 item = 'Core(s) per socket'
1266 print "{:>20}: {}".format(item, cpu[item])
1269 print "{:>20}: {}".format(item, cpu[item])
1270 item = 'NUMA node(s)'
1273 numa_nodes = int(cpu[item])
1274 for i in xrange(0, numa_nodes):
1275 item = "NUMA node{} CPU(s)".format(i)
1276 print "{:>20}: {}".format(item, cpu[item])
1277 item = 'CPU max MHz'
1279 print "{:>20}: {}".format(item, cpu[item])
1280 item = 'CPU min MHz'
1282 print "{:>20}: {}".format(item, cpu[item])
1284 if node['cpu']['smt_enabled']:
1288 print "{:>20}: {}".format('SMT', smt)
1291 print "\nVPP Threads: (Name: Cpu Number)"
1292 vpp_processes = cpu['vpp_processes']
1293 for i in vpp_processes.items():
1294 print " {:10}: {:4}".format(i[0], i[1])
1297 def device_info(node):
1299 Show the device information.
1303 if 'cpu' in node and 'total_mbufs' in node['cpu']:
1304 total_mbufs = node['cpu']['total_mbufs']
1305 if total_mbufs is not 0:
1306 print "Total Number of Buffers: {}".format(total_mbufs)
1308 vpp = VppPCIUtil(node)
1309 vpp.get_all_devices()
1310 linkup_devs = vpp.get_link_up_devices()
1311 if len(linkup_devs):
1312 print ("\nDevices with link up (can not be used with VPP):")
1313 vpp.show_vpp_devices(linkup_devs, show_header=False)
1314 # for dev in linkup_devs:
1316 kernel_devs = vpp.get_kernel_devices()
1317 if len(kernel_devs):
1318 print ("\nDevices bound to kernel drivers:")
1319 vpp.show_vpp_devices(kernel_devs, show_header=False)
1321 print ("\nNo devices bound to kernel drivers")
1323 dpdk_devs = vpp.get_dpdk_devices()
1325 print ("\nDevices bound to DPDK drivers:")
1326 vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1329 print ("\nNo devices bound to DPDK drivers")
1332 interfaces = vpputl.get_hardware(node)
1333 if interfaces == {}:
1336 print ("\nDevices in use by VPP:")
1338 if len(interfaces.items()) < 2:
1342 print "{:30} {:6} {:4} {:7} {:4} {:7}". \
1343 format('Name', 'Socket', 'RXQs',
1344 'RXDescs', 'TXQs', 'TXDescs')
1345 for intf in sorted(interfaces.items()):
1348 if name == 'local0':
1350 socket = rx_qs = rx_ds = tx_qs = tx_ds = ''
1351 if 'cpu socket' in value:
1352 socket = int(value['cpu socket'])
1353 if 'rx queues' in value:
1354 rx_qs = int(value['rx queues'])
1355 if 'rx descs' in value:
1356 rx_ds = int(value['rx descs'])
1357 if 'tx queues' in value:
1358 tx_qs = int(value['tx queues'])
1359 if 'tx descs' in value:
1360 tx_ds = int(value['tx descs'])
1362 print ("{:30} {:>6} {:>4} {:>7} {:>4} {:>7}".
1363 format(name, socket, rx_qs, rx_ds, tx_qs, tx_ds))
1366 def hugepage_info(node):
1368 Show the huge page information.
1372 hpg = VppHugePageUtil(node)
1373 hpg.show_huge_pages()
1376 def min_system_resources(node):
1378 Check the system for basic minimum resources, return true if
1388 if 'layout' in node['cpu']:
1389 total_cpus = len(node['cpu']['layout'])
1391 print "\nThere is only {} CPU(s) available on this system.".format(total_cpus)
1392 print "This is not enough to run VPP."
1396 if 'free' in node['hugepages'] and \
1397 'memfree' in node['hugepages'] and \
1398 'size' in node['hugepages']:
1399 free = node['hugepages']['free']
1400 memfree = float(node['hugepages']['memfree'].split(' ')[0])
1401 hugesize = float(node['hugepages']['size'].split(' ')[0])
1403 memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1404 percentmemhugepages = (memhugepages / memfree) * 100
1405 if free is '0' and \
1406 percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1407 print "\nThe System has only {} of free memory.".format(int(memfree))
1408 print "You will not be able to allocate enough Huge Pages for VPP."
1415 Print the system information
1419 for i in self._nodes.items():
1420 print "\n=============================="
1424 print "NODE: {}\n".format(name)
1431 print "\nGrub Command Line:"
1434 " Current: {}".format(
1435 node['grub']['current_cmdline'])
1437 " Configured: {}".format(
1438 node['grub']['default_cmdline'])
1441 print "\nHuge Pages:"
1442 self.hugepage_info(node)
1446 self.device_info(node)
1449 print "\nVPP Service Status:"
1450 state, errors = VPPUtil.status(node)
1451 print " {}".format(state)
1453 print " {}".format(e)
1455 # Minimum system resources
1456 self.min_system_resources(node)
1458 print "\n=============================="
1460 def _ipv4_interface_setup_questions(self, node):
1462 Ask the user some questions and get a list of interfaces
1463 and IPv4 addresses associated with those interfaces
1465 :param node: Node dictionary.
1467 :returns: A list or interfaces with ip addresses
1472 interfaces = vpputl.get_hardware(node)
1473 if interfaces == {}:
1476 interfaces_with_ip = []
1477 for intf in sorted(interfaces.items()):
1479 if name == 'local0':
1482 question = "Would you like an address to interface {} [Y/n]? ".format(name)
1483 answer = self._ask_user_yn(question, 'y')
1486 addr, plen = self._ask_user_ipv4()
1487 address['name'] = name
1488 address['addr'] = addr
1489 address['plen'] = plen
1490 interfaces_with_ip.append(address)
1492 return interfaces_with_ip
1494 def ipv4_interface_setup(self):
1496 After asking the user some questions, get a list of interfaces
1497 and IPv4 addresses associated with those interfaces
1501 for i in self._nodes.items():
1504 # Show the current interfaces with IP addresses
1505 current_ints = VPPUtil.get_int_ip(node)
1506 if current_ints is not {}:
1507 print ("\nThese are the current interfaces with IP addresses:")
1508 for items in sorted(current_ints.items()):
1511 if 'address' not in value:
1514 address = value['address']
1515 print ("{:30} {:20} {:10}".format(name, address, value['state']))
1516 question = "\nWould you like to keep this configuration [Y/n]? "
1517 answer = self._ask_user_yn(question, 'y')
1521 print ("\nThere are currently no interfaces with IP addresses.")
1523 # Create a script that add the ip addresses to the interfaces
1524 # and brings the interfaces up
1525 ints_with_addrs = self._ipv4_interface_setup_questions(node)
1527 for ints in ints_with_addrs:
1531 setipstr = 'set int ip address {} {}/{}\n'.format(name, addr, plen)
1532 setintupstr = 'set int state {} up\n'.format(name)
1533 content += setipstr + setintupstr
1535 # Write the content to the script
1536 rootdir = node['rootdir']
1537 filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1538 with open(filename, 'w+') as sfile:
1539 sfile.write(content)
1541 # Execute the script
1542 cmd = 'vppctl exec {}'.format(filename)
1543 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1545 logging.debug(stderr)
1547 print("\nA script as been created at {}".format(filename))
1548 print("This script can be run using the following:")
1549 print("vppctl exec {}\n".format(filename))