# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library that supports Auto Configuration."""

import logging
import os
import re
import yaml

from vpplib.VPPUtil import VPPUtil
from vpplib.VppPCIUtil import VppPCIUtil
from vpplib.VppHugePageUtil import VppHugePageUtil
from vpplib.CpuUtils import CpuUtils
from vpplib.VppGrubUtil import VppGrubUtil
from vpplib.QemuUtils import QemuUtils

__all__ = ["AutoConfig"]
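# Typical flow (illustrative): discover the current state with get_hugepages()
# and get_devices_per_node(); let the user adjust it with modify_devices(),
# modify_cpu() and modify_huge_pages(); then derive and apply the result with
# calculate_cpu_parameters(), apply_vpp_startup(), apply_huge_pages() and
# apply_grub_cmdline(), and persist it with updateconfig().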
MIN_TOTAL_HUGE_PAGES = 1024
MAX_PERCENT_FOR_HUGE_PAGES = 70
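# These constants bound the interactive huge-page question below: the user is
# offered at least MIN_TOTAL_HUGE_PAGES pages, and the suggested maximum is
# capped at MAX_PERCENT_FOR_HUGE_PAGES percent of the free memory.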
class AutoConfig(object):
    """Auto Configuration Tools"""

    def __init__(self, rootdir, filename):
        The Auto Configure class.

        :param rootdir: The root directory for all the auto configuration files
        :param filename: The autoconfiguration file

        self._autoconfig_filename = rootdir + filename
        self._rootdir = rootdir
        self._vpp_devices_node = {}
        self._hugepage_config = ""

        Returns the nodes dictionary.
    def _autoconfig_backup_file(filename):

        :param filename: The file to backup

        # Does a copy of the file exist, if not create one
        ofile = filename + '.orig'
        (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))

        if stdout.strip('\n') != ofile:
            cmd = 'sudo cp {} {}'.format(filename, ofile)
            (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
    def _ask_user_range(question, first, last, default):
        Asks the user for a number within a range.
        default is returned if return is entered.

        :param question: Text of a question.
        :param first: First number in the range
        :param last: Last number in the range
        :param default: The value returned when return is entered
        :type question: string
        :returns: The answer to the question

            answer = raw_input(question)
            if re.findall(r'[0-9+]', answer):
                if int(answer) in range(first, last + 1):
                    print "Please enter a value between {} and {} or Return.". \
                print "Please enter a number between {} and {} or Return.". \
    def _ask_user_yn(question, default):
        Asks the user a yes or no question.

        :param question: Text of a question.
        :param default: The value returned when return is entered
        :type question: string
        :type default: string
        :returns: The answer to the question

        default = default.lower()
        while not input_valid:
            answer = raw_input(question)
            if re.findall(r'[YyNn]', answer):
                answer = answer[0].lower()
                print "Please answer Y, N or Return."
    def _loadconfig(self):
        Load the testbed configuration, given the auto configuration file.

        # Get the Topology, from the topology layout file
        with open(self._autoconfig_filename, 'r') as stream:
                topo = yaml.load(stream)
                if 'metadata' in topo:
                    self._metadata = topo['metadata']
            except yaml.YAMLError as exc:
                raise RuntimeError("Couldn't read the Auto config file {}: {}".format(self._autoconfig_filename, exc))

        systemfile = self._rootdir + self._metadata['system_config_file']
        if os.path.isfile(systemfile):
            with open(systemfile, 'r') as sysstream:
                    systopo = yaml.load(sysstream)
                    if 'nodes' in systopo:
                        self._nodes = systopo['nodes']
                except yaml.YAMLError as sysexc:
                    raise RuntimeError("Couldn't read the System config file {}: {}".format(systemfile, sysexc))

            # Get the nodes from Auto Config
            self._nodes = topo['nodes']

        # Set the root directory in all the nodes
        for i in self._nodes.items():
            node['rootdir'] = self._rootdir
    def updateconfig(self):
        Update the testbed configuration, given the auto configuration file.
        We will write the system configuration file with the current node

        # Initialize the yaml data
        ydata = {'metadata': self._metadata, 'nodes': self._nodes}

        # Write the system config file
        filename = self._rootdir + self._metadata['system_config_file']
        with open(filename, 'w') as yamlfile:
            yaml.dump(ydata, yamlfile, default_flow_style=False)
    def _update_auto_config(self):
        Write the auto configuration file with the new configuration data,

        # Initialize the yaml data
        with open(self._autoconfig_filename, 'r') as stream:
                ydata = yaml.load(stream)
                    nodes = ydata['nodes']
            except yaml.YAMLError as exc:

        for i in nodes.items():
            node['interfaces'] = {}
            for item in self._nodes[key]['interfaces'].items():
                node['interfaces'][port] = {}
                node['interfaces'][port]['pci_address'] = \
                    interface['pci_address']
                if 'mac_address' in interface:
                    node['interfaces'][port]['mac_address'] = \
                        interface['mac_address']

            if 'total_other_cpus' in self._nodes[key]['cpu']:
                node['cpu']['total_other_cpus'] = \
                    self._nodes[key]['cpu']['total_other_cpus']
            if 'total_vpp_cpus' in self._nodes[key]['cpu']:
                node['cpu']['total_vpp_cpus'] = \
                    self._nodes[key]['cpu']['total_vpp_cpus']
            if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
                node['cpu']['reserve_vpp_main_core'] = \
                    self._nodes[key]['cpu']['reserve_vpp_main_core']

            if 'active_open_sessions' in self._nodes[key]['tcp']:
                node['tcp']['active_open_sessions'] = \
                    self._nodes[key]['tcp']['active_open_sessions']
            if 'passive_open_sessions' in self._nodes[key]['tcp']:
                node['tcp']['passive_open_sessions'] = \
                    self._nodes[key]['tcp']['passive_open_sessions']

            node['hugepages']['total'] = self._nodes[key]['hugepages']['total']

        # Write the auto config file
        with open(self._autoconfig_filename, 'w') as yamlfile:
            yaml.dump(ydata, yamlfile, default_flow_style=False)
    def apply_huge_pages(self):
        Apply the huge page config

        for i in self._nodes.items():
            hpg = VppHugePageUtil(node)
            hpg.hugepages_dryrun_apply()
    def _apply_vpp_unix(node):
        Apply the VPP Unix config

        :param node: Node dictionary with cpuinfo.

        if 'unix' not in node['vpp']:

        unixv = node['vpp']['unix']
        if 'interactive' in unixv:
            interactive = unixv['interactive']
            if interactive is True:
                unix = ' interactive\n'

        return unix.rstrip('\n')
    def _apply_vpp_cpu(node):
        Apply the VPP cpu config

        :param node: Node dictionary with cpuinfo.

        vpp_main_core = node['cpu']['vpp_main_core']
        if vpp_main_core != 0:
            cpu += ' main-core {}\n'.format(vpp_main_core)

        vpp_workers = node['cpu']['vpp_workers']
        vpp_worker_len = len(vpp_workers)
        if vpp_worker_len > 0:
            for i, worker in enumerate(vpp_workers):
                    vpp_worker_str += ','
                if worker[0] == worker[1]:
                    vpp_worker_str += "{}".format(worker[0])
                    vpp_worker_str += "{}-{}".format(worker[0], worker[1])

            cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
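        # Example of the stanza this builds (illustrative values):
        #   main-core 1
        #   corelist-workers 2-3,5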
    def _apply_vpp_devices(node):
        Apply VPP PCI Device configuration to vpp startup.

        :param node: Node dictionary with cpuinfo.

        ports_per_numa = node['cpu']['ports_per_numa']
        total_mbufs = node['cpu']['total_mbufs']

        for item in ports_per_numa.items():
            interfaces = value['interfaces']

            # if 0 was specified for the number of vpp workers, use 1 queue
            if 'rx_queues' in value:
                num_rx_queues = value['rx_queues']
            if 'tx_queues' in value:
                num_tx_queues = value['tx_queues']

            # Create the devices string
            for interface in interfaces:
                pci_address = interface['pci_address']
                pci_address = pci_address.lstrip("'").rstrip("'")
                devices += ' dev {} {{ \n'.format(pci_address)
                    devices += ' num-rx-queues {}\n'.format(num_rx_queues)
                    devices += ' num-rx-queues {}\n'.format(1)
                    devices += ' num-tx-queues {}\n'.format(num_tx_queues)
                    devices += ' num-rx-desc {}\n'.format(num_rx_desc)
                    devices += ' num-tx-desc {}\n'.format(num_tx_desc)

            if total_mbufs != 0:
                devices += '\n num-mbufs {}'.format(total_mbufs)
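        # Example of a generated device stanza (illustrative PCI address):
        #   dev 0000:02:00.0 {
        #     num-rx-queues 2
        #     num-tx-queues 2
        #   }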
    def _calc_vpp_workers(node, vpp_workers, numa_node,
                          other_cpus_end, total_vpp_workers,
                          reserve_vpp_main_core):
        Calculate the VPP worker information

        :param node: Node dictionary
        :param vpp_workers: List of VPP workers
        :param numa_node: Numa node
        :param other_cpus_end: The end of the cpus allocated for cores
        :param total_vpp_workers: The number of vpp workers needed
        :param reserve_vpp_main_core: Is there a core needed for
        :type other_cpus_end: int
        :type total_vpp_workers: int
        :type reserve_vpp_main_core: bool
        :returns: Is a core still needed for the vpp main core

        # Can we fit the workers in one of these slices
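        # Illustrative example: with cpus_per_node[numa_node] == [(0, 7)],
        # other_cpus_end == 1, two workers requested and the main core still
        # reserved, the main core lands on cpu 2 and the workers on cpus 3-4.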
        cpus = node['cpu']['cpus_per_node'][numa_node]
            if start <= other_cpus_end:
                start = other_cpus_end + 1

            if reserve_vpp_main_core:

            workers_end = start + total_vpp_workers - 1
            if workers_end <= end:
                if reserve_vpp_main_core:
                    node['cpu']['vpp_main_core'] = start - 1
                reserve_vpp_main_core = False
                if total_vpp_workers:
                    vpp_workers.append((start, workers_end))

        # We still need to reserve the main core
        if reserve_vpp_main_core:
            node['cpu']['vpp_main_core'] = other_cpus_end + 1

        return reserve_vpp_main_core
    def _calc_desc_and_queues(total_numa_nodes,
                              total_ports_per_numa,
                              ports_per_numa_value):
        Calculate the number of descriptors and queues

        :param total_numa_nodes: The total number of numa nodes
        :param total_ports_per_numa: The total number of ports for this
        :param total_vpp_cpus: The total number of cpus to allocate for vpp
        :param ports_per_numa_value: The value from the ports_per_numa
        :type total_numa_nodes: int
        :type total_ports_per_numa: int
        :type total_vpp_cpus: int
        :type ports_per_numa_value: dict
        :returns: The total number of message buffers
        :returns: The total number of vpp workers

        # Get the total vpp workers
        total_vpp_workers = total_vpp_cpus
        ports_per_numa_value['total_vpp_workers'] = total_vpp_workers

        # Get the number of rx queues
        rx_queues = max(1, total_vpp_workers)
        tx_queues = total_vpp_workers * total_numa_nodes + 1

        # Get the descriptor entries
        ports_per_numa_value['rx_queues'] = rx_queues
        total_mbufs = (((rx_queues * desc_entries) +
                        (tx_queues * desc_entries)) *
                       total_ports_per_numa)
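        # Worked example (illustrative): 2 workers on 1 numa node with 2 ports
        # gives rx_queues = 2 and tx_queues = 2 * 1 + 1 = 3, so total_mbufs =
        # ((2 + 3) * desc_entries) * 2.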
        total_mbufs = total_mbufs

        return total_mbufs, total_vpp_workers
    def _create_ports_per_numa(node, interfaces):
        Create a dictionary of ports per numa node
        :param node: Node dictionary
        :param interfaces: All the interfaces to be used by vpp
        :type interfaces: dict
        :returns: The ports per numa dictionary

        # Make a list of ports by numa node
        for item in interfaces.items():
            if i['numa_node'] not in ports_per_numa:
                ports_per_numa[i['numa_node']] = {'interfaces': []}
                ports_per_numa[i['numa_node']]['interfaces'].append(i)
                ports_per_numa[i['numa_node']]['interfaces'].append(i)
        node['cpu']['ports_per_numa'] = ports_per_numa
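        # Resulting shape (roughly): {numa_node: {'interfaces': [...]}}; the
        # 'rx_queues' and 'total_vpp_workers' keys are added later by
        # _calc_desc_and_queues().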
        return ports_per_numa
    def calculate_cpu_parameters(self):
        Calculate the cpu configuration.

        # Calculate the cpu parameters, needed for the
        # vpp_startup and grub configuration
        for i in self._nodes.items():

            # get total number of nic ports
            interfaces = node['interfaces']

            # Make a list of ports by numa node
            ports_per_numa = self._create_ports_per_numa(node, interfaces)

            # Get the number of cpus to skip, we never use the first cpu
            other_cpus_end = other_cpus_start + \
                node['cpu']['total_other_cpus'] - 1
            if other_cpus_end != 0:
                other_workers = (other_cpus_start, other_cpus_end)
            node['cpu']['other_workers'] = other_workers

            # Allocate the VPP main core and workers
            reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
            total_vpp_cpus = node['cpu']['total_vpp_cpus']

            # If total_vpp_cpus is 0 or is less than the numa nodes with ports
            # then we shouldn't get workers
            total_with_main = total_vpp_cpus
            if reserve_vpp_main_core:
            if total_with_main != 0:
                for item in ports_per_numa.items():

                    # Get the number of descriptors and queues
                    mbufs, total_vpp_workers = self._calc_desc_and_queues(
                        len(value['interfaces']), total_vpp_cpus, value)

                    # Get the VPP workers
                    reserve_vpp_main_core = self._calc_vpp_workers(
                        node, vpp_workers, numa_node, other_cpus_end,
                        total_vpp_workers, reserve_vpp_main_core)

                total_mbufs = int(total_mbufs)

            node['cpu']['vpp_workers'] = vpp_workers
            node['cpu']['total_mbufs'] = total_mbufs
    def _apply_vpp_tcp(node):
        Apply the VPP TCP config

        :param node: Node dictionary with cpuinfo.

        active_open_sessions = node['tcp']['active_open_sessions']
        aos = int(active_open_sessions)
        passive_open_sessions = node['tcp']['passive_open_sessions']
        pos = int(passive_open_sessions)

        # Generate the api-segment gid vpp section in any case
            tcp = "api-segment {\n"
            tcp = tcp + " gid vpp\n"
            return tcp.rstrip('\n')

        tcp = "# TCP stack-related configuration parameters\n"
        tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
        tcp = tcp + "heapsize 4g\n\n"
        tcp = tcp + "api-segment {\n"
        tcp = tcp + " global-size 2000M\n"
        tcp = tcp + " api-size 1G\n"

        tcp = tcp + "session {\n"
        tcp = tcp + " event-queue-length " + "{:d}".format(aos + pos) + "\n"
        tcp = tcp + " preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
        tcp = tcp + " v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
        tcp = tcp + " v4-session-table-memory 3g\n"
        tcp = tcp + " v4-halfopen-table-buckets " + \
            "{:d}".format((aos + pos) / 4) + "\n"
        tcp = tcp + " v4-halfopen-table-memory 3g\n"

        tcp = tcp + "tcp {\n"
        tcp = tcp + " preallocated-connections " + "{:d}".format(aos + pos) + "\n"
        tcp = tcp + " preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
        tcp = tcp + " local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
        tcp = tcp + " local-endpoints-table-memory 3g\n"

        return tcp.rstrip('\n')
    def apply_vpp_startup(self):
        Apply the vpp startup configuration

        # Apply the VPP startup configuration
        for i in self._nodes.items():

            # Get the startup file
            rootdir = node['rootdir']
            sfile = rootdir + node['vpp']['startup_config_file']

            devices = self._apply_vpp_devices(node)

            cpu = self._apply_vpp_cpu(node)

            # Get the unix config
            unix = self._apply_vpp_unix(node)

            # Get the TCP configuration, if any
            tcp = self._apply_vpp_tcp(node)

            # Make a backup if needed
            self._autoconfig_backup_file(sfile)

            tfile = sfile + '.template'
            (ret, stdout, stderr) = \
                VPPUtil.exec_command('cat {}'.format(tfile))
                raise RuntimeError('Executing cat command failed on node {}'.
                                   format(node['host']))
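            # The .template file is expected to contain {unix}, {cpu}, {devices}
            # and {tcp} placeholders, which str.format() fills in below.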
            startup = stdout.format(unix=unix,

            (ret, stdout, stderr) = \
                VPPUtil.exec_command('rm {}'.format(sfile))
                logging.debug(stderr)

            cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
            (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
                raise RuntimeError('Writing config failed on node {}'.
                                   format(node['host']))
    def apply_grub_cmdline(self):
        Apply the grub cmdline

        for i in self._nodes.items():

            # Get the isolated CPUs
            other_workers = node['cpu']['other_workers']
            vpp_workers = node['cpu']['vpp_workers']
            vpp_main_core = node['cpu']['vpp_main_core']
            if other_workers is not None:
                all_workers = [other_workers]
            if vpp_main_core != 0:
                all_workers += [(vpp_main_core, vpp_main_core)]
            all_workers += vpp_workers
            for idx, worker in enumerate(all_workers):
                if worker[0] == worker[1]:
                    isolated_cpus += "{}".format(worker[0])
                    isolated_cpus += "{}-{}".format(worker[0], worker[1])

            vppgrb = VppGrubUtil(node)
            current_cmdline = vppgrb.get_current_cmdline()
            if 'grub' not in node:
            node['grub']['current_cmdline'] = current_cmdline
            node['grub']['default_cmdline'] = \
                vppgrb.apply_cmdline(node, isolated_cpus)
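            # isolated_cpus ends up as a comma-separated list of cores and
            # ranges, e.g. "1,2-3" (illustrative), handed to VppGrubUtil to be
            # placed on the kernel cmdline as isolcpus=.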
    def get_hugepages(self):
        Get the hugepage configuration

        for i in self._nodes.items():
            hpg = VppHugePageUtil(node)
            max_map_count, shmmax = hpg.get_huge_page_config()
            node['hugepages']['max_map_count'] = max_map_count
            node['hugepages']['shmax'] = shmmax
            total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
            node['hugepages']['actual_total'] = total
            node['hugepages']['free'] = free
            node['hugepages']['size'] = size
            node['hugepages']['memtotal'] = memtotal
            node['hugepages']['memfree'] = memfree
        Get the grub configuration

        for i in self._nodes.items():
            vppgrb = VppGrubUtil(node)
            current_cmdline = vppgrb.get_current_cmdline()
            default_cmdline = vppgrb.get_default_cmdline()

            # Get the total number of isolated CPUs
            iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
            iso_cpurl = len(iso_cpur)
                iso_cpu_str = iso_cpur[0]
                iso_cpu_str = iso_cpu_str.split('=')[1]
                iso_cpul = iso_cpu_str.split(',')
                for iso_cpu in iso_cpul:
                    isocpuspl = iso_cpu.split('-')
                    if len(isocpuspl) == 1:
                        current_iso_cpus += 1
                        first = int(isocpuspl[0])
                        second = int(isocpuspl[1])
                            current_iso_cpus += 1
                            current_iso_cpus += second - first

            if 'grub' not in node:
            node['grub']['current_cmdline'] = current_cmdline
            node['grub']['default_cmdline'] = default_cmdline
            node['grub']['current_iso_cpus'] = current_iso_cpus
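            # Note: the loop above counts one CPU per single entry and
            # (last - first) per range, so e.g. "isolcpus=1-3,5" is counted
            # as 3 isolated CPUs.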
    def _get_device(node):
        Get the device configuration for a single node

        :param node: Node dictionary with cpuinfo.

        vpp = VppPCIUtil(node)
        vpp.get_all_devices()

        # Save the device information
        node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
        node['devices']['kernel_devices'] = vpp.get_kernel_devices()
        node['devices']['other_devices'] = vpp.get_other_devices()
        node['devices']['linkup_devices'] = vpp.get_link_up_devices()

    def get_devices_per_node(self):
        Get the device configuration for all the nodes

        for i in self._nodes.items():
            # Update the interface data
            self._get_device(node)
    def get_cpu_layout(node):
        using lscpu -p get the cpu layout.
        Returns a list with each item representing a single cpu.

        :param node: Node dictionary.
        :returns: The cpu layout

        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
            raise RuntimeError('{} failed on node {} {}'.
                               format(cmd, node['host'], stderr))

        lines = stdout.split('\n')
            if line == '' or line[0] == '#':
            linesplit = line.split(',')
            layout = {'cpu': linesplit[0], 'core': linesplit[1],
                      'socket': linesplit[2], 'node': linesplit[3]}

            # cpu, core, socket, node
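            # e.g. an lscpu -p line such as "0,0,0,0" (illustrative) becomes
            # {'cpu': '0', 'core': '0', 'socket': '0', 'node': '0'}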
        Get the cpu configuration

        CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)

        for i in self._nodes.items():
            layout = self.get_cpu_layout(node)
            node['cpu']['layout'] = layout

            cpuinfo = node['cpuinfo']
            smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
            node['cpu']['smt_enabled'] = smt_enabled
            # We don't want to write the cpuinfo

        Get the current system configuration.

        # Get the Huge Page configuration

        # Get the device configuration
        self.get_devices_per_node()

        # Get the CPU configuration

        # Get the current grub cmdline
    def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
        Ask the user questions related to the cpu configuration.

        :param node: Node dictionary
        :param total_cpus: The total number of cpus in the system
        :param numa_nodes: The list of numa nodes in the system
        :type total_cpus: int
        :type numa_nodes: list

        print "\nYour system has {} core(s) and {} Numa Nodes.". \
            format(total_cpus, len(numa_nodes))
        print "To begin, we suggest not reserving any cores for VPP",
        print "or other processes."
        print "Then to improve performance try reserving cores as needed. "

        max_other_cores = total_cpus / 2
        question = '\nHow many core(s) do you want to reserve for processes \
other than VPP? [0-{}][0]? '.format(str(max_other_cores))
        total_other_cpus = self._ask_user_range(question, 0, max_other_cores,
        node['cpu']['total_other_cpus'] = total_other_cpus

        question = "How many core(s) shall we reserve for VPP workers [0-{}][0]? ". \
        total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
        node['cpu']['total_vpp_cpus'] = total_vpp_cpus

        max_main_cpus = max_vpp_cpus - total_vpp_cpus
        reserve_vpp_main_core = False
        if max_main_cpus > 0:
            question = "Should we reserve 1 core for the VPP Main thread? "
            question += "[y/N]? "
            answer = self._ask_user_yn(question, 'n')
                reserve_vpp_main_core = True
        node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
        node['cpu']['vpp_main_core'] = 0
    def modify_cpu(self):
        Modify the cpu configuration, asking the user for the values.

        CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)

        for i in self._nodes.items():
            total_cpus_per_slice = 0
            cpu_layout = self.get_cpu_layout(node)

            # Assume the number of cpus per slice is always the same as the
            for cpu in cpu_layout:
                if cpu['node'] != first_node:
                total_cpus_per_slice += 1

            # Get the total number of cpus, cores, and numa nodes from the
            for cpul in cpu_layout:
                numa_node = cpul['node']

                if numa_node not in cpus_per_node:
                    cpus_per_node[numa_node] = []
                cpuperslice = int(cpu) % total_cpus_per_slice
                    cpus_per_node[numa_node].append((int(cpu), int(cpu) +
                                                     total_cpus_per_slice - 1))
                if numa_node not in numa_nodes:
                    numa_nodes.append(numa_node)
                if core not in cores:
            node['cpu']['cpus_per_node'] = cpus_per_node

            # Ask the user some questions
            self._modify_cpu_questions(node, total_cpus, numa_nodes)

            # Populate the interfaces with the numa node
            ikeys = node['interfaces'].keys()
            VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))

            # We don't want to write the cpuinfo

        self._update_auto_config()
    def _modify_other_devices(self, node,
                              other_devices, kernel_devices, dpdk_devices):
        Modify the devices configuration, asking the user for the values.

        odevices_len = len(other_devices)
            print "\nThese device(s) are currently NOT being used",
            print "by VPP or the OS.\n"
            VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
            question = "\nWould you like to give any of these devices"
            question += " back to the OS [Y/n]? "
            answer = self._ask_user_yn(question, 'Y')
                for dit in other_devices.items():
                    question = "Would you like to use device {} for". \
                    question += " the OS [y/N]? "
                    answer = self._ask_user_yn(question, 'n')
                        driver = device['unused'][0]
                        VppPCIUtil.bind_vpp_device(node, driver, dvid)
                for dit in vppd.items():
                    kernel_devices[dvid] = device
                    del other_devices[dvid]

        odevices_len = len(other_devices)
        if odevices_len > 0:
            print "\nThese device(s) are still NOT being used ",
            print "by VPP or the OS.\n"
            VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
            question = "\nWould you like to use any of these for VPP [y/N]? "
            answer = self._ask_user_yn(question, 'N')
            for dit in other_devices.items():
                question = "Would you like to use device {} ".format(dvid)
                question += "for VPP [y/N]? "
                answer = self._ask_user_yn(question, 'n')
                for dit in vppd.items():
                    dpdk_devices[dvid] = device
                    del other_devices[dvid]
    def modify_devices(self):
        Modify the devices configuration, asking the user for the values.

        for i in self._nodes.items():
            devices = node['devices']
            other_devices = devices['other_devices']
            kernel_devices = devices['kernel_devices']
            dpdk_devices = devices['dpdk_devices']

                self._modify_other_devices(node, other_devices,
                                           kernel_devices, dpdk_devices)

                # Get the devices again for this node
                self._get_device(node)
                devices = node['devices']
                kernel_devices = devices['kernel_devices']
                dpdk_devices = devices['dpdk_devices']

            klen = len(kernel_devices)
                print "\nThese devices have kernel interfaces, but",
                print "appear to be safe to use with VPP.\n"
                VppPCIUtil.show_vpp_devices(kernel_devices)
                question = "\nWould you like to use any of these "
                question += "device(s) for VPP [y/N]? "
                answer = self._ask_user_yn(question, 'n')
                    for dit in kernel_devices.items():
                        question = "Would you like to use device {} ". \
                        question += "for VPP [y/N]? "
                        answer = self._ask_user_yn(question, 'n')
                    for dit in vppd.items():
                        dpdk_devices[dvid] = device
                        del kernel_devices[dvid]

            dlen = len(dpdk_devices)
                print "\nThese device(s) will be used by VPP.\n"
                VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
                question = "\nWould you like to remove any of "
                question += "these device(s) [y/N]? "
                answer = self._ask_user_yn(question, 'n')
                    for dit in dpdk_devices.items():
                        question = "Would you like to remove {} [y/N]? ". \
                        answer = self._ask_user_yn(question, 'n')
                    for dit in vppd.items():
                        driver = device['unused'][0]
                        VppPCIUtil.bind_vpp_device(node, driver, dvid)
                        kernel_devices[dvid] = device
                        del dpdk_devices[dvid]

            for dit in dpdk_devices.items():
                VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
            node['interfaces'] = interfaces

            print "\nThese device(s) will be used by VPP, please",
            print "rerun this option if this is incorrect.\n"
            VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)

        self._update_auto_config()
    def modify_huge_pages(self):
        Modify the huge page configuration, asking the user for the values.

        for i in self._nodes.items():
            total = node['hugepages']['actual_total']
            free = node['hugepages']['free']
            size = node['hugepages']['size']
            memfree = node['hugepages']['memfree'].split(' ')[0]
            hugesize = int(size.split(' ')[0])
            # The max number of huge pages should be no more than
            # 70% of total free memory
            maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
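            # Illustrative example: with 16000000 kB free and 2048 kB huge
            # pages, maxpages = (16000000 * 70 / 100) / 2048 = 5468.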
            print "\nThere are currently {} {} huge pages free.". \
            question = "Do you want to reconfigure the number of "
            question += "huge pages [y/N]? "
            answer = self._ask_user_yn(question, 'n')
                node['hugepages']['total'] = total

            print "\nThere is currently a total of {} huge pages.". \
                "How many huge pages do you want [{} - {}][{}]? ".\
                format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
            answer = self._ask_user_range(question, 1024, maxpages, 1024)
            node['hugepages']['total'] = str(answer)

        # Update auto-config.yaml
        self._update_auto_config()

        # Rediscover just the hugepages
        self.get_hugepages()
    def get_tcp_params(self):
        Get the tcp configuration

        # maybe nothing to do here?

    def acquire_tcp_params(self):
        Ask the user for TCP stack configuration parameters

        for i in self._nodes.items():
            question = "\nHow many active-open / tcp client sessions are expected "
            question = question + "[0-10000000][0]? "
            answer = self._ask_user_range(question, 0, 10000000, 0)
            # Less than 10K is equivalent to 0
            if int(answer) < 10000:
            node['tcp']['active_open_sessions'] = answer

            question = "How many passive-open / tcp server sessions are expected "
            question = question + "[0-10000000][0]? "
            answer = self._ask_user_range(question, 0, 10000000, 0)
            # Less than 10K is equivalent to 0
            if int(answer) < 10000:
            node['tcp']['passive_open_sessions'] = answer

        # Update auto-config.yaml
        self._update_auto_config()

        # Rediscover tcp parameters
        self.get_tcp_params()
    def patch_qemu(node):
        Patch qemu with the correct patches.

        :param node: Node dictionary

        print '\nWe are patching the node "{}":\n'.format(node['host'])
        QemuUtils.build_qemu(node, force_install=True, apply_patch=True)

        Print the CPU information

        cpu = CpuUtils.get_cpu_info_per_node(node)

            print "{:>20}: {}".format(item, cpu[item])
            print "{:>20}: {}".format(item, cpu[item])
        item = 'Thread(s) per core'
            print "{:>20}: {}".format(item, cpu[item])
        item = 'Core(s) per socket'
            print "{:>20}: {}".format(item, cpu[item])
            print "{:>20}: {}".format(item, cpu[item])
        item = 'NUMA node(s)'
            numa_nodes = int(cpu[item])
        for i in xrange(0, numa_nodes):
            item = "NUMA node{} CPU(s)".format(i)
            print "{:>20}: {}".format(item, cpu[item])
        item = 'CPU max MHz'
            print "{:>20}: {}".format(item, cpu[item])
        item = 'CPU min MHz'
            print "{:>20}: {}".format(item, cpu[item])

        if node['cpu']['smt_enabled']:
        print "{:>20}: {}".format('SMT', smt)

        print "\nVPP Threads: (Name: Cpu Number)"
        vpp_processes = cpu['vpp_processes']
        for i in vpp_processes.items():
            print " {:10}: {:4}".format(i[0], i[1])
    def device_info(node):
        Show the device information.

        if 'cpu' in node and 'total_mbufs' in node['cpu']:
            total_mbufs = node['cpu']['total_mbufs']
            if total_mbufs != 0:
                print "Total Number of Buffers: {}".format(total_mbufs)

        vpp = VppPCIUtil(node)
        vpp.get_all_devices()
        linkup_devs = vpp.get_link_up_devices()
        if len(linkup_devs):
            print ("\nDevices with link up (can not be used with VPP):")
            vpp.show_vpp_devices(linkup_devs, show_header=False)
            # for dev in linkup_devs:

        kernel_devs = vpp.get_kernel_devices()
        if len(kernel_devs):
            print ("\nDevices bound to kernel drivers:")
            vpp.show_vpp_devices(kernel_devs, show_header=False)
            print ("\nNo devices bound to kernel drivers")

        dpdk_devs = vpp.get_dpdk_devices()
            print ("\nDevices bound to DPDK drivers:")
            vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
            print ("\nNo devices bound to DPDK drivers")

        interfaces = vpputl.get_hardware(node)
        if interfaces == {}:

        print ("\nDevices in use by VPP:")

        if len(interfaces.items()) < 2:

        print "{:30} {:6} {:4} {:7} {:4} {:7}". \
            format('Name', 'Socket', 'RXQs',
                   'RXDescs', 'TXQs', 'TXDescs')
        for intf in sorted(interfaces.items()):
            if name == 'local0':
            socket = rx_qs = rx_ds = tx_qs = tx_ds = ''
            if 'cpu socket' in value:
                socket = int(value['cpu socket'])
            if 'rx queues' in value:
                rx_qs = int(value['rx queues'])
            if 'rx descs' in value:
                rx_ds = int(value['rx descs'])
            if 'tx queues' in value:
                tx_qs = int(value['tx queues'])
            if 'tx descs' in value:
                tx_ds = int(value['tx descs'])

            print ("{:30} {:>6} {:>4} {:>7} {:>4} {:>7}".
                   format(name, socket, rx_qs, rx_ds, tx_qs, tx_ds))
    def hugepage_info(node):
        Show the huge page information.

        hpg = VppHugePageUtil(node)
        hpg.show_huge_pages()

    def min_system_resources(node):
        Check the system for basic minimum resources, return true if

        if 'layout' in node['cpu']:
            total_cpus = len(node['cpu']['layout'])
                print "\nThere is only {} CPU(s) available on this system.".format(total_cpus)
                print "This is not enough to run VPP."

        if 'free' in node['hugepages'] and \
                'memfree' in node['hugepages'] and \
                'size' in node['hugepages']:
            free = node['hugepages']['free']
            memfree = float(node['hugepages']['memfree'].split(' ')[0])
            hugesize = float(node['hugepages']['size'].split(' ')[0])

            memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
            percentmemhugepages = (memhugepages / memfree) * 100
            if free == '0' and \
                    percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
                print "\nThe System has only {} of free memory.".format(int(memfree))
                print "You will not be able to allocate enough Huge Pages for VPP."
        Print the system information

        for i in self._nodes.items():
            print "\n=============================="
            print "NODE: {}\n".format(name)

            print "\nGrub Command Line:"
                    " Current: {}".format(
                        node['grub']['current_cmdline'])
                    " Configured: {}".format(
                        node['grub']['default_cmdline'])

            print "\nHuge Pages:"
            self.hugepage_info(node)

            self.device_info(node)

            print "\nVPP Service Status:"
            state, errors = VPPUtil.status(node)
            print " {}".format(state)
                print " {}".format(e)

            # Minimum system resources
            self.min_system_resources(node)

            print "\n=============================="