1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
20 from netaddr import IPAddress
22 from vpplib.VPPUtil import VPPUtil
23 from vpplib.VppPCIUtil import VppPCIUtil
24 from vpplib.VppHugePageUtil import VppHugePageUtil
25 from vpplib.CpuUtils import CpuUtils
26 from vpplib.VppGrubUtil import VppGrubUtil
27 from vpplib.QemuUtils import QemuUtils
# Public API of this module: only the AutoConfig class is exported.
29 __all__ = ["AutoConfig"]
# Hugepage sizing policy: never offer fewer than this many pages...
33 MIN_TOTAL_HUGE_PAGES = 1024
# ...and never let hugepages consume more than this percentage of free memory
# (used as a ceiling when prompting the user in modify_huge_pages).
34 MAX_PERCENT_FOR_HUGE_PAGES = 70
37 class AutoConfig(object):
38 """Auto Configuration Tools"""
# Constructor: remember where the auto-config YAML lives and initialize
# per-run state. `clean` (per the docstring fragment below) forces node
# state to be re-read from the auto-config file.
# NOTE(review): this listing is sampled -- lines are missing here (docstring
# quotes, and presumably self._clean / self._metadata / self._nodes
# assignments); confirm against the full source.
40 def __init__(self, rootdir, filename, clean=False):
42 The Auto Configure class.
44 :param rootdir: The root directory for all the auto configuration files
45 :param filename: The autoconfiguration file
46 :param clean: When set initialize the nodes from the auto-config file
# Full path of the auto-config file is simple string concatenation, so
# `rootdir` is expected to end with a path separator -- TODO confirm.
51 self._autoconfig_filename = rootdir + filename
52 self._rootdir = rootdir
# Per-node VPP device bookkeeping and rendered hugepage config text.
55 self._vpp_devices_node = {}
56 self._hugepage_config = ""
62 Returns the nodes dictionary.
# Make a one-time backup copy (<filename>.orig) of a config file before it
# is modified, so the original can be restored later. The copy is only
# created if it does not already exist.
71 def _autoconfig_backup_file(filename):
75 :param filename: The file to backup
79 # Does a copy of the file exist, if not create one
80 ofile = filename + '.orig'
# Existence is probed by running `ls` through VPPUtil.exec_command and
# comparing stdout, rather than os.path.isfile -- presumably so the same
# code path works when commands are executed with elevated privileges;
# NOTE(review): confirm, and note the return codes are ignored.
81 (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
84 if stdout.strip('\n') != ofile:
85 cmd = 'sudo cp {} {}'.format(filename, ofile)
86 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
90 # noinspection PyBroadException
94 Asks the user for a number within a range.
95 default is returned if return is entered.
97 :returns: IP address with cidr
102 answer = raw_input("Please enter the IPv4 Address [n.n.n.n/n]: ")
104 ipinput = answer.split('/')
105 ipaddr = IPAddress(ipinput[0])
107 plen = answer.split('/')[1]
109 answer = raw_input("Please enter the netmask [n.n.n.n]: ")
110 plen = IPAddress(answer).netmask_bits()
111 return '{}/{}'.format(ipaddr, plen)
113 print "Please enter a valid IPv4 address."
# Interactive prompt: keep asking until the user enters an integer within
# [first, last]; an empty answer (plain Return) yields `default`.
116 def _ask_user_range(question, first, last, default):
118 Asks the user for a number within a range.
119 default is returned if return is entered.
121 :param question: Text of a question.
122 :param first: First number in the range
123 :param last: Last number in the range
124 :param default: The value returned when return is entered
125 :type question: string
129 :returns: The answer to the question
# Python 2 input primitive (raw_input); loop/return lines are missing
# from this sampled listing.
134 answer = raw_input(question)
# NOTE(review): the character class r'[0-9+]' also matches a literal '+',
# and findall only checks that SOME digit appears -- int(answer) below can
# still raise on mixed input; verify against the full source.
138 if re.findall(r'[0-9+]', answer):
139 if int(answer) in range(first, last + 1):
142 print "Please a value between {} and {} or Return.". \
145 print "Please a number between {} and {} or Return.". \
# Interactive yes/no prompt: loop until the user types Y/y/N/n (only the
# first character is considered) or presses Return to accept `default`.
151 def _ask_user_yn(question, default):
153 Asks the user for a yes or no question.
155 :param question: Text of a question.
156 :param default: The value returned when return is entered
157 :type question: string
158 :type default: string
159 :returns: The answer to the question
# Normalize the default so callers may pass 'Y' or 'y' interchangeably.
164 default = default.lower()
166 while not input_valid:
167 answer = raw_input(question)
170 if re.findall(r'[YyNn]', answer):
172 answer = answer[0].lower()
174 print "Please answer Y, N or Return."
# Load the testbed topology from the auto-config YAML file; if a previously
# written system config file exists (and `clean` was not requested), prefer
# its node definitions over the auto-config ones. Finally stamp every node
# dict with the root directory.
178 def _loadconfig(self):
180 Load the testbed configuration, given the auto configuration file.
184 # Get the Topology, from the topology layout file
186 with open(self._autoconfig_filename, 'r') as stream:
# NOTE(review): yaml.load without a Loader executes arbitrary tags --
# yaml.safe_load would be safer for config files.
188 topo = yaml.load(stream)
189 if 'metadata' in topo:
190 self._metadata = topo['metadata']
191 except yaml.YAMLError as exc:
# NOTE(review): the format string has one placeholder but two arguments;
# `exc` is silently dropped from the message.
192 raise RuntimeError("Couldn't read the Auto config file {}.".format(self._autoconfig_filename, exc))
194 systemfile = self._rootdir + self._metadata['system_config_file']
# When not cleaning, the system config (written by updateconfig) wins.
195 if self._clean is False and os.path.isfile(systemfile):
196 with open(systemfile, 'r') as sysstream:
198 systopo = yaml.load(sysstream)
199 if 'nodes' in systopo:
200 self._nodes = systopo['nodes']
201 except yaml.YAMLError as sysexc:
# NOTE(review): same dropped-argument issue as above (sysexc unused).
202 raise RuntimeError("Couldn't read the System config file {}.".format(systemfile, sysexc))
204 # Get the nodes from Auto Config
206 self._nodes = topo['nodes']
208 # Set the root directory in all the nodes
209 for i in self._nodes.items():
211 node['rootdir'] = self._rootdir
# Persist the current in-memory metadata + node state to the system config
# YAML file (path taken from metadata['system_config_file'] under rootdir).
213 def updateconfig(self):
215 Update the testbed configuration, given the auto configuration file.
216 We will write the system configuration file with the current node
221 # Initialize the yaml data
222 ydata = {'metadata': self._metadata, 'nodes': self._nodes}
224 # Write the system config file
225 filename = self._rootdir + self._metadata['system_config_file']
226 with open(filename, 'w') as yamlfile:
227 yaml.dump(ydata, yamlfile)
# Re-read the auto-config YAML, copy the user-chosen values (interfaces,
# cpu, tcp, hugepage totals) from self._nodes into it, and write it back --
# so interactive answers survive across runs.
229 def _update_auto_config(self):
231 Write the auto configuration file with the new configuration data,
236 # Initialize the yaml data
238 with open(self._autoconfig_filename, 'r') as stream:
240 ydata = yaml.load(stream)
242 nodes = ydata['nodes']
243 except yaml.YAMLError as exc:
247 for i in nodes.items():
# Rebuild the interface map from scratch: only pci_address (and
# mac_address when known) are written back per port.
252 node['interfaces'] = {}
253 for item in self._nodes[key]['interfaces'].items():
257 node['interfaces'][port] = {}
258 addr = '{}'.format(interface['pci_address'])
259 node['interfaces'][port]['pci_address'] = addr
260 if 'mac_address' in interface:
261 node['interfaces'][port]['mac_address'] = \
262 interface['mac_address']
# CPU answers gathered by _modify_cpu_questions, copied only if present.
264 if 'total_other_cpus' in self._nodes[key]['cpu']:
265 node['cpu']['total_other_cpus'] = \
266 self._nodes[key]['cpu']['total_other_cpus']
267 if 'total_vpp_cpus' in self._nodes[key]['cpu']:
268 node['cpu']['total_vpp_cpus'] = \
269 self._nodes[key]['cpu']['total_vpp_cpus']
270 if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
271 node['cpu']['reserve_vpp_main_core'] = \
272 self._nodes[key]['cpu']['reserve_vpp_main_core']
# TCP session counts gathered by acquire_tcp_params.
275 if 'active_open_sessions' in self._nodes[key]['tcp']:
276 node['tcp']['active_open_sessions'] = \
277 self._nodes[key]['tcp']['active_open_sessions']
278 if 'passive_open_sessions' in self._nodes[key]['tcp']:
279 node['tcp']['passive_open_sessions'] = \
280 self._nodes[key]['tcp']['passive_open_sessions']
283 node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
285 # Write the auto config config file
286 with open(self._autoconfig_filename, 'w') as yamlfile:
287 yaml.dump(ydata, yamlfile)
# Apply the hugepage configuration on every node via VppHugePageUtil.
# NOTE(review): the call is hugepages_dryrun_apply() -- despite this
# method's name it appears to be a dry run; confirm intent.
289 def apply_huge_pages(self):
291 Apply the huge page config
295 for i in self._nodes.items():
298 hpg = VppHugePageUtil(node)
299 hpg.hugepages_dryrun_apply()
# Render the "unix" section of the VPP startup config from the node's
# vpp.unix settings; currently only the `interactive` flag is honored.
# Returns the section text without a trailing newline.
302 def _apply_vpp_unix(node):
304 Apply the VPP Unix config
306 :param node: Node dictionary with cpuinfo.
# Nothing to add if the node config carries no unix section.
311 if 'unix' not in node['vpp']:
314 unixv = node['vpp']['unix']
315 if 'interactive' in unixv:
316 interactive = unixv['interactive']
317 if interactive is True:
318 unix = ' interactive\n'
320 return unix.rstrip('\n')
# Render the "cpu" section of the VPP startup config: main-core pinning
# plus a corelist-workers range string built from (start, end) tuples.
323 def _apply_vpp_cpu(node):
325 Apply the VPP cpu config
327 :param node: Node dictionary with cpuinfo.
333 if 'vpp_main_core' in node['cpu']:
334 vpp_main_core = node['cpu']['vpp_main_core']
# NOTE(review): `is not 0` is identity comparison and only works due to
# CPython small-int caching; should be `!= 0`.
337 if vpp_main_core is not 0:
338 cpu += '  main-core {}\n'.format(vpp_main_core)
341 vpp_workers = node['cpu']['vpp_workers']
342 vpp_worker_len = len(vpp_workers)
343 if vpp_worker_len > 0:
# Join worker tuples into "a,b-c,..." -- a single core prints as "n",
# a span prints as "start-end".
345 for i, worker in enumerate(vpp_workers):
347 vpp_worker_str += ','
348 if worker[0] == worker[1]:
349 vpp_worker_str += "{}".format(worker[0])
351 vpp_worker_str += "{}-{}".format(worker[0], worker[1])
353 cpu += '  corelist-workers {}\n'.format(vpp_worker_str)
# Render the dpdk "dev ..." stanzas of the VPP startup config: one block
# per PCI device with queue/descriptor counts, plus an optional global
# num-mbufs line when the computed mbuf total is large enough to matter.
358 def _apply_vpp_devices(node):
360 Apply VPP PCI Device configuration to vpp startup.
362 :param node: Node dictionary with cpuinfo.
367 ports_per_numa = node['cpu']['ports_per_numa']
368 total_mbufs = node['cpu']['total_mbufs']
370 for item in ports_per_numa.items():
372 interfaces = value['interfaces']
374 # if 0 was specified for the number of vpp workers, use 1 queue
377 if 'rx_queues' in value:
378 num_rx_queues = value['rx_queues']
379 if 'tx_queues' in value:
380 num_tx_queues = value['tx_queues']
385 # Create the devices string
386 for interface in interfaces:
387 pci_address = interface['pci_address']
# Strip quoting that may have survived YAML round-tripping.
388 pci_address = pci_address.lstrip("'").rstrip("'")
390 devices += '  dev {} {{ \n'.format(pci_address)
392 devices += '    num-rx-queues {}\n'.format(num_rx_queues)
# Fall back to a single RX queue when none was configured.
394 devices += '    num-rx-queues {}\n'.format(1)
396 devices += '    num-tx-queues {}\n'.format(num_tx_queues)
# NOTE(review): num_rx_desc / num_tx_desc sources are not visible in
# this sampled listing -- presumably set from value['rx_descriptors']
# etc.; confirm against the full source.
398 devices += '    num-rx-desc {}\n'.format(num_rx_desc)
400 devices += '    num-tx-desc {}\n'.format(num_tx_desc)
403 # If the total mbufs is not 0 or less than the default, set num-bufs
404 logging.debug("Total mbufs: {}".format(total_mbufs))
# NOTE(review): `is not 0` should be `!= 0` (identity vs equality);
# 16384 is presumably the DPDK default mbuf count -- confirm.
405 if total_mbufs is not 0 and total_mbufs > 16384:
406 devices += '\n  num-mbufs {}'.format(total_mbufs)
# Try to place `total_vpp_workers` contiguous cores (plus, optionally, one
# core for the VPP main thread immediately before them) on the given numa
# node, skipping cores already reserved for "other" processes. Appends a
# (start, end) tuple to vpp_workers on success and records the chosen main
# core on the node. Returns whether a main core is STILL unreserved.
411 def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end, total_vpp_workers,
412 reserve_vpp_main_core):
414 Calculate the VPP worker information
416 :param node: Node dictionary
417 :param vpp_workers: List of VPP workers
418 :param numa_node: Numa node
419 :param other_cpus_end: The end of the cpus allocated for cores
421 :param total_vpp_workers: The number of vpp workers needed
422 :param reserve_vpp_main_core: Is there a core needed for
426 :type other_cpus_end: int
427 :type total_vpp_workers: int
428 :type reserve_vpp_main_core: bool
429 :returns: Is a core still needed for the vpp main core
433 # Can we fit the workers in one of these slices
# cpus_per_node holds (start, end) cpu ranges per numa node (built in
# modify_cpu); the loop over those slices is missing from this listing.
434 cpus = node['cpu']['cpus_per_node'][numa_node]
# Never overlap the cores reserved for non-VPP processes.
438 if start <= other_cpus_end:
439 start = other_cpus_end + 1
# When the main core is still needed, shift the worker range up by one
# so the core just below `start` can host the main thread.
441 if reserve_vpp_main_core:
444 workers_end = start + total_vpp_workers - 1
446 if workers_end <= end:
447 if reserve_vpp_main_core:
448 node['cpu']['vpp_main_core'] = start - 1
449 reserve_vpp_main_core = False
450 if total_vpp_workers:
451 vpp_workers.append((start, workers_end))
454 # We still need to reserve the main core
# Fallback: no slice fit, so pin the main core right after the "other"
# cores.
455 if reserve_vpp_main_core:
456 node['cpu']['vpp_main_core'] = other_cpus_end + 1
458 return reserve_vpp_main_core
# Derive rx/tx queue counts and descriptor totals for one numa node's
# ports, store the queue counts back into ports_per_numa_value, and return
# the total number of mbufs those queues will need.
461 def _calc_desc_and_queues(total_numa_nodes,
462 total_ports_per_numa,
464 ports_per_numa_value):
466 Calculate the number of descriptors and queues
468 :param total_numa_nodes: The total number of numa nodes
469 :param total_ports_per_numa: The total number of ports for this
471 :param total_rx_queues: The total number of rx queues / port
472 :param ports_per_numa_value: The value from the ports_per_numa
474 :type total_numa_nodes: int
475 :type total_ports_per_numa: int
476 :type total_rx_queues: int
477 :type ports_per_numa_value: dict
478 :returns The total number of message buffers
482 # Get the number of rx queues
# At least one RX queue per port; one TX queue per RX queue per numa
# node, plus one for the main thread -- TODO confirm rationale.
483 rx_queues = max(1, total_rx_queues)
484 tx_queues = rx_queues * total_numa_nodes + 1
486 # Get the descriptor entries
488 ports_per_numa_value['rx_queues'] = rx_queues
# mbufs = (rx + tx descriptors) across all ports on this numa node.
489 total_mbufs = (((rx_queues * desc_entries) +
490 (tx_queues * desc_entries)) *
491 total_ports_per_numa)
# NOTE(review): this self-assignment is a no-op -- a scaling factor
# (e.g. a multiplier) was probably lost from this line; check the full
# source.
492 total_mbufs = total_mbufs
# Group the node's VPP interfaces by the numa node they are attached to,
# store the mapping on node['cpu']['ports_per_numa'], and return it.
497 def _create_ports_per_numa(node, interfaces):
499 Create a dictionary or ports per numa node
500 :param node: Node dictionary
501 :param interfaces: All the interfaces to be used by vpp
503 :type interfaces: dict
504 :returns: The ports per numa dictionary
508 # Make a list of ports by numa node
510 for item in interfaces.items():
# First interface seen on a numa node creates its bucket.
512 if i['numa_node'] not in ports_per_numa:
513 ports_per_numa[i['numa_node']] = {'interfaces': []}
514 ports_per_numa[i['numa_node']]['interfaces'].append(i)
# NOTE(review): this second append looks like a duplicate; in the full
# source it is presumably the body of an `else:` branch (bucket already
# exists) whose header was lost from this sampled listing -- confirm.
516 ports_per_numa[i['numa_node']]['interfaces'].append(i)
517 node['cpu']['ports_per_numa'] = ports_per_numa
519 return ports_per_numa
# Top-level CPU planning for every node: group ports by numa node, carve
# out "other process" cores, then per numa node compute queue/descriptor
# counts and allocate VPP worker core ranges. Results are written back
# into node['cpu'] for apply_vpp_startup / apply_grub_cmdline to consume.
521 def calculate_cpu_parameters(self):
523 Calculate the cpu configuration.
527 # Calculate the cpu parameters, needed for the
528 # vpp_startup and grub configuration
529 for i in self._nodes.items():
532 # get total number of nic ports
533 interfaces = node['interfaces']
535 # Make a list of ports by numa node
536 ports_per_numa = self._create_ports_per_numa(node, interfaces)
538 # Get the number of cpus to skip, we never use the first cpu
540 other_cpus_end = other_cpus_start + node['cpu']['total_other_cpus'] - 1
# NOTE(review): `is not 0` should be `!= 0` (identity comparison on an
# int relies on CPython small-int caching).
542 if other_cpus_end is not 0:
543 other_workers = (other_cpus_start, other_cpus_end)
544 node['cpu']['other_workers'] = other_workers
546 # Allocate the VPP main core and workers
548 reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
549 total_vpp_cpus = node['cpu']['total_vpp_cpus']
550 total_rx_queues = node['cpu']['total_rx_queues']
552 # If total_vpp_cpus is 0 or is less than the numa nodes with ports
553 # then we shouldn't get workers
# Python 2 integer division: workers are split evenly across the numa
# nodes that have ports; remainders are dropped.
554 total_workers_node = total_vpp_cpus / len(ports_per_numa)
556 if reserve_vpp_main_core:
559 if total_main + total_workers_node is not 0:
560 for item in ports_per_numa.items():
564 # Get the number of descriptors and queues
565 mbufs = self._calc_desc_and_queues(len(ports_per_numa),
566 len(value['interfaces']), total_rx_queues, value)
569 # Get the VPP workers
570 reserve_vpp_main_core = self._calc_vpp_workers(node, vpp_workers, numa_node,
571 other_cpus_end, total_workers_node,
572 reserve_vpp_main_core)
575 total_mbufs = int(total_mbufs)
580 node['cpu']['vpp_workers'] = vpp_workers
581 node['cpu']['total_mbufs'] = total_mbufs
# Render the TCP-stack related sections (api-segment, session, tcp) of the
# VPP startup config, sized from the expected active/passive session
# counts. When both counts are zero only the minimal api-segment block is
# emitted. Returns the text without a trailing newline.
587 def _apply_vpp_tcp(node):
589 Apply the VPP Unix config
591 :param node: Node dictionary with cpuinfo.
595 active_open_sessions = node['tcp']['active_open_sessions']
596 aos = int(active_open_sessions)
598 passive_open_sessions = node['tcp']['passive_open_sessions']
599 pos = int(passive_open_sessions)
601 # Generate the api-segment gid vpp sheit in any case
# Zero-session fast path: just grant the vpp group access to the API.
603 tcp = "api-segment {\n"
604 tcp = tcp + "  gid vpp\n"
606 return tcp.rstrip('\n')
608 tcp = "# TCP stack-related configuration parameters\n"
609 tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
610 tcp = tcp + "heapsize 4g\n\n"
611 tcp = tcp + "api-segment {\n"
612 tcp = tcp + "  global-size 2000M\n"
613 tcp = tcp + "  api-size 1G\n"
# Session-layer table sizing: buckets are (sessions / 4) -- Python 2
# integer division.
616 tcp = tcp + "session {\n"
617 tcp = tcp + " event-queue-length " + "{:d}".format(aos + pos) + "\n"
618 tcp = tcp + " preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
619 tcp = tcp + " v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
620 tcp = tcp + " v4-session-table-memory 3g\n"
622 tcp = tcp + " v4-halfopen-table-buckets " + \
623 "{:d}".format((aos + pos) / 4) + "\n"
624 tcp = tcp + " v4-halfopen-table-memory 3g\n"
# TCP-layer preallocation mirrors the session counts.
627 tcp = tcp + "tcp {\n"
628 tcp = tcp + " preallocated-connections " + "{:d}".format(aos + pos) + "\n"
630 tcp = tcp + " preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
631 tcp = tcp + " local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
632 tcp = tcp + " local-endpoints-table-memory 3g\n"
635 return tcp.rstrip('\n')
# For every node: render the devices/cpu/unix/tcp sections, back up the
# existing startup config, fill the node's startup-config template with the
# rendered sections, and write the result back via a sudo'd shell command.
637 def apply_vpp_startup(self):
639 Apply the vpp startup configration
643 # Apply the VPP startup configruation
644 for i in self._nodes.items():
647 # Get the startup file
648 rootdir = node['rootdir']
649 sfile = rootdir + node['vpp']['startup_config_file']
652 devices = self._apply_vpp_devices(node)
655 cpu = self._apply_vpp_cpu(node)
657 # Get the unix config
658 unix = self._apply_vpp_unix(node)
660 # Get the TCP configuration, if any
661 tcp = self._apply_vpp_tcp(node)
663 # Make a backup if needed
664 self._autoconfig_backup_file(sfile)
# The template is the startup file with a '.template' suffix; its
# contents contain {unix}/{cpu}/{devices}/... placeholders that
# str.format fills below.
667 tfile = sfile + '.template'
668 (ret, stdout, stderr) = \
669 VPPUtil.exec_command('cat {}'.format(tfile))
671 raise RuntimeError('Executing cat command failed to node {}'.
672 format(node['host']))
673 startup = stdout.format(unix=unix,
# Remove the old startup file first; failures are only logged.
678 (ret, stdout, stderr) = \
679 VPPUtil.exec_command('rm {}'.format(sfile))
681 logging.debug(stderr)
# Write via a here-doc so the write happens with sudo privileges.
683 cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
684 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
686 raise RuntimeError('Writing config failed node {}'.
687 format(node['host']))
# For every node, build the isolcpus list (other-process cores + VPP main
# core + VPP worker ranges) and hand it to VppGrubUtil to rewrite the grub
# command line; the previous and new cmdlines are recorded on the node.
689 def apply_grub_cmdline(self):
691 Apply the grub cmdline
695 for i in self._nodes.items():
698 # Get the isolated CPUs
699 other_workers = node['cpu']['other_workers']
700 vpp_workers = node['cpu']['vpp_workers']
701 if 'vpp_main_core' in node['cpu']:
702 vpp_main_core = node['cpu']['vpp_main_core']
# Collect every (start, end) range that must be isolated from the
# kernel scheduler.
706 if other_workers is not None:
707 all_workers = [other_workers]
# NOTE(review): `is not 0` should be `!= 0`.
708 if vpp_main_core is not 0:
709 all_workers += [(vpp_main_core, vpp_main_core)]
710 all_workers += vpp_workers
# Render ranges as "a,b-c,..." -- single cores as "n", spans as
# "start-end".
712 for idx, worker in enumerate(all_workers):
717 if worker[0] == worker[1]:
718 isolated_cpus += "{}".format(worker[0])
720 isolated_cpus += "{}-{}".format(worker[0], worker[1])
722 vppgrb = VppGrubUtil(node)
723 current_cmdline = vppgrb.get_current_cmdline()
724 if 'grub' not in node:
726 node['grub']['current_cmdline'] = current_cmdline
727 node['grub']['default_cmdline'] = \
728 vppgrb.apply_cmdline(node, isolated_cpus)
# Discover the current hugepage state of every node (limits plus actual
# totals/free/size/memory figures) and cache it under node['hugepages'].
732 def get_hugepages(self):
734 Get the hugepage configuration
738 for i in self._nodes.items():
741 hpg = VppHugePageUtil(node)
742 max_map_count, shmmax = hpg.get_huge_page_config()
743 node['hugepages']['max_map_count'] = max_map_count
# NOTE(review): key 'shmax' looks like a typo for 'shmmax'; readers
# elsewhere must use the same (misspelled) key -- confirm before fixing.
744 node['hugepages']['shmax'] = shmmax
745 total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
746 node['hugepages']['actual_total'] = total
747 node['hugepages']['free'] = free
748 node['hugepages']['size'] = size
749 node['hugepages']['memtotal'] = memtotal
750 node['hugepages']['memfree'] = memfree
756 Get the grub configuration
760 for i in self._nodes.items():
763 vppgrb = VppGrubUtil(node)
764 current_cmdline = vppgrb.get_current_cmdline()
765 default_cmdline = vppgrb.get_default_cmdline()
767 # Get the total number of isolated CPUs
769 iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
770 iso_cpurl = len(iso_cpur)
772 iso_cpu_str = iso_cpur[0]
773 iso_cpu_str = iso_cpu_str.split('=')[1]
774 iso_cpul = iso_cpu_str.split(',')
775 for iso_cpu in iso_cpul:
776 isocpuspl = iso_cpu.split('-')
777 if len(isocpuspl) is 1:
778 current_iso_cpus += 1
780 first = int(isocpuspl[0])
781 second = int(isocpuspl[1])
783 current_iso_cpus += 1
785 current_iso_cpus += second - first
787 if 'grub' not in node:
789 node['grub']['current_cmdline'] = current_cmdline
790 node['grub']['default_cmdline'] = default_cmdline
791 node['grub']['current_iso_cpus'] = current_iso_cpus
# Probe one node's PCI devices via VppPCIUtil and cache the four device
# categories (dpdk-bound, kernel-bound, other, link-up) under
# node['devices'].
796 def _get_device(node):
798 Get the device configuration for a single node
800 :param node: Node dictionary with cpuinfo.
805 vpp = VppPCIUtil(node)
806 vpp.get_all_devices()
808 # Save the device information
810 node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
811 node['devices']['kernel_devices'] = vpp.get_kernel_devices()
812 node['devices']['other_devices'] = vpp.get_other_devices()
813 node['devices']['linkup_devices'] = vpp.get_link_up_devices()
# Refresh the PCI device inventory on every node (see _get_device).
815 def get_devices_per_node(self):
817 Get the device configuration for all the nodes
821 for i in self._nodes.items():
823 # Update the interface data
825 self._get_device(node)
# Run `lscpu -p` on the node and parse the machine-readable output into a
# list of {'cpu','core','socket','node'} dicts, one per logical CPU.
830 def get_cpu_layout(node):
834 using lscpu -p get the cpu layout.
835 Returns a list with each item representing a single cpu.
837 :param node: Node dictionary.
839 :returns: The cpu layout
844 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
846 raise RuntimeError('{} failed on node {} {}'.
847 format(cmd, node['host'], stderr))
850 lines = stdout.split('\n')
# Skip blank lines and the '#'-prefixed lscpu header comments.
852 if line == '' or line[0] == '#':
854 linesplit = line.split(',')
# lscpu -p column order: CPU,Core,Socket,Node (values kept as strings).
855 layout = {'cpu': linesplit[0], 'core': linesplit[1],
856 'socket': linesplit[2], 'node': linesplit[3]}
858 # cpu, core, socket, node
865 Get the cpu configuration
870 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
872 for i in self._nodes.items():
876 layout = self.get_cpu_layout(node)
877 node['cpu']['layout'] = layout
879 cpuinfo = node['cpuinfo']
880 smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
881 node['cpu']['smt_enabled'] = smt_enabled
883 # We don't want to write the cpuinfo
891 Get the current system configuration.
895 # Get the Huge Page configuration
898 # Get the device configuration
899 self.get_devices_per_node()
901 # Get the CPU configuration
904 # Get the current grub cmdline
# Interactive CPU questionnaire: asks how many cores to give VPP, how many
# to reserve for other processes, whether to dedicate a VPP main core, and
# how many RX queues per port; answers are stored in node['cpu'].
907 def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
909 Ask the user questions related to the cpu configuration.
911 :param node: Node dictionary
912 :param total_cpus: The total number of cpus in the system
913 :param numa_nodes: The list of numa nodes in the system
915 :type total_cpus: int
916 :type numa_nodes: list
919 print "\nYour system has {} core(s) and {} Numa Nodes.". \
920 format(total_cpus, len(numa_nodes))
921 print "To begin, we suggest not reserving any cores for VPP or other processes."
922 print "Then to improve performance start reserving cores and adding queues as needed. "
# max_vpp_cpus derivation is missing from this sampled listing --
# presumably a fraction of total_cpus; confirm against the full source.
927 question = "\nHow many core(s) shall we reserve for VPP [0-{}][0]? ".format(max_vpp_cpus)
928 total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
929 node['cpu']['total_vpp_cpus'] = total_vpp_cpus
# Half of what's left after VPP may go to other processes (Py2 int div).
931 max_other_cores = (total_cpus - total_vpp_cpus) / 2
932 question = 'How many core(s) do you want to reserve for processes other than VPP? [0-{}][0]? '. \
933 format(str(max_other_cores))
934 total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
935 node['cpu']['total_other_cpus'] = total_other_cpus
# Only offer a dedicated main core if one is still available.
937 max_main_cpus = max_vpp_cpus + 1 - total_vpp_cpus
938 reserve_vpp_main_core = False
939 if max_main_cpus > 0:
940 question = "Should we reserve 1 core for the VPP Main thread? "
941 question += "[y/N]? "
942 answer = self._ask_user_yn(question, 'n')
944 reserve_vpp_main_core = True
# vpp_main_core 0 means "not yet assigned"; the real core number is
# chosen later by _calc_vpp_workers.
945 node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
946 node['cpu']['vpp_main_core'] = 0
948 question = "How many RX queues per port shall we use for VPP [1-4][1]? ". \
950 total_rx_queues = self._ask_user_range(question, 1, 4, 1)
951 node['cpu']['total_rx_queues'] = total_rx_queues
# Build the per-node CPU model (cpus-per-numa-node ranges, numa node list,
# SMT slice width) from the lscpu layout, optionally run the interactive
# questionnaire, attach numa info to interfaces, and persist the answers.
953 def modify_cpu(self, ask_questions=True):
955 Modify the cpu configuration, asking for the user for the values.
957 :param ask_questions: When true ask the user for config parameters
962 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
964 for i in self._nodes.items():
967 total_cpus_per_slice = 0
971 cpu_layout = self.get_cpu_layout(node)
973 # Assume the number of cpus per slice is always the same as the
# Count how many consecutive layout entries share the first numa node
# -- that run length is treated as the per-slice cpu count.
976 for cpu in cpu_layout:
977 if cpu['node'] != first_node:
979 total_cpus_per_slice += 1
981 # Get the total number of cpus, cores, and numa nodes from the
983 for cpul in cpu_layout:
984 numa_node = cpul['node']
989 if numa_node not in cpus_per_node:
990 cpus_per_node[numa_node] = []
# Only record a (start, end) range at each slice boundary
# (cpu index divisible by the slice width) -- TODO confirm.
991 cpuperslice = int(cpu) % total_cpus_per_slice
993 cpus_per_node[numa_node].append((int(cpu), int(cpu) +
994 total_cpus_per_slice - 1))
995 if numa_node not in numa_nodes:
996 numa_nodes.append(numa_node)
997 if core not in cores:
999 node['cpu']['cpus_per_node'] = cpus_per_node
1001 # Ask the user some questions
# Systems with fewer than 8 cpus skip the questionnaire entirely.
1002 if ask_questions and total_cpus >= 8:
1003 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1005 # Populate the interfaces with the numa node
1006 if 'interfaces' in node:
1007 ikeys = node['interfaces'].keys()
1008 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1010 # We don't want to write the cpuinfo
# Blank cpuinfo so the YAML dump below doesn't serialize the raw data.
1011 node['cpuinfo'] = ""
1014 self._update_auto_config()
# Interactive triage of devices currently used by neither VPP nor the OS:
# first offer to rebind each one back to a kernel driver (moving it to
# kernel_devices), then offer the remainder to VPP (moving them to
# dpdk_devices). Mutates all three device dicts in place.
1017 def _modify_other_devices(self, node,
1018 other_devices, kernel_devices, dpdk_devices):
1020 Modify the devices configuration, asking for the user for the values.
1024 odevices_len = len(other_devices)
1025 if odevices_len > 0:
1026 print "\nThese device(s) are currently NOT being used",
1027 print "by VPP or the OS.\n"
1028 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1029 question = "\nWould you like to give any of these devices"
1030 question += " back to the OS [Y/n]? "
1031 answer = self._ask_user_yn(question, 'Y')
1034 for dit in other_devices.items():
1037 question = "Would you like to use device {} for". \
1039 question += " the OS [y/N]? "
1040 answer = self._ask_user_yn(question, 'n')
# 'unused' lists candidate kernel drivers for the device; bind to
# the first one when available.
1042 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1043 driver = device['unused'][0]
1044 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1046 logging.debug('Could not bind device {}'.format(dvid))
# Move the devices the user released (collected in vppd) from the
# "other" bucket into the kernel bucket.
1048 for dit in vppd.items():
1051 kernel_devices[dvid] = device
1052 del other_devices[dvid]
1054 odevices_len = len(other_devices)
1055 if odevices_len > 0:
1056 print "\nThese device(s) are still NOT being used ",
1057 print "by VPP or the OS.\n"
1058 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1059 question = "\nWould you like use any of these for VPP [y/N]? "
1060 answer = self._ask_user_yn(question, 'N')
1063 for dit in other_devices.items():
1066 question = "Would you like to use device {} ".format(dvid)
1067 question += "for VPP [y/N]? "
1068 answer = self._ask_user_yn(question, 'n')
# Move the devices the user granted to VPP into the dpdk bucket.
1071 for dit in vppd.items():
1074 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1075 driver = device['unused'][0]
1076 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1077 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1079 logging.debug('Could not bind device {}'.format(dvid))
1080 dpdk_devices[dvid] = device
1081 del other_devices[dvid]
# Non-interactive variant of modify_devices: rebuild each node's interface
# map straight from the config file by matching the configured PCI
# addresses against every discovered device on the node.
1083 def update_interfaces_config(self):
1085 Modify the interfaces directly from the config file.
1089 for i in self._nodes.items():
1091 devices = node['devices']
# Merge all device categories into one lookup keyed by PCI address.
1092 all_devices = devices['other_devices']
1093 all_devices.update(devices['dpdk_devices'])
1094 all_devices.update(devices['kernel_devices'])
1098 if 'interfaces' in node:
1099 current_ifcs = node['interfaces']
1101 for ifc in current_ifcs.values():
1102 dvid = ifc['pci_address']
1103 if dvid in all_devices:
1104 VppPCIUtil.vpp_create_interface(interfaces, dvid,
1106 node['interfaces'] = interfaces
# Full interactive device workflow for every node: triage "other" devices,
# re-scan, offer kernel-bound devices to VPP, offer to remove devices from
# VPP, rebuild node['interfaces'] from the final dpdk set, and persist the
# choices to the auto-config file.
1110 def modify_devices(self):
1112 Modify the devices configuration, asking for the user for the values.
1116 for i in self._nodes.items():
1118 devices = node['devices']
1119 other_devices = devices['other_devices']
1120 kernel_devices = devices['kernel_devices']
1121 dpdk_devices = devices['dpdk_devices']
1124 self._modify_other_devices(node, other_devices,
1125 kernel_devices, dpdk_devices)
1127 # Get the devices again for this node
# Re-probe: _modify_other_devices may have rebound devices.
1128 self._get_device(node)
1129 devices = node['devices']
1130 kernel_devices = devices['kernel_devices']
1131 dpdk_devices = devices['dpdk_devices']
# Phase 2: offer kernel-bound (but VPP-safe) devices to VPP.
1133 klen = len(kernel_devices)
1135 print "\nThese devices have kernel interfaces, but",
1136 print "appear to be safe to use with VPP.\n"
1137 VppPCIUtil.show_vpp_devices(kernel_devices)
1138 question = "\nWould you like to use any of these "
1139 question += "device(s) for VPP [y/N]? "
1140 answer = self._ask_user_yn(question, 'n')
1143 for dit in kernel_devices.items():
1146 question = "Would you like to use device {} ". \
1148 question += "for VPP [y/N]? "
1149 answer = self._ask_user_yn(question, 'n')
# Bind accepted devices to their first unused driver and move
# them from the kernel bucket to the dpdk bucket.
1152 for dit in vppd.items():
1155 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1156 driver = device['unused'][0]
1157 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1158 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1160 logging.debug('Could not bind device {}'.format(dvid))
1161 dpdk_devices[dvid] = device
1162 del kernel_devices[dvid]
# Phase 3: offer to take devices away from VPP again.
1164 dlen = len(dpdk_devices)
1166 print "\nThese device(s) will be used by VPP.\n"
1167 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1168 question = "\nWould you like to remove any of "
1169 question += "these device(s) [y/N]? "
1170 answer = self._ask_user_yn(question, 'n')
1173 for dit in dpdk_devices.items():
1176 question = "Would you like to remove {} [y/N]? ". \
1178 answer = self._ask_user_yn(question, 'n')
# Rebind removed devices back toward the kernel bucket.
1181 for dit in vppd.items():
1184 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1185 driver = device['unused'][0]
1186 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1187 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1189 logging.debug('Could not bind device {}'.format(dvid))
1190 kernel_devices[dvid] = device
1191 del dpdk_devices[dvid]
# Final interface map reflects exactly the dpdk-bound devices.
1194 for dit in dpdk_devices.items():
1197 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1198 node['interfaces'] = interfaces
1200 print "\nThese device(s) will be used by VPP, please",
1201 print "rerun this option if this is incorrect.\n"
1202 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1204 self._update_auto_config()
# Interactive hugepage sizing for every node: show current totals, cap the
# allowed maximum at MAX_PERCENT_FOR_HUGE_PAGES of free memory, prompt for
# a new total, persist it, and re-read the hugepage state.
1207 def modify_huge_pages(self):
1209 Modify the huge page configuration, asking for the user for the values.
1213 for i in self._nodes.items():
1216 total = node['hugepages']['actual_total']
1217 free = node['hugepages']['free']
1218 size = node['hugepages']['size']
# memfree/size look like "NNNN kB" strings -- take the number part.
1219 memfree = node['hugepages']['memfree'].split(' ')[0]
1220 hugesize = int(size.split(' ')[0])
1221 # The max number of huge pages should be no more than
1222 # 70% of total free memory
1223 maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
1224 print "\nThere currently {} {} huge pages free.". \
1226 question = "Do you want to reconfigure the number of "
1227 question += "huge pages [y/N]? "
1228 answer = self._ask_user_yn(question, 'n')
# Declining keeps the currently observed total.
1230 node['hugepages']['total'] = total
1233 print "\nThere currently a total of {} huge pages.". \
1235 question = "How many huge pages do you want [{} - {}][{}]? ". \
1236 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
# NOTE(review): the literal 1024s here duplicate
# MIN_TOTAL_HUGE_PAGES -- should reference the constant.
1237 answer = self._ask_user_range(question, 1024, maxpages, 1024)
1238 node['hugepages']['total'] = str(answer)
1240 # Update auto-config.yaml
1241 self._update_auto_config()
1243 # Rediscover just the hugepages
1244 self.get_hugepages()
# Placeholder: TCP parameters are user-supplied (acquire_tcp_params), so
# there is currently nothing to discover from the system.
1246 def get_tcp_params(self):
1248 Get the tcp configuration
1251 # maybe nothing to do here?
# Ask the user for expected active-open (client) and passive-open (server)
# TCP session counts on every node; answers below 10000 are treated as 0
# (no TCP tuning). Persists the answers and re-reads TCP state.
1254 def acquire_tcp_params(self):
1256 Ask the user for TCP stack configuration parameters
1260 for i in self._nodes.items():
1263 question = "\nHow many active-open / tcp client sessions are expected "
1264 question = question + "[0-10000000][0]? "
1265 answer = self._ask_user_range(question, 0, 10000000, 0)
1266 # Less than 10K is equivalent to 0
1267 if int(answer) < 10000:
1269 node['tcp']['active_open_sessions'] = answer
1271 question = "How many passive-open / tcp server sessions are expected "
1272 question = question + "[0-10000000][0]? "
1273 answer = self._ask_user_range(question, 0, 10000000, 0)
1274 # Less than 10K is equivalent to 0
1275 if int(answer) < 10000:
1277 node['tcp']['passive_open_sessions'] = answer
1279 # Update auto-config.yaml
1280 self._update_auto_config()
1282 # Rediscover tcp parameters
1283 self.get_tcp_params()
# Force-rebuild QEMU on the node with the project's patches applied
# (delegates entirely to QemuUtils.build_qemu).
1286 def patch_qemu(node):
1288 Patch qemu with the correct patches.
1290 :param node: Node dictionary
1294 print '\nWe are patching the node "{}":\n'.format(node['host'])
1295 QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
1300 print the CPU information
1304 cpu = CpuUtils.get_cpu_info_per_node(node)
1308 print "{:>20}: {}".format(item, cpu[item])
1311 print "{:>20}: {}".format(item, cpu[item])
1312 item = 'Thread(s) per core'
1314 print "{:>20}: {}".format(item, cpu[item])
1315 item = 'Core(s) per socket'
1317 print "{:>20}: {}".format(item, cpu[item])
1320 print "{:>20}: {}".format(item, cpu[item])
1321 item = 'NUMA node(s)'
1324 numa_nodes = int(cpu[item])
1325 for i in xrange(0, numa_nodes):
1326 item = "NUMA node{} CPU(s)".format(i)
1327 print "{:>20}: {}".format(item, cpu[item])
1328 item = 'CPU max MHz'
1330 print "{:>20}: {}".format(item, cpu[item])
1331 item = 'CPU min MHz'
1333 print "{:>20}: {}".format(item, cpu[item])
1335 if node['cpu']['smt_enabled']:
1339 print "{:>20}: {}".format('SMT', smt)
1342 print "\nVPP Threads: (Name: Cpu Number)"
1343 vpp_processes = cpu['vpp_processes']
1344 for i in vpp_processes.items():
1345 print " {:10}: {:4}".format(i[0], i[1])
1348 def device_info(node):
1350 Show the device information.
1354 if 'cpu' in node and 'total_mbufs' in node['cpu']:
1355 total_mbufs = node['cpu']['total_mbufs']
1356 if total_mbufs is not 0:
1357 print "Total Number of Buffers: {}".format(total_mbufs)
1359 vpp = VppPCIUtil(node)
1360 vpp.get_all_devices()
1361 linkup_devs = vpp.get_link_up_devices()
1362 if len(linkup_devs):
1363 print ("\nDevices with link up (can not be used with VPP):")
1364 vpp.show_vpp_devices(linkup_devs, show_header=False)
1365 # for dev in linkup_devs:
1367 kernel_devs = vpp.get_kernel_devices()
1368 if len(kernel_devs):
1369 print ("\nDevices bound to kernel drivers:")
1370 vpp.show_vpp_devices(kernel_devs, show_header=False)
1372 print ("\nNo devices bound to kernel drivers")
1374 dpdk_devs = vpp.get_dpdk_devices()
1376 print ("\nDevices bound to DPDK drivers:")
1377 vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1380 print ("\nNo devices bound to DPDK drivers")
1383 interfaces = vpputl.get_hardware(node)
1384 if interfaces == {}:
1387 print ("\nDevices in use by VPP:")
1389 if len(interfaces.items()) < 2:
1393 print "{:30} {:6} {:4} {:7} {:4} {:7}". \
1394 format('Name', 'Socket', 'RXQs',
1395 'RXDescs', 'TXQs', 'TXDescs')
1396 for intf in sorted(interfaces.items()):
1399 if name == 'local0':
1401 socket = rx_qs = rx_ds = tx_qs = tx_ds = ''
1402 if 'cpu socket' in value:
1403 socket = int(value['cpu socket'])
1404 if 'rx queues' in value:
1405 rx_qs = int(value['rx queues'])
1406 if 'rx descs' in value:
1407 rx_ds = int(value['rx descs'])
1408 if 'tx queues' in value:
1409 tx_qs = int(value['tx queues'])
1410 if 'tx descs' in value:
1411 tx_ds = int(value['tx descs'])
1413 print ("{:30} {:>6} {:>4} {:>7} {:>4} {:>7}".
1414 format(name, socket, rx_qs, rx_ds, tx_qs, tx_ds))
1417 def hugepage_info(node):
1419 Show the huge page information.
1423 hpg = VppHugePageUtil(node)
1424 hpg.show_huge_pages()
1427 def min_system_resources(node):
1429 Check the system for basic minimum resources, return true if
1439 if 'layout' in node['cpu']:
1440 total_cpus = len(node['cpu']['layout'])
1442 print "\nThere is only {} CPU(s) available on this system.".format(total_cpus)
1443 print "This is not enough to run VPP."
1447 if 'free' in node['hugepages'] and \
1448 'memfree' in node['hugepages'] and \
1449 'size' in node['hugepages']:
1450 free = node['hugepages']['free']
1451 memfree = float(node['hugepages']['memfree'].split(' ')[0])
1452 hugesize = float(node['hugepages']['size'].split(' ')[0])
1454 memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1455 percentmemhugepages = (memhugepages / memfree) * 100
1456 if free is '0' and \
1457 percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1458 print "\nThe System has only {} of free memory.".format(int(memfree))
1459 print "You will not be able to allocate enough Huge Pages for VPP."
1466 Print the system information
1470 for i in self._nodes.items():
1471 print "\n=============================="
1475 print "NODE: {}\n".format(name)
1482 print "\nGrub Command Line:"
1485 " Current: {}".format(
1486 node['grub']['current_cmdline'])
1488 " Configured: {}".format(
1489 node['grub']['default_cmdline'])
1492 print "\nHuge Pages:"
1493 self.hugepage_info(node)
1497 self.device_info(node)
1500 print "\nVPP Service Status:"
1501 state, errors = VPPUtil.status(node)
1502 print " {}".format(state)
1504 print " {}".format(e)
1506 # Minimum system resources
1507 self.min_system_resources(node)
1509 print "\n=============================="
1511 def _ipv4_interface_setup_questions(self, node):
1513 Ask the user some questions and get a list of interfaces
1514 and IPv4 addresses associated with those interfaces
1516 :param node: Node dictionary.
1518 :returns: A list or interfaces with ip addresses
1523 interfaces = vpputl.get_hardware(node)
1524 if interfaces == {}:
1527 interfaces_with_ip = []
1528 for intf in sorted(interfaces.items()):
1530 if name == 'local0':
1533 question = "Would you like add address to interface {} [Y/n]? ".format(name)
1534 answer = self._ask_user_yn(question, 'y')
1537 addr = self._ask_user_ipv4()
1538 address['name'] = name
1539 address['addr'] = addr
1540 interfaces_with_ip.append(address)
1542 return interfaces_with_ip
1544 def ipv4_interface_setup(self):
1546 After asking the user some questions, get a list of interfaces
1547 and IPv4 addresses associated with those interfaces
1551 for i in self._nodes.items():
1554 # Show the current interfaces with IP addresses
1555 current_ints = VPPUtil.get_int_ip(node)
1556 if current_ints is not {}:
1557 print ("\nThese are the current interfaces with IP addresses:")
1558 for items in sorted(current_ints.items()):
1561 if 'address' not in value:
1564 address = value['address']
1565 print ("{:30} {:20} {:10}".format(name, address, value['state']))
1566 question = "\nWould you like to keep this configuration [Y/n]? "
1567 answer = self._ask_user_yn(question, 'y')
1571 print ("\nThere are currently no interfaces with IP addresses.")
1573 # Create a script that add the ip addresses to the interfaces
1574 # and brings the interfaces up
1575 ints_with_addrs = self._ipv4_interface_setup_questions(node)
1577 for ints in ints_with_addrs:
1580 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1581 setintupstr = 'set int state {} up\n'.format(name)
1582 content += setipstr + setintupstr
1584 # Write the content to the script
1585 rootdir = node['rootdir']
1586 filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1587 with open(filename, 'w+') as sfile:
1588 sfile.write(content)
1590 # Execute the script
1591 cmd = 'vppctl exec {}'.format(filename)
1592 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1594 logging.debug(stderr)
1596 print("\nA script as been created at {}".format(filename))
1597 print("This script can be run using the following:")
1598 print("vppctl exec {}\n".format(filename))
1600 def _create_vints_questions(self, node):
1602 Ask the user some questions and get a list of interfaces
1603 and IPv4 addresses associated with those interfaces
1605 :param node: Node dictionary.
1607 :returns: A list or interfaces with ip addresses
1612 interfaces = vpputl.get_hardware(node)
1613 if interfaces == {}:
1616 # First delete all the Virtual interfaces
1617 for intf in sorted(interfaces.items()):
1619 if name[:7] == 'Virtual':
1620 cmd = 'vppctl delete vhost-user {}'.format(name)
1621 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1623 logging.debug('{} failed on node {} {}'.format(
1624 cmd, node['host'], stderr))
1626 # Create a virtual interface, for each interface the user wants to use
1627 interfaces = vpputl.get_hardware(node)
1628 if interfaces == {}:
1630 interfaces_with_virtual_interfaces = []
1632 for intf in sorted(interfaces.items()):
1634 if name == 'local0':
1637 question = "Would you like connect this interface {} to the VM [Y/n]? ".format(name)
1638 answer = self._ask_user_yn(question, 'y')
1640 sockfilename = '/var/run/vpp/sock{}.sock'.format(inum)
1641 if os.path.exists(sockfilename):
1642 os.remove(sockfilename)
1643 cmd = 'vppctl create vhost-user socket {} server'.format(sockfilename)
1644 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1646 raise RuntimeError("Create vhost failed on node {} {}."
1647 .format(node['host'], stderr))
1648 vintname = stdout.rstrip('\r\n')
1650 interface = {'name': name, 'virtualinterface': '{}'.format(vintname),
1651 'bridge': '{}'.format(inum)}
1653 interfaces_with_virtual_interfaces.append(interface)
1655 return interfaces_with_virtual_interfaces
1657 def create_and_bridge_virtual_interfaces(self):
1659 After asking the user some questions, create a VM and connect the interfaces
1664 for i in self._nodes.items():
1667 # Show the current bridge and interface configuration
1668 print "\nThis the current bridge configuration:"
1669 VPPUtil.show_bridge(node)
1670 question = "\nWould you like to keep this configuration [Y/n]? "
1671 answer = self._ask_user_yn(question, 'y')
1675 # Create a script that builds a bridge configuration with physical interfaces
1676 # and virtual interfaces
1677 ints_with_vints = self._create_vints_questions(node)
1679 for intf in ints_with_vints:
1680 vhoststr = 'comment { The following command creates the socket }\n'
1681 vhoststr += 'comment { and returns a virtual interface }\n'
1682 vhoststr += 'comment {{ create vhost-user socket /var/run/vpp/sock{}.sock server }}\n'. \
1683 format(intf['bridge'])
1685 setintdnstr = 'set interface state {} down\n'.format(intf['name'])
1687 setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
1688 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])
1690 # set interface state VirtualEthernet/0/0/0 up
1691 setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])
1693 # set interface state VirtualEthernet/0/0/0 down
1694 setintupstr = 'set interface state {} up\n'.format(intf['name'])
1696 content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
1698 # Write the content to the script
1699 rootdir = node['rootdir']
1700 filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1701 with open(filename, 'w+') as sfile:
1702 sfile.write(content)
1704 # Execute the script
1705 cmd = 'vppctl exec {}'.format(filename)
1706 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1708 logging.debug(stderr)
1710 print("\nA script as been created at {}".format(filename))
1711 print("This script can be run using the following:")
1712 print("vppctl exec {}\n".format(filename))