1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
20 from netaddr import IPAddress
22 from vpplib.VPPUtil import VPPUtil
23 from vpplib.VppPCIUtil import VppPCIUtil
24 from vpplib.VppHugePageUtil import VppHugePageUtil
25 from vpplib.CpuUtils import CpuUtils
26 from vpplib.VppGrubUtil import VppGrubUtil
27 from vpplib.QemuUtils import QemuUtils
29 __all__ = ["AutoConfig"]
33 MIN_TOTAL_HUGE_PAGES = 1024
34 MAX_PERCENT_FOR_HUGE_PAGES = 70
37 class AutoConfig(object):
38 """Auto Configuration Tools"""
# Constructor: records the auto-config file path and initializes per-node
# state. NOTE(review): this is a lossy excerpt — leading numbers are original
# line numbers and several init lines are missing from view.
40 def __init__(self, rootdir, filename):
42 The Auto Configure class.
44 :param rootdir: The root directory for all the auto configuration files
45 :param filename: The autoconfiguration file
# Plain concatenation: rootdir is presumably expected to end with a path
# separator — TODO confirm at call sites.
49 self._autoconfig_filename = rootdir + filename
50 self._rootdir = rootdir
# Per-node VPP device map and hugepage config string, populated later.
53 self._vpp_devices_node = {}
54 self._hugepage_config = ""
# Stray docstring fragment of a following accessor whose `def` line is not
# visible in this excerpt.
59 Returns the nodes dictionary.
# Make a one-time backup (<filename>.orig) of a config file before modifying
# it; the copy is created only when the .orig file does not already exist.
68 def _autoconfig_backup_file(filename):
72 :param filename: The file to backup
76 # Does a copy of the file exist, if not create one
77 ofile = filename + '.orig'
# `ls` is used purely as an existence probe; stdout not matching `ofile`
# means no backup exists yet.
78 (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
81 if stdout.strip('\n') != ofile:
82 cmd = 'sudo cp {} {}'.format(filename, ofile)
# NOTE(review): ret/stderr of the copy are not checked here — a failed
# backup is silently ignored (confirm intent against missing lines).
83 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
# Interactive prompt for an IPv4 address in CIDR form; when no '/<plen>' is
# supplied, a dotted netmask is requested and converted with netmask_bits().
# (The `def` line of this helper is not visible in this excerpt; the loop /
# exception handling around the prompts is also in missing lines.)
90 Asks the user for a number within a range.
91 default is returned if return is entered.
93 :returns: IP address with cidr
98 answer = raw_input("Please enter the IPv4 Address [n.n.n.n/n]: ")
100 ipinput = answer.split('/')
# IPAddress() raises on invalid input — presumably caught by a surrounding
# try/except (not visible) that re-prompts via the print below.
101 ipaddr = IPAddress(ipinput[0])
103 plen = answer.split('/')[1]
105 answer = raw_input("Please enter the netmask [n.n.n.n]: ")
106 plen = IPAddress(answer).netmask_bits()
107 return '{}/{}'.format(ipaddr, plen)
# Python 2 print statement; raw_input above is also Python-2-only.
109 print "Please enter a valid IPv4 address."
# Prompt until the user enters an integer in [first, last]; an empty answer
# returns `default` (the empty-answer branch is in missing lines).
113 def _ask_user_range(question, first, last, default):
115 Asks the user for a number within a range.
116 default is returned if return is entered.
118 :param question: Text of a question.
119 :param first: First number in the range
120 :param last: Last number in the range
121 :param default: The value returned when return is entered
122 :type question: string
126 :returns: The answer to the question
131 answer = raw_input(question)
# Loose digit check: findall on single chars; the int() below assumes the
# whole answer is numeric — TODO confirm guarded by missing lines.
135 if re.findall(r'[0-9+]', answer):
136 if int(answer) in range(first, last + 1):
# NOTE(review): both messages are missing the word "enter" and differ
# ("value" vs "number") — candidate cleanup.
139 print "Please a value between {} and {} or Return.". \
142 print "Please a number between {} and {} or Return.". \
# Prompt for a yes/no answer; an empty answer yields `default`. Only the
# first matching character is used, lower-cased.
148 def _ask_user_yn(question, default):
150 Asks the user for a yes or no question.
152 :param question: Text of a question.
153 :param default: The value returned when return is entered
154 :type question: string
155 :type default: string
156 :returns: The answer to the question
161 default = default.lower()
# Loop until a valid answer; `input_valid` is set in missing lines.
163 while not input_valid:
164 answer = raw_input(question)
# findall matches a y/n anywhere in the answer, not only at position 0 —
# answer[0] below is what is actually used.
167 if re.findall(r'[YyNn]', answer):
169 answer = answer[0].lower()
171 print "Please answer Y, N or Return."
# Load the auto-config YAML, then (if present) the system config YAML, and
# populate self._metadata / self._nodes. The `try:` lines paired with the
# excepts below are in missing lines.
175 def _loadconfig(self):
177 Load the testbed configuration, given the auto configuration file.
181 # Get the Topology, from the topology layout file
183 with open(self._autoconfig_filename, 'r') as stream:
# NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
# input and deprecated — yaml.safe_load is the usual fix.
185 topo = yaml.load(stream)
186 if 'metadata' in topo:
187 self._metadata = topo['metadata']
188 except yaml.YAMLError as exc:
# BUG(review): format() gets two args but the string has one placeholder —
# `exc` is silently dropped from the message.
189 raise RuntimeError("Couldn't read the Auto config file {}.".format(self._autoconfig_filename, exc))
191 systemfile = self._rootdir + self._metadata['system_config_file']
192 if os.path.isfile(systemfile):
193 with open(systemfile, 'r') as sysstream:
195 systopo = yaml.load(sysstream)
196 if 'nodes' in systopo:
197 self._nodes = systopo['nodes']
198 except yaml.YAMLError as sysexc:
# Same dropped-exception-argument issue as above.
199 raise RuntimeError("Couldn't read the System config file {}.".format(systemfile, sysexc))
# Fallback when no system config file exists: take nodes from the auto config.
201 # Get the nodes from Auto Config
203 self._nodes = topo['nodes']
205 # Set the root directory in all the nodes
# `node` is presumably i[1] (assignment is in a missing line).
206 for i in self._nodes.items():
208 node['rootdir'] = self._rootdir
# Persist the current in-memory metadata + nodes to the system config file.
210 def updateconfig(self):
212 Update the testbed configuration, given the auto configuration file.
213 We will write the system configuration file with the current node
218 # Initialize the yaml data
219 ydata = {'metadata': self._metadata, 'nodes': self._nodes}
221 # Write the system config file
222 filename = self._rootdir + self._metadata['system_config_file']
223 with open(filename, 'w') as yamlfile:
# default_flow_style=False keeps the output in block (human-readable) YAML.
224 yaml.dump(ydata, yamlfile, default_flow_style=False)
# Re-read the auto-config YAML, merge in the user-modified values from
# self._nodes (interfaces, cpu, tcp, hugepages), and write it back.
# Key/value unpacking of the iterated items is in missing lines.
226 def _update_auto_config(self):
228 Write the auto configuration file with the new configuration data,
233 # Initialize the yaml data
235 with open(self._autoconfig_filename, 'r') as stream:
# NOTE(review): unsafe/deprecated yaml.load without Loader (see _loadconfig).
237 ydata = yaml.load(stream)
239 nodes = ydata['nodes']
240 except yaml.YAMLError as exc:
# Copy interface PCI/MAC data chosen by the user into the yaml node.
244 for i in nodes.items():
249 node['interfaces'] = {}
250 for item in self._nodes[key]['interfaces'].items():
254 node['interfaces'][port] = {}
255 node['interfaces'][port]['pci_address'] = \
256 interface['pci_address']
257 if 'mac_address' in interface:
258 node['interfaces'][port]['mac_address'] = \
259 interface['mac_address']
# CPU reservation answers.
261 if 'total_other_cpus' in self._nodes[key]['cpu']:
262 node['cpu']['total_other_cpus'] = \
263 self._nodes[key]['cpu']['total_other_cpus']
264 if 'total_vpp_cpus' in self._nodes[key]['cpu']:
265 node['cpu']['total_vpp_cpus'] = \
266 self._nodes[key]['cpu']['total_vpp_cpus']
267 if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
268 node['cpu']['reserve_vpp_main_core'] = \
269 self._nodes[key]['cpu']['reserve_vpp_main_core']
# TCP session-count answers.
272 if 'active_open_sessions' in self._nodes[key]['tcp']:
273 node['tcp']['active_open_sessions'] = \
274 self._nodes[key]['tcp']['active_open_sessions']
275 if 'passive_open_sessions' in self._nodes[key]['tcp']:
276 node['tcp']['passive_open_sessions'] = \
277 self._nodes[key]['tcp']['passive_open_sessions']
# Hugepage total chosen by the user.
280 node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
282 # Write the auto config config file
283 with open(self._autoconfig_filename, 'w') as yamlfile:
284 yaml.dump(ydata, yamlfile, default_flow_style=False)
# Apply the hugepage configuration on every node via VppHugePageUtil.
# `node` is presumably i[1] (assignment in a missing line).
286 def apply_huge_pages(self):
288 Apply the huge page config
292 for i in self._nodes.items():
295 hpg = VppHugePageUtil(node)
# Despite the name, this call performs the apply in the upstream tool —
# TODO confirm semantics of hugepages_dryrun_apply().
296 hpg.hugepages_dryrun_apply()
# Build the "unix" section body for the VPP startup config from the node's
# vpp.unix settings. Default value of `unix` and the early return for nodes
# without a 'unix' section are in missing lines.
299 def _apply_vpp_unix(node):
301 Apply the VPP Unix config
303 :param node: Node dictionary with cpuinfo.
308 if 'unix' not in node['vpp']:
311 unixv = node['vpp']['unix']
312 if 'interactive' in unixv:
313 interactive = unixv['interactive']
# Explicit `is True` check: only a boolean True enables interactive mode.
314 if interactive is True:
315 unix = ' interactive\n'
# Trailing newline stripped so the caller controls line termination.
317 return unix.rstrip('\n')
# Build the "cpu" section body (main-core / corelist-workers) for the VPP
# startup config from the node's computed cpu layout.
320 def _apply_vpp_cpu(node):
322 Apply the VPP cpu config
324 :param node: Node dictionary with cpuinfo.
330 vpp_main_core = node['cpu']['vpp_main_core']
# NOTE(review): `is not 0` compares identity, not value — works in CPython
# only because small ints are interned; should be `!= 0`.
331 if vpp_main_core is not 0:
332 cpu += ' main-core {}\n'.format(vpp_main_core)
# Workers are (start, end) tuples; render "n" for a single core, "a-b"
# for a range, comma-separated.
335 vpp_workers = node['cpu']['vpp_workers']
336 vpp_worker_len = len(vpp_workers)
337 if vpp_worker_len > 0:
339 for i, worker in enumerate(vpp_workers):
341 vpp_worker_str += ','
342 if worker[0] == worker[1]:
343 vpp_worker_str += "{}".format(worker[0])
345 vpp_worker_str += "{}-{}".format(worker[0], worker[1])
347 cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
# Build the dpdk "dev { ... }" stanzas for the VPP startup config from the
# per-numa port map computed earlier. Item unpacking (key/value) and default
# queue counts are in missing lines.
352 def _apply_vpp_devices(node):
354 Apply VPP PCI Device configuration to vpp startup.
356 :param node: Node dictionary with cpuinfo.
361 ports_per_numa = node['cpu']['ports_per_numa']
362 total_mbufs = node['cpu']['total_mbufs']
364 for item in ports_per_numa.items():
366 interfaces = value['interfaces']
368 # if 0 was specified for the number of vpp workers, use 1 queue
371 if 'rx_queues' in value:
372 num_rx_queues = value['rx_queues']
373 if 'tx_queues' in value:
374 num_tx_queues = value['tx_queues']
379 # Create the devices string
380 for interface in interfaces:
381 pci_address = interface['pci_address']
# Strip stray quotes that can come from the YAML round-trip.
382 pci_address = pci_address.lstrip("'").rstrip("'")
384 devices += ' dev {} {{ \n'.format(pci_address)
386 devices += ' num-rx-queues {}\n'.format(num_rx_queues)
388 devices += ' num-rx-queues {}\n'.format(1)
390 devices += ' num-tx-queues {}\n'.format(num_tx_queues)
392 devices += ' num-rx-desc {}\n'.format(num_rx_desc)
394 devices += ' num-tx-desc {}\n'.format(num_tx_desc)
# NOTE(review): `is not 0` identity comparison — should be `!= 0`.
397 if total_mbufs is not 0:
398 devices += '\n num-mbufs {}'.format(total_mbufs)
# Try to place `total_vpp_workers` contiguous cores on `numa_node`, after
# the cores reserved for other processes, optionally reserving one extra
# core immediately before the workers for the VPP main thread.
403 def _calc_vpp_workers(node, vpp_workers, numa_node,
404 other_cpus_end, total_vpp_workers,
405 reserve_vpp_main_core):
407 Calculate the VPP worker information
409 :param node: Node dictionary
410 :param vpp_workers: List of VPP workers
411 :param numa_node: Numa node
412 :param other_cpus_end: The end of the cpus allocated for cores
414 :param total_vpp_workers: The number of vpp workers needed
415 :param reserve_vpp_main_core: Is there a core needed for
419 :type other_cpus_end: int
420 :type total_vpp_workers: int
421 :type reserve_vpp_main_core: bool
422 :returns: Is a core still needed for the vpp main core
426 # Can we fit the workers in one of these slices
# cpus_per_node holds (start, end) slices per numa node; the loop over
# slices and start/end unpacking are in missing lines.
427 cpus = node['cpu']['cpus_per_node'][numa_node]
431 if start <= other_cpus_end:
432 start = other_cpus_end + 1
# Reserving the main core shifts the worker range up by one (see start-1
# used as the main core below).
434 if reserve_vpp_main_core:
437 workers_end = start + total_vpp_workers - 1
438 if workers_end <= end:
439 if reserve_vpp_main_core:
440 node['cpu']['vpp_main_core'] = start - 1
441 reserve_vpp_main_core = False
442 if total_vpp_workers:
443 vpp_workers.append((start, workers_end))
# Fallback: if no slice could host the main core, put it right after the
# "other" cpus.
446 # We still need to reserve the main core
447 if reserve_vpp_main_core:
448 node['cpu']['vpp_main_core'] = other_cpus_end + 1
# False once the main core has been placed.
450 return reserve_vpp_main_core
# Derive worker/queue counts and the mbuf budget for one numa node's ports,
# recording them in ports_per_numa_value. desc_entries and the tx_queues
# store are in missing lines.
453 def _calc_desc_and_queues(total_numa_nodes,
454 total_ports_per_numa,
456 ports_per_numa_value):
458 Calculate the number of descriptors and queues
460 :param total_numa_nodes: The total number of numa nodes
461 :param total_ports_per_numa: The total number of ports for this
463 :param total_vpp_cpus: The total number of cpus to allocate for vpp
464 :param ports_per_numa_value: The value from the ports_per_numa
466 :type total_numa_nodes: int
467 :type total_ports_per_numa: int
468 :type total_vpp_cpus: int
469 :type ports_per_numa_value: dict
470 :returns The total number of message buffers
471 :returns: The total number of vpp workers
476 # Get the total vpp workers
477 total_vpp_workers = total_vpp_cpus
478 ports_per_numa_value['total_vpp_workers'] = total_vpp_workers
480 # Get the number of rx queues
# At least one rx queue even with zero workers.
481 rx_queues = max(1, total_vpp_workers)
482 tx_queues = total_vpp_workers * total_numa_nodes + 1
484 # Get the descriptor entries
486 ports_per_numa_value['rx_queues'] = rx_queues
# mbufs = (rx + tx descriptors) summed over all ports on this numa node.
487 total_mbufs = (((rx_queues * desc_entries) +
488 (tx_queues * desc_entries)) *
489 total_ports_per_numa)
# NOTE(review): self-assignment is a no-op — likely a leftover from an
# edit (e.g. a removed scaling factor); candidate for removal.
490 total_mbufs = total_mbufs
492 return total_mbufs, total_vpp_workers
# Group the vpp interfaces by the numa node they sit on and cache the map
# under node['cpu']['ports_per_numa'].
495 def _create_ports_per_numa(node, interfaces):
497 Create a dictionary or ports per numa node
498 :param node: Node dictionary
499 :param interfaces: All the interfaces to be used by vpp
501 :type interfaces: dict
502 :returns: The ports per numa dictionary
506 # Make a list of ports by numa node
# `i` is presumably the interface dict from each item (unpacking in a
# missing line).
508 for item in interfaces.items():
510 if i['numa_node'] not in ports_per_numa:
511 ports_per_numa[i['numa_node']] = {'interfaces': []}
512 ports_per_numa[i['numa_node']]['interfaces'].append(i)
# NOTE(review): this second append mirrors the one above; the missing line
# between them is presumably an `else:` — as shown both branches append,
# confirm against the full source that ports are not double-counted.
514 ports_per_numa[i['numa_node']]['interfaces'].append(i)
515 node['cpu']['ports_per_numa'] = ports_per_numa
517 return ports_per_numa
# Top-level CPU planning per node: reserve "other" cores, then allocate VPP
# workers/main core per numa node and compute the mbuf budget. Used by both
# vpp startup and grub configuration.
519 def calculate_cpu_parameters(self):
521 Calculate the cpu configuration.
525 # Calculate the cpu parameters, needed for the
526 # vpp_startup and grub configuration
# `node` is presumably i[1] (assignment in a missing line).
527 for i in self._nodes.items():
530 # get total number of nic ports
531 interfaces = node['interfaces']
533 # Make a list of ports by numa node
534 ports_per_numa = self._create_ports_per_numa(node, interfaces)
536 # Get the number of cpus to skip, we never use the first cpu
538 other_cpus_end = other_cpus_start + \
539 node['cpu']['total_other_cpus'] - 1
# NOTE(review): `is not 0` identity comparison — should be `!= 0`.
541 if other_cpus_end is not 0:
542 other_workers = (other_cpus_start, other_cpus_end)
543 node['cpu']['other_workers'] = other_workers
545 # Allocate the VPP main core and workers
547 reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
548 total_vpp_cpus = node['cpu']['total_vpp_cpus']
550 # If total_vpp_cpus is 0 or is less than the numa nodes with ports
551 # then we shouldn't get workers
552 total_with_main = total_vpp_cpus
553 if reserve_vpp_main_core:
# Same identity-comparison concern as above (`is not 0`).
556 if total_with_main is not 0:
557 for item in ports_per_numa.items():
561 # Get the number of descriptors and queues
562 mbufs, total_vpp_workers = self._calc_desc_and_queues(
564 len(value['interfaces']), total_vpp_cpus, value)
567 # Get the VPP workers
568 reserve_vpp_main_core = self._calc_vpp_workers(
569 node, vpp_workers, numa_node, other_cpus_end,
570 total_vpp_workers, reserve_vpp_main_core)
573 total_mbufs = int(total_mbufs)
# Persist the computed plan on the node.
578 node['cpu']['vpp_workers'] = vpp_workers
579 node['cpu']['total_mbufs'] = total_mbufs
# Build the api-segment / session / tcp sections of the VPP startup config
# sized from the expected active/passive session counts. When both counts
# are zero only the minimal api-segment stanza is emitted (early return).
585 def _apply_vpp_tcp(node):
587 Apply the VPP Unix config
589 :param node: Node dictionary with cpuinfo.
593 active_open_sessions = node['tcp']['active_open_sessions']
594 aos = int(active_open_sessions)
596 passive_open_sessions = node['tcp']['passive_open_sessions']
597 pos = int(passive_open_sessions)
599 # Generate the api-segment gid vpp sheit in any case
601 tcp = "api-segment {\n"
602 tcp = tcp + " gid vpp\n"
604 return tcp.rstrip('\n')
606 tcp = "# TCP stack-related configuration parameters\n"
607 tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
608 tcp = tcp + "heapsize 4g\n\n"
609 tcp = tcp + "api-segment {\n"
610 tcp = tcp + " global-size 2000M\n"
611 tcp = tcp + " api-size 1G\n"
# Session table sizing: buckets are (aos+pos)/4 — Python 2 integer
# division; under py3 this would need //.
614 tcp = tcp + "session {\n"
615 tcp = tcp + " event-queue-length " + "{:d}".format(aos + pos) + "\n"
616 tcp = tcp + " preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
617 tcp = tcp + " v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
618 tcp = tcp + " v4-session-table-memory 3g\n"
620 tcp = tcp + " v4-halfopen-table-buckets " + \
621 "{:d}".format((aos + pos) / 4) + "\n"
622 tcp = tcp + " v4-halfopen-table-memory 3g\n"
625 tcp = tcp + "tcp {\n"
626 tcp = tcp + " preallocated-connections " + "{:d}".format(aos + pos) + "\n"
628 tcp = tcp + " preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
629 tcp = tcp + " local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
630 tcp = tcp + " local-endpoints-table-memory 3g\n"
633 return tcp.rstrip('\n')
# Render each node's VPP startup config: gather section strings, read the
# .template file, substitute via str.format, back up the old file, then
# write the result through a sudo heredoc.
635 def apply_vpp_startup(self):
637 Apply the vpp startup configration
641 # Apply the VPP startup configruation
# `node` is presumably i[1] (assignment in a missing line).
642 for i in self._nodes.items():
645 # Get the startup file
646 rootdir = node['rootdir']
647 sfile = rootdir + node['vpp']['startup_config_file']
650 devices = self._apply_vpp_devices(node)
653 cpu = self._apply_vpp_cpu(node)
655 # Get the unix config
656 unix = self._apply_vpp_unix(node)
658 # Get the TCP configuration, if any
659 tcp = self._apply_vpp_tcp(node)
661 # Make a backup if needed
662 self._autoconfig_backup_file(sfile)
# The template carries {unix}/{cpu}/{devices}/... placeholders filled by
# the format() call below (remaining kwargs are in missing lines).
665 tfile = sfile + '.template'
666 (ret, stdout, stderr) = \
667 VPPUtil.exec_command('cat {}'.format(tfile))
669 raise RuntimeError('Executing cat command failed to node {}'.
670 format(node['host']))
671 startup = stdout.format(unix=unix,
# Remove the old config; failure is only logged, not fatal.
676 (ret, stdout, stderr) = \
677 VPPUtil.exec_command('rm {}'.format(sfile))
679 logging.debug(stderr)
# Heredoc write so the file can be created with sudo privileges.
681 cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
682 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
684 raise RuntimeError('Writing config failed node {}'.
685 format(node['host']))
# Build the isolcpus list (other workers + vpp main core + vpp workers) and
# hand it to VppGrubUtil to rewrite the grub cmdline on each node.
687 def apply_grub_cmdline(self):
689 Apply the grub cmdline
# `node` is presumably i[1] (assignment in a missing line).
693 for i in self._nodes.items():
696 # Get the isolated CPUs
697 other_workers = node['cpu']['other_workers']
698 vpp_workers = node['cpu']['vpp_workers']
699 vpp_main_core = node['cpu']['vpp_main_core']
701 if other_workers is not None:
702 all_workers = [other_workers]
# NOTE(review): `is not 0` identity comparison — should be `!= 0`.
703 if vpp_main_core is not 0:
704 all_workers += [(vpp_main_core, vpp_main_core)]
705 all_workers += vpp_workers
# Render "n" or "a-b" per tuple, comma separation handled around the
# missing lines of this loop.
707 for idx, worker in enumerate(all_workers):
712 if worker[0] == worker[1]:
713 isolated_cpus += "{}".format(worker[0])
715 isolated_cpus += "{}-{}".format(worker[0], worker[1])
717 vppgrb = VppGrubUtil(node)
718 current_cmdline = vppgrb.get_current_cmdline()
719 if 'grub' not in node:
721 node['grub']['current_cmdline'] = current_cmdline
# apply_cmdline returns the newly configured default cmdline.
722 node['grub']['default_cmdline'] = \
723 vppgrb.apply_cmdline(node, isolated_cpus)
# Discover each node's hugepage state via VppHugePageUtil and cache it on
# the node dict. `node` is presumably i[1] (assignment in a missing line).
727 def get_hugepages(self):
729 Get the hugepage configuration
733 for i in self._nodes.items():
736 hpg = VppHugePageUtil(node)
737 max_map_count, shmmax = hpg.get_huge_page_config()
738 node['hugepages']['max_map_count'] = max_map_count
# NOTE(review): key 'shmax' looks like a typo for 'shmmax' — confirm what
# readers of this key expect before changing it.
739 node['hugepages']['shmax'] = shmmax
740 total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
741 node['hugepages']['actual_total'] = total
742 node['hugepages']['free'] = free
743 node['hugepages']['size'] = size
744 node['hugepages']['memtotal'] = memtotal
745 node['hugepages']['memfree'] = memfree
# Read current/default grub cmdlines per node and count the CPUs already in
# the isolcpus= parameter. (The `def` line of this method is not visible in
# this excerpt.)
751 Get the grub configuration
# `node` is presumably i[1] (assignment in a missing line).
755 for i in self._nodes.items():
758 vppgrb = VppGrubUtil(node)
759 current_cmdline = vppgrb.get_current_cmdline()
760 default_cmdline = vppgrb.get_default_cmdline()
762 # Get the total number of isolated CPUs
# isolcpus accepts comma-separated singles and a-b ranges.
764 iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
765 iso_cpurl = len(iso_cpur)
767 iso_cpu_str = iso_cpur[0]
768 iso_cpu_str = iso_cpu_str.split('=')[1]
769 iso_cpul = iso_cpu_str.split(',')
770 for iso_cpu in iso_cpul:
771 isocpuspl = iso_cpu.split('-')
# NOTE(review): `is 1` identity comparison on an int — should be `== 1`.
772 if len(isocpuspl) is 1:
773 current_iso_cpus += 1
775 first = int(isocpuspl[0])
776 second = int(isocpuspl[1])
# A range a-b contributes b-a(+1) cpus; the +1 branch condition is in a
# missing line.
778 current_iso_cpus += 1
780 current_iso_cpus += second - first
782 if 'grub' not in node:
784 node['grub']['current_cmdline'] = current_cmdline
785 node['grub']['default_cmdline'] = default_cmdline
786 node['grub']['current_iso_cpus'] = current_iso_cpus
# Probe all PCI devices on one node via VppPCIUtil and classify them into
# dpdk / kernel / other / link-up buckets on node['devices'].
791 def _get_device(node):
793 Get the device configuration for a single node
795 :param node: Node dictionary with cpuinfo.
800 vpp = VppPCIUtil(node)
801 vpp.get_all_devices()
803 # Save the device information
# node['devices'] is presumably (re)initialized in a missing line just
# before these assignments.
805 node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
806 node['devices']['kernel_devices'] = vpp.get_kernel_devices()
807 node['devices']['other_devices'] = vpp.get_other_devices()
808 node['devices']['linkup_devices'] = vpp.get_link_up_devices()
# Refresh device information for every node. `node` is presumably i[1]
# (assignment in a missing line).
810 def get_devices_per_node(self):
812 Get the device configuration for all the nodes
816 for i in self._nodes.items():
818 # Update the interface data
820 self._get_device(node)
# Parse `lscpu -p` output into a list of per-cpu dicts (cpu/core/socket/
# node columns). The cmd assignment and the list accumulation lines are in
# missing lines.
825 def get_cpu_layout(node):
829 using lscpu -p get the cpu layout.
830 Returns a list with each item representing a single cpu.
832 :param node: Node dictionary.
834 :returns: The cpu layout
839 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
841 raise RuntimeError('{} failed on node {} {}'.
842 format(cmd, node['host'], stderr))
845 lines = stdout.split('\n')
# Skip blank lines and the '#' comment header lscpu -p emits.
847 if line == '' or line[0] == '#':
849 linesplit = line.split(',')
850 layout = {'cpu': linesplit[0], 'core': linesplit[1],
851 'socket': linesplit[2], 'node': linesplit[3]}
853 # cpu, core, socket, node
# Gather CPU layout and SMT state for every node. (The `def` line of this
# method is not visible in this excerpt; `node` is presumably i[1].)
860 Get the cpu configuration
865 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
867 for i in self._nodes.items():
871 layout = self.get_cpu_layout(node)
872 node['cpu']['layout'] = layout
874 cpuinfo = node['cpuinfo']
875 smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
876 node['cpu']['smt_enabled'] = smt_enabled
# cpuinfo is deliberately not persisted to the config files.
878 # We don't want to write the cpuinfo
# Discovery driver: refreshes hugepage, device, CPU and grub state for all
# nodes. (The `def` line and several of the actual calls are not visible in
# this excerpt.)
886 Get the current system configuration.
890 # Get the Huge Page configuration
893 # Get the device configuration
894 self.get_devices_per_node()
896 # Get the CPU configuration
899 # Get the current grub cmdline
# Interactive wizard for the CPU reservation answers: cores for non-VPP
# processes, cores for VPP workers, and whether to pin a VPP main core.
902 def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
904 Ask the user questions related to the cpu configuration.
906 :param node: Node dictionary
907 :param total_cpus: The total number of cpus in the system
908 :param numa_nodes: The list of numa nodes in the system
910 :type total_cpus: int
911 :type numa_nodes: list
914 print "\nYour system has {} core(s) and {} Numa Nodes.". \
915 format(total_cpus, len(numa_nodes))
916 print "To begin, we suggest not reserving any cores for VPP",
917 print "or other processes."
918 print "Then to improve performance try reserving cores as needed. "
# Python 2 integer division: at most half the cores for other processes.
920 max_other_cores = total_cpus / 2
921 question = '\nHow many core(s) do you want to reserve for processes \
922 other than VPP? [0-{}][0]? '.format(str(max_other_cores))
923 total_other_cpus = self._ask_user_range(question, 0, max_other_cores,
925 node['cpu']['total_other_cpus'] = total_other_cpus
# max_vpp_cpus is computed in missing lines from what remains.
930 question = "How many core(s) shall we reserve for VPP workers[0-{}][0]? ". \
932 total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
933 node['cpu']['total_vpp_cpus'] = total_vpp_cpus
935 max_main_cpus = max_vpp_cpus - total_vpp_cpus
936 reserve_vpp_main_core = False
# Only offer a dedicated main core when a spare core exists.
937 if max_main_cpus > 0:
938 question = "Should we reserve 1 core for the VPP Main thread? "
939 question += "[y/N]? "
940 answer = self._ask_user_yn(question, 'n')
942 reserve_vpp_main_core = True
943 node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
# Main core id is resolved later by _calc_vpp_workers; 0 means unset.
944 node['cpu']['vpp_main_core'] = 0
# Interactive CPU configuration: derive totals and the per-numa cpu map
# from the lscpu layout, ask the user the reservation questions, then
# persist the answers to the auto-config file.
946 def modify_cpu(self):
948 Modify the cpu configuration, asking for the user for the values.
953 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
# `node` is presumably i[1] (assignment in a missing line).
955 for i in self._nodes.items():
958 total_cpus_per_slice = 0
962 cpu_layout = self.get_cpu_layout(node)
964 # Assume the number of cpus per slice is always the same as the
# Count how many consecutive layout entries share the first numa node —
# that count is treated as the uniform slice width.
967 for cpu in cpu_layout:
968 if cpu['node'] != first_node:
970 total_cpus_per_slice += 1
972 # Get the total number of cpus, cores, and numa nodes from the
974 for cpul in cpu_layout:
975 numa_node = cpul['node']
980 if numa_node not in cpus_per_node:
981 cpus_per_node[numa_node] = []
982 cpuperslice = int(cpu) % total_cpus_per_slice
# Record (start, end) tuples of each slice per numa node; the guard that
# only appends on slice starts is in missing lines.
984 cpus_per_node[numa_node].append((int(cpu), int(cpu) +
985 total_cpus_per_slice - 1))
986 if numa_node not in numa_nodes:
987 numa_nodes.append(numa_node)
988 if core not in cores:
990 node['cpu']['cpus_per_node'] = cpus_per_node
992 # Ask the user some questions
993 self._modify_cpu_questions(node, total_cpus, numa_nodes)
995 # Populate the interfaces with the numa node
996 ikeys = node['interfaces'].keys()
997 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
999 # We don't want to write the cpuinfo
1000 node['cpuinfo'] = ""
# Persist answers back to auto-config.yaml.
1003 self._update_auto_config()
# Two-pass wizard for devices not yet claimed by VPP or the OS: first offer
# to return them to the OS (bind a kernel driver), then offer the remainder
# to VPP. Item unpacking (dvid/device) is in missing lines.
1006 def _modify_other_devices(self, node,
1007 other_devices, kernel_devices, dpdk_devices):
1009 Modify the devices configuration, asking for the user for the values.
1013 odevices_len = len(other_devices)
1014 if odevices_len > 0:
1015 print "\nThese device(s) are currently NOT being used",
1016 print "by VPP or the OS.\n"
1017 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1018 question = "\nWould you like to give any of these devices"
1019 question += " back to the OS [Y/n]? "
1020 answer = self._ask_user_yn(question, 'Y')
# Per-device prompt; chosen devices are collected in `vppd` (built in
# missing lines) then moved to kernel_devices below.
1023 for dit in other_devices.items():
1026 question = "Would you like to use device {} for". \
1028 question += " the OS [y/N]? "
1029 answer = self._ask_user_yn(question, 'n')
# Rebind using the first unused driver reported for the device.
1031 driver = device['unused'][0]
1032 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1034 for dit in vppd.items():
1037 kernel_devices[dvid] = device
1038 del other_devices[dvid]
# Second pass: whatever is still unclaimed may be given to VPP.
1040 odevices_len = len(other_devices)
1041 if odevices_len > 0:
1042 print "\nThese device(s) are still NOT being used ",
1043 print "by VPP or the OS.\n"
1044 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1045 question = "\nWould you like use any of these for VPP [y/N]? "
1046 answer = self._ask_user_yn(question, 'N')
1049 for dit in other_devices.items():
1052 question = "Would you like to use device {} ".format(dvid)
1053 question += "for VPP [y/N]? "
1054 answer = self._ask_user_yn(question, 'n')
1057 for dit in vppd.items():
1060 dpdk_devices[dvid] = device
1061 del other_devices[dvid]
# Interactive device assignment per node: dispose of "other" devices, offer
# kernel-bound devices to VPP, allow removing devices from VPP, then build
# node['interfaces'] from the final dpdk set and persist the config.
1063 def modify_devices(self):
1065 Modify the devices configuration, asking for the user for the values.
# `node` is presumably i[1] (assignment in a missing line).
1069 for i in self._nodes.items():
1071 devices = node['devices']
1072 other_devices = devices['other_devices']
1073 kernel_devices = devices['kernel_devices']
1074 dpdk_devices = devices['dpdk_devices']
1077 self._modify_other_devices(node, other_devices,
1078 kernel_devices, dpdk_devices)
1080 # Get the devices again for this node
1081 self._get_device(node)
1082 devices = node['devices']
1083 kernel_devices = devices['kernel_devices']
1084 dpdk_devices = devices['dpdk_devices']
# Offer kernel-bound (but VPP-safe) devices to VPP; chosen ones are
# collected in `vppd` (built in missing lines) and moved below.
1086 klen = len(kernel_devices)
1088 print "\nThese devices have kernel interfaces, but",
1089 print "appear to be safe to use with VPP.\n"
1090 VppPCIUtil.show_vpp_devices(kernel_devices)
1091 question = "\nWould you like to use any of these "
1092 question += "device(s) for VPP [y/N]? "
1093 answer = self._ask_user_yn(question, 'n')
1096 for dit in kernel_devices.items():
1099 question = "Would you like to use device {} ". \
1101 question += "for VPP [y/N]? "
1102 answer = self._ask_user_yn(question, 'n')
1105 for dit in vppd.items():
1108 dpdk_devices[dvid] = device
1109 del kernel_devices[dvid]
# Allow the user to remove devices already assigned to VPP; removed
# devices are rebound to their first unused kernel driver.
1111 dlen = len(dpdk_devices)
1113 print "\nThese device(s) will be used by VPP.\n"
1114 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1115 question = "\nWould you like to remove any of "
1116 question += "these device(s) [y/N]? "
1117 answer = self._ask_user_yn(question, 'n')
1120 for dit in dpdk_devices.items():
1123 question = "Would you like to remove {} [y/N]? ". \
1125 answer = self._ask_user_yn(question, 'n')
1128 for dit in vppd.items():
1131 driver = device['unused'][0]
1132 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1133 kernel_devices[dvid] = device
1134 del dpdk_devices[dvid]
# Rebuild the interface map from the final dpdk device set.
1137 for dit in dpdk_devices.items():
1140 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1141 node['interfaces'] = interfaces
1143 print "\nThese device(s) will be used by VPP, please",
1144 print "rerun this option if this is incorrect.\n"
1145 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
# Persist the new device/interface assignment.
1147 self._update_auto_config()
# Interactive hugepage sizing: cap the request at 70% of free memory, ask
# the user for a total, persist it, then re-read actual hugepage state.
1150 def modify_huge_pages(self):
1152 Modify the huge page configuration, asking for the user for the values.
# `node` is presumably i[1] (assignment in a missing line).
1156 for i in self._nodes.items():
1159 total = node['hugepages']['actual_total']
1160 free = node['hugepages']['free']
1161 size = node['hugepages']['size']
# memfree/size are strings like "12345 kB"; take the numeric part.
1162 memfree = node['hugepages']['memfree'].split(' ')[0]
1163 hugesize = int(size.split(' ')[0])
1164 # The max number of huge pages should be no more than
1165 # 70% of total free memory
# Python 2 integer division throughout this expression.
1166 maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
1167 print "\nThere currently {} {} huge pages free.". \
1169 question = "Do you want to reconfigure the number of "
1170 question += "huge pages [y/N]? "
1171 answer = self._ask_user_yn(question, 'n')
# User declined: keep the currently configured total.
1173 node['hugepages']['total'] = total
1176 print "\nThere currently a total of {} huge pages.". \
1179 "How many huge pages do you want [{} - {}][{}]? ". \
1180 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
# NOTE(review): literals 1024 duplicate MIN_TOTAL_HUGE_PAGES — keep in
# sync with the constant.
1181 answer = self._ask_user_range(question, 1024, maxpages, 1024)
1182 node['hugepages']['total'] = str(answer)
1184 # Update auto-config.yaml
1185 self._update_auto_config()
1187 # Rediscover just the hugepages
1188 self.get_hugepages()
# Placeholder hook kept for symmetry with the other get_* discovery
# methods; TCP parameters are purely user-supplied, so there is nothing to
# rediscover from the system.
1190 def get_tcp_params(self):
1192 Get the tcp configuration
1195 # maybe nothing to do here?
# Ask for expected active/passive TCP session counts per node; values under
# 10000 are treated as 0 (the zeroing assignment is in a missing line).
1198 def acquire_tcp_params(self):
1200 Ask the user for TCP stack configuration parameters
# `node` is presumably i[1] (assignment in a missing line).
1204 for i in self._nodes.items():
1207 question = "\nHow many active-open / tcp client sessions are expected "
1208 question = question + "[0-10000000][0]? "
1209 answer = self._ask_user_range(question, 0, 10000000, 0)
1210 # Less than 10K is equivalent to 0
1211 if int(answer) < 10000:
1213 node['tcp']['active_open_sessions'] = answer
1215 question = "How many passive-open / tcp server sessions are expected "
1216 question = question + "[0-10000000][0]? "
1217 answer = self._ask_user_range(question, 0, 10000000, 0)
1218 # Less than 10K is equivalent to 0
1219 if int(answer) < 10000:
1221 node['tcp']['passive_open_sessions'] = answer
1223 # Update auto-config.yaml
1224 self._update_auto_config()
1226 # Rediscover tcp parameters
1227 self.get_tcp_params()
# Rebuild qemu on the node with the project's patches force-applied.
1230 def patch_qemu(node):
1232 Patch qemu with the correct patches.
1234 :param node: Node dictionary
1238 print '\nWe are patching the node "{}":\n'.format(node['host'])
1239 QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
# Pretty-print selected lscpu fields, per-numa cpu lists, SMT state and the
# VPP thread-to-cpu map for one node. (The `def` line and the item
# selection lines between prints are not visible in this excerpt.)
1244 print the CPU information
1248 cpu = CpuUtils.get_cpu_info_per_node(node)
1252 print "{:>20}: {}".format(item, cpu[item])
1255 print "{:>20}: {}".format(item, cpu[item])
1256 item = 'Thread(s) per core'
1258 print "{:>20}: {}".format(item, cpu[item])
1259 item = 'Core(s) per socket'
1261 print "{:>20}: {}".format(item, cpu[item])
1264 print "{:>20}: {}".format(item, cpu[item])
1265 item = 'NUMA node(s)'
1268 numa_nodes = int(cpu[item])
# xrange is Python-2-only.
1269 for i in xrange(0, numa_nodes):
1270 item = "NUMA node{} CPU(s)".format(i)
1271 print "{:>20}: {}".format(item, cpu[item])
1272 item = 'CPU max MHz'
1274 print "{:>20}: {}".format(item, cpu[item])
1275 item = 'CPU min MHz'
1277 print "{:>20}: {}".format(item, cpu[item])
# `smt` string is set in missing lines based on this flag.
1279 if node['cpu']['smt_enabled']:
1283 print "{:>20}: {}".format('SMT', smt)
1286 print "\nVPP Threads: (Name: Cpu Number)"
1287 vpp_processes = cpu['vpp_processes']
1288 for i in vpp_processes.items():
1289 print " {:10}: {:4}".format(i[0], i[1])
# Report the node's device landscape: mbuf budget, link-up / kernel / dpdk
# device lists, and a table of interfaces VPP currently owns (via vpputl,
# constructed in missing lines).
1292 def device_info(node):
1294 Show the device information.
1298 if 'cpu' in node and 'total_mbufs' in node['cpu']:
1299 total_mbufs = node['cpu']['total_mbufs']
# NOTE(review): `is not 0` identity comparison — should be `!= 0`.
1300 if total_mbufs is not 0:
1301 print "Total Number of Buffers: {}".format(total_mbufs)
1303 vpp = VppPCIUtil(node)
1304 vpp.get_all_devices()
1305 linkup_devs = vpp.get_link_up_devices()
1306 if len(linkup_devs):
1307 print ("\nDevices with link up (can not be used with VPP):")
1308 vpp.show_vpp_devices(linkup_devs, show_header=False)
1309 # for dev in linkup_devs:
1311 kernel_devs = vpp.get_kernel_devices()
1312 if len(kernel_devs):
1313 print ("\nDevices bound to kernel drivers:")
1314 vpp.show_vpp_devices(kernel_devs, show_header=False)
1316 print ("\nNo devices bound to kernel drivers")
1318 dpdk_devs = vpp.get_dpdk_devices()
1320 print ("\nDevices bound to DPDK drivers:")
1321 vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1324 print ("\nNo devices bound to DPDK drivers")
# `vpputl` is presumably a VPPUtil instance created in a missing line.
1327 interfaces = vpputl.get_hardware(node)
1328 if interfaces == {}:
1331 print ("\nDevices in use by VPP:")
# Only 'local0' present means no real interfaces are in use.
1333 if len(interfaces.items()) < 2:
1337 print "{:30} {:6} {:4} {:7} {:4} {:7}". \
1338 format('Name', 'Socket', 'RXQs',
1339 'RXDescs', 'TXQs', 'TXDescs')
# name/value come from each (name, value) item (unpacking in missing
# lines); local0 is skipped.
1340 for intf in sorted(interfaces.items()):
1343 if name == 'local0':
1345 socket = rx_qs = rx_ds = tx_qs = tx_ds = ''
1346 if 'cpu socket' in value:
1347 socket = int(value['cpu socket'])
1348 if 'rx queues' in value:
1349 rx_qs = int(value['rx queues'])
1350 if 'rx descs' in value:
1351 rx_ds = int(value['rx descs'])
1352 if 'tx queues' in value:
1353 tx_qs = int(value['tx queues'])
1354 if 'tx descs' in value:
1355 tx_ds = int(value['tx descs'])
1357 print ("{:30} {:>6} {:>4} {:>7} {:>4} {:>7}".
1358 format(name, socket, rx_qs, rx_ds, tx_qs, tx_ds))
# Delegate hugepage reporting to VppHugePageUtil.
1361 def hugepage_info(node):
1363 Show the huge page information.
1367 hpg = VppHugePageUtil(node)
1368 hpg.show_huge_pages()
# Sanity-check minimum resources (enough CPUs, enough free memory for the
# minimum hugepage allocation); prints warnings and returns a boolean set
# in missing lines.
1371 def min_system_resources(node):
1373 Check the system for basic minimum resources, return true if
1383 if 'layout' in node['cpu']:
1384 total_cpus = len(node['cpu']['layout'])
# The CPU-count threshold test itself is in a missing line.
1386 print "\nThere is only {} CPU(s) available on this system.".format(total_cpus)
1387 print "This is not enough to run VPP."
1391 if 'free' in node['hugepages'] and \
1392 'memfree' in node['hugepages'] and \
1393 'size' in node['hugepages']:
1394 free = node['hugepages']['free']
# Values are "N kB"-style strings; take the numeric part.
1395 memfree = float(node['hugepages']['memfree'].split(' ')[0])
1396 hugesize = float(node['hugepages']['size'].split(' ')[0])
# Memory the minimum hugepage count would consume, as a % of free memory.
1398 memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1399 percentmemhugepages = (memhugepages / memfree) * 100
# BUG(review): `free is '0'` compares string identity, not equality —
# relies on CPython string interning; should be `free == '0'`.
1400 if free is '0' and \
1401 percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1402 print "\nThe System has only {} of free memory.".format(int(memfree))
1403 print "You will not be able to allocate enough Huge Pages for VPP."
1410 Print the system information
1414 for i in self._nodes.items():
1415 print "\n=============================="
1419 print "NODE: {}\n".format(name)
1426 print "\nGrub Command Line:"
1429 " Current: {}".format(
1430 node['grub']['current_cmdline'])
1432 " Configured: {}".format(
1433 node['grub']['default_cmdline'])
1436 print "\nHuge Pages:"
1437 self.hugepage_info(node)
1441 self.device_info(node)
1444 print "\nVPP Service Status:"
1445 state, errors = VPPUtil.status(node)
1446 print " {}".format(state)
1448 print " {}".format(e)
1450 # Minimum system resources
1451 self.min_system_resources(node)
1453 print "\n=============================="
1455 def _ipv4_interface_setup_questions(self, node):
1457 Ask the user some questions and get a list of interfaces
1458 and IPv4 addresses associated with those interfaces
1460 :param node: Node dictionary.
1462 :returns: A list or interfaces with ip addresses
1467 interfaces = vpputl.get_hardware(node)
1468 if interfaces == {}:
1471 interfaces_with_ip = []
1472 for intf in sorted(interfaces.items()):
1474 if name == 'local0':
1477 question = "Would you like add address to interface {} [Y/n]? ".format(name)
1478 answer = self._ask_user_yn(question, 'y')
1481 addr = self._ask_user_ipv4()
1482 address['name'] = name
1483 address['addr'] = addr
1484 interfaces_with_ip.append(address)
1486 return interfaces_with_ip
1488 def ipv4_interface_setup(self):
1490 After asking the user some questions, get a list of interfaces
1491 and IPv4 addresses associated with those interfaces
1495 for i in self._nodes.items():
1498 # Show the current interfaces with IP addresses
1499 current_ints = VPPUtil.get_int_ip(node)
1500 if current_ints is not {}:
1501 print ("\nThese are the current interfaces with IP addresses:")
1502 for items in sorted(current_ints.items()):
1505 if 'address' not in value:
1508 address = value['address']
1509 print ("{:30} {:20} {:10}".format(name, address, value['state']))
1510 question = "\nWould you like to keep this configuration [Y/n]? "
1511 answer = self._ask_user_yn(question, 'y')
1515 print ("\nThere are currently no interfaces with IP addresses.")
1517 # Create a script that add the ip addresses to the interfaces
1518 # and brings the interfaces up
1519 ints_with_addrs = self._ipv4_interface_setup_questions(node)
1521 for ints in ints_with_addrs:
1524 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1525 setintupstr = 'set int state {} up\n'.format(name)
1526 content += setipstr + setintupstr
1528 # Write the content to the script
1529 rootdir = node['rootdir']
1530 filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1531 with open(filename, 'w+') as sfile:
1532 sfile.write(content)
1534 # Execute the script
1535 cmd = 'vppctl exec {}'.format(filename)
1536 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1538 logging.debug(stderr)
1540 print("\nA script as been created at {}".format(filename))
1541 print("This script can be run using the following:")
1542 print("vppctl exec {}\n".format(filename))
1544 def _create_vints_questions(self, node):
1546 Ask the user some questions and get a list of interfaces
1547 and IPv4 addresses associated with those interfaces
1549 :param node: Node dictionary.
1551 :returns: A list or interfaces with ip addresses
1556 interfaces = vpputl.get_hardware(node)
1557 if interfaces == {}:
1560 # First delete all the Virtual interfaces
1561 for intf in sorted(interfaces.items()):
1563 if name[:7] == 'Virtual':
1564 cmd = 'vppctl delete vhost-user {}'.format(name)
1565 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1567 logging.debug('{} failed on node {} {}'.format(
1568 cmd, node['host'], stderr))
1570 # Create a virtual interface, for each interface the user wants to use
1571 interfaces = vpputl.get_hardware(node)
1572 if interfaces == {}:
1574 interfaces_with_virtual_interfaces = []
1576 for intf in sorted(interfaces.items()):
1578 if name == 'local0':
1581 question = "Would you like connect this interface {} to the VM [Y/n]? ".format(name)
1582 answer = self._ask_user_yn(question, 'y')
1584 sockfilename = '/tmp/sock{}.sock'.format(inum)
1585 if os.path.exists(sockfilename):
1586 os.remove(sockfilename)
1587 cmd = 'vppctl create vhost-user socket {} server'.format(sockfilename)
1588 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1590 raise RuntimeError("Create vhost failed on node {} {}."
1591 .format(node['host'], stderr))
1592 vintname = stdout.rstrip('\r\n')
1594 interface = {'name': name, 'virtualinterface': '{}'.format(vintname),
1595 'bridge': '{}'.format(inum)}
1597 interfaces_with_virtual_interfaces.append(interface)
1599 return interfaces_with_virtual_interfaces
1601 def create_and_bridge_virtual_interfaces(self):
1603 After asking the user some questions, create a VM and connect the interfaces
1608 for i in self._nodes.items():
1611 # Show the current bridge and interface configuration
1612 print "\nThis the current bridge configuration:"
1613 VPPUtil.show_bridge(node)
1614 question = "\nWould you like to keep this configuration [Y/n]? "
1615 answer = self._ask_user_yn(question, 'y')
1619 # Create a script that builds a bridge configuration with physical interfaces
1620 # and virtual interfaces
1621 ints_with_vints = self._create_vints_questions(node)
1623 for intf in ints_with_vints:
1624 vhoststr = 'comment { The following command creates the socket }\n'
1625 vhoststr += 'comment { and returns a virtual interface }\n'
1626 vhoststr += 'comment {{ create vhost-user socket /tmp/sock{}.sock server }}\n'. \
1627 format(intf['bridge'])
1629 setintdnstr = 'set interface state {} down\n'.format(intf['name'])
1631 setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
1632 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])
1634 # set interface state VirtualEthernet/0/0/0 up
1635 setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])
1637 # set interface state VirtualEthernet/0/0/0 down
1638 setintupstr = 'set interface state {} up\n'.format(intf['name'])
1640 content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
1642 # Write the content to the script
1643 rootdir = node['rootdir']
1644 filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1645 with open(filename, 'w+') as sfile:
1646 sfile.write(content)
1648 # Execute the script
1649 cmd = 'vppctl exec {}'.format(filename)
1650 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1652 logging.debug(stderr)
1654 print("\nA script as been created at {}".format(filename))
1655 print("This script can be run using the following:")
1656 print("vppctl exec {}\n".format(filename))