1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
15 from __future__ import absolute_import, division, print_function
20 from ipaddress import ip_address
24 from vpplib.VPPUtil import VPPUtil
25 from vpplib.VppPCIUtil import VppPCIUtil
26 from vpplib.VppHugePageUtil import VppHugePageUtil
27 from vpplib.CpuUtils import CpuUtils
28 from vpplib.VppGrubUtil import VppGrubUtil
29 from vpplib.QemuUtils import QemuUtils
31 # Python2/3 compatible
# NOTE(review): on Python 3 `raw_input` does not exist; this assignment alone
# would raise NameError. The surrounding try/except NameError guard appears to
# be on the lines missing from this sampled dump — TODO confirm.
33 input = raw_input # noqa
37 __all__ = ["AutoConfig"]
41 MIN_TOTAL_HUGE_PAGES = 1024
42 MAX_PERCENT_FOR_HUGE_PAGES = 70
44 IPERFVM_XML = 'configs/iperf-vm.xml'
45 IPERFVM_IMAGE = 'images/xenial-mod.img'
46 IPERFVM_ISO = 'configs/cloud-config.iso'
49 class AutoConfig(object):
50 """Auto Configuration Tools"""
# NOTE(review): sampled dump — interior lines are missing from this method.
# Visible behavior: stores the auto-config path (rootdir + filename), the root
# directory, and initializes per-instance dictionaries/strings. The `clean`
# flag and the attributes it presumably initializes (e.g. _clean, _metadata,
# _nodes) are on missing lines — TODO confirm against the full file.
52 def __init__(self, rootdir, filename, clean=False):
54 The Auto Configure class.
56 :param rootdir: The root directory for all the auto configuration files
57 :param filename: The autoconfiguration file
58 :param clean: When set initialize the nodes from the auto-config file
63 self._autoconfig_filename = rootdir + filename
64 self._rootdir = rootdir
67 self._vpp_devices_node = {}
68 self._hugepage_config = ""
71 self._sockfilename = ""
75 Returns the nodes dictionary.
# NOTE(review): sampled dump — interior lines missing. Static helper that
# makes a one-time backup copy `<filename>.orig` of the given file (only when
# the .orig copy does not already exist, per the `ls` probe below). The
# @staticmethod decorator and a likely `if ret != 0:` guard before the debug
# log are on missing lines — TODO confirm.
84 def _autoconfig_backup_file(filename):
88 :param filename: The file to backup
92 # Does a copy of the file exist, if not create one
93 ofile = filename + '.orig'
94 (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
# If `ls` did not echo back the .orig path, the backup does not exist yet.
97 if stdout.strip('\n') != ofile:
98 cmd = 'sudo cp {} {}'.format(filename, ofile)
99 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
101 logging.debug(stderr)
103 # noinspection PyBroadException
# NOTE(review): sampled dump — the retry loop (while/try/except) around this
# prompt is on missing lines. Prompts for an IPv4 address, optionally with a
# /prefix; if no prefix is given it asks for a netmask and converts it.
# NOTE(review): stdlib `ipaddress.ip_address(...)` has no `netmask_bits()`
# method — this presumably relies on the py2 `ipaddress` backport or a helper
# shadowing the import; TODO confirm which `ip_address` is in scope.
105 def _ask_user_ipv4():
107 Asks the user for a number within a range.
108 default is returned if return is entered.
110 :returns: IP address with cidr
115 answer = input("Please enter the IPv4 Address [n.n.n.n/n]: ")
117 ipinput = answer.split('/')
118 ipaddr = ip_address(ipinput[0])
120 plen = answer.split('/')[1]
122 answer = input("Please enter the netmask [n.n.n.n]: ")
123 plen = ip_address(answer).netmask_bits()
124 return '{}/{}'.format(ipaddr, plen)
126 print("Please enter a valid IPv4 address.")
# NOTE(review): sampled dump — the loop, the default-on-empty-answer branch,
# and the return statement are on missing lines.
129 def _ask_user_range(question, first, last, default):
131 Asks the user for a number within a range.
132 default is returned if return is entered.
134 :param question: Text of a question.
135 :param first: First number in the range
136 :param last: Last number in the range
137 :param default: The value returned when return is entered
138 :type question: string
142 :returns: The answer to the question
147 answer = input(question)
# NOTE(review): the character class `[0-9+]` matches any single digit (or a
# literal '+') anywhere in the answer — weak validation; e.g. "a1b" passes
# the regex and then int() would raise. Worth tightening to r'^\d+$'.
151 if re.findall(r'[0-9+]', answer):
152 if int(answer) in range(first, last + 1):
# NOTE(review): user-facing messages below read "Please a value/number" —
# apparently missing the word "enter" (cannot change runtime strings here).
155 print("Please a value between {} and {} or Return.".
158 print("Please a number between {} and {} or Return.".
# NOTE(review): sampled dump — the `input_valid` initialization, the
# default-on-empty-answer branch, and the return are on missing lines.
164 def _ask_user_yn(question, default):
166 Asks the user for a yes or no question.
168 :param question: Text of a question.
169 :param default: The value returned when return is entered
170 :type question: string
171 :type default: string
172 :returns: The answer to the question
177 default = default.lower()
179 while not input_valid:
180 answer = input(question)
# NOTE(review): this regex accepts a Y/y/N/n appearing anywhere in the
# answer, then only the first character is used — "nyet" would be 'n'.
183 if re.findall(r'[YyNn]', answer):
185 answer = answer[0].lower()
187 print("Please answer Y, N or Return.")
# NOTE(review): sampled dump — try statements, raise statements and loop body
# lines are missing. Loads the auto-config YAML into self._metadata; then, if
# not cleaning and a system config file exists, loads nodes from it, otherwise
# takes nodes from the auto-config topology; finally stamps rootdir into each
# node.
191 def _loadconfig(self):
193 Load the testbed configuration, given the auto configuration file.
197 # Get the Topology, from the topology layout file
199 with open(self._autoconfig_filename, 'r') as stream:
# NOTE(review): yaml.load() without an explicit Loader is deprecated and
# unsafe on untrusted input — prefer yaml.safe_load(stream).
201 topo = yaml.load(stream)
202 if 'metadata' in topo:
203 self._metadata = topo['metadata']
204 except yaml.YAMLError as exc:
# NOTE(review): format() below receives two arguments but the message has
# one placeholder — `exc` is silently dropped from the error text.
206 "Couldn't read the Auto config file {}.".format(
207 self._autoconfig_filename, exc))
209 systemfile = self._rootdir + self._metadata['system_config_file']
210 if self._clean is False and os.path.isfile(systemfile):
211 with open(systemfile, 'r') as sysstream:
# NOTE(review): same unsafe yaml.load() — prefer yaml.safe_load(sysstream).
213 systopo = yaml.load(sysstream)
214 if 'nodes' in systopo:
215 self._nodes = systopo['nodes']
216 except yaml.YAMLError as sysexc:
218 "Couldn't read the System config file {}.".format(
221 # Get the nodes from Auto Config
223 self._nodes = topo['nodes']
225 # Set the root directory in all the nodes
# NOTE(review): `node = i[1]` is presumably on a missing line between the
# for and the assignment below — TODO confirm.
226 for i in self._nodes.items():
228 node['rootdir'] = self._rootdir
def updateconfig(self):
    """Write the current testbed state to the system configuration file.

    Serializes the metadata and node dictionaries as YAML into the file
    named by the metadata entry 'system_config_file', relative to the
    root directory.
    """
    # Assemble the whole document first, then dump it in one call.
    config = {'metadata': self._metadata, 'nodes': self._nodes}
    syspath = self._rootdir + self._metadata['system_config_file']
    with open(syspath, 'w') as sysfile:
        yaml.dump(config, sysfile)
# NOTE(review): sampled dump — the try statement, the raise in the except
# branch, and the `key = i[0]` / `node = i[1]` / `port = item[0]` /
# `interface = item[1]` unpacking lines are missing. Re-reads the auto-config
# YAML, copies the discovered interface/cpu/tcp/hugepage values from
# self._nodes into it, and writes it back.
246 def _update_auto_config(self):
248 Write the auto configuration file with the new configuration data,
253 # Initialize the yaml data
255 with open(self._autoconfig_filename, 'r') as stream:
# NOTE(review): yaml.load() without a Loader is deprecated/unsafe — prefer
# yaml.safe_load(stream).
257 ydata = yaml.load(stream)
259 nodes = ydata['nodes']
260 except yaml.YAMLError as exc:
264 for i in nodes.items():
# Rebuild the interfaces section from the discovered data.
269 node['interfaces'] = {}
270 for item in self._nodes[key]['interfaces'].items():
274 node['interfaces'][port] = {}
275 addr = '{}'.format(interface['pci_address'])
276 node['interfaces'][port]['pci_address'] = addr
277 if 'mac_address' in interface:
278 node['interfaces'][port]['mac_address'] = \
279 interface['mac_address']
# Copy over the cpu tuning values the user chose, when present.
281 if 'total_other_cpus' in self._nodes[key]['cpu']:
282 node['cpu']['total_other_cpus'] = \
283 self._nodes[key]['cpu']['total_other_cpus']
284 if 'total_vpp_cpus' in self._nodes[key]['cpu']:
285 node['cpu']['total_vpp_cpus'] = \
286 self._nodes[key]['cpu']['total_vpp_cpus']
287 if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
288 node['cpu']['reserve_vpp_main_core'] = \
289 self._nodes[key]['cpu']['reserve_vpp_main_core']
# Copy the tcp session counts, when present.
292 if 'active_open_sessions' in self._nodes[key]['tcp']:
293 node['tcp']['active_open_sessions'] = \
294 self._nodes[key]['tcp']['active_open_sessions']
295 if 'passive_open_sessions' in self._nodes[key]['tcp']:
296 node['tcp']['passive_open_sessions'] = \
297 self._nodes[key]['tcp']['passive_open_sessions']
300 node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
302 # Write the auto config config file
303 with open(self._autoconfig_filename, 'w') as yamlfile:
304 yaml.dump(ydata, yamlfile)
# NOTE(review): sampled dump — `node = i[1]` is presumably on the missing
# line 314. Runs the huge-page dry-run apply on every node.
306 def apply_huge_pages(self):
308 Apply the huge page config
312 for i in self._nodes.items():
315 hpg = VppHugePageUtil(node)
316 hpg.hugepages_dryrun_apply()
# NOTE(review): sampled dump — the @staticmethod decorator, the initial
# `unix = ...` default string and the early return when no 'unix' section
# exists are on missing lines. Builds the "unix { ... }" fragment for the VPP
# startup config from node['vpp']['unix'].
319 def _apply_vpp_unix(node):
321 Apply the VPP Unix config
323 :param node: Node dictionary with cpuinfo.
328 if 'unix' not in node['vpp']:
331 unixv = node['vpp']['unix']
332 if 'interactive' in unixv:
333 interactive = unixv['interactive']
334 if interactive is True:
335 unix = ' interactive\n'
337 return unix.rstrip('\n')
# NOTE(review): sampled dump — the @staticmethod decorator, the initial
# `cpu = ''`, the `vpp_worker_str` initialization, the `if i > 0:` guard
# around the comma separator, and the final return are on missing lines.
# Builds the "cpu { main-core / corelist-workers }" startup fragment.
340 def _apply_vpp_cpu(node):
342 Apply the VPP cpu config
344 :param node: Node dictionary with cpuinfo.
350 if 'vpp_main_core' in node['cpu']:
351 vpp_main_core = node['cpu']['vpp_main_core']
354 if vpp_main_core != 0:
355 cpu += ' main-core {}\n'.format(vpp_main_core)
# vpp_workers is a list of (start, end) core-range tuples.
358 vpp_workers = node['cpu']['vpp_workers']
359 vpp_worker_len = len(vpp_workers)
360 if vpp_worker_len > 0:
362 for i, worker in enumerate(vpp_workers):
364 vpp_worker_str += ','
# Single core renders as "N", a range as "N-M".
365 if worker[0] == worker[1]:
366 vpp_worker_str += "{}".format(worker[0])
368 vpp_worker_str += "{}-{}".format(worker[0], worker[1])
370 cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
# NOTE(review): sampled dump — the @staticmethod decorator, the `devices`
# initialization, the per-item unpacking (`value = item[1]`), the queue-count
# defaults, the desc-entry conditionals and the final return are on missing
# lines. Builds the dpdk "dev { ... }" startup fragment per PCI device.
375 def _apply_vpp_devices(node):
377 Apply VPP PCI Device configuration to vpp startup.
379 :param node: Node dictionary with cpuinfo.
384 ports_per_numa = node['cpu']['ports_per_numa']
385 total_mbufs = node['cpu']['total_mbufs']
387 for item in ports_per_numa.items():
389 interfaces = value['interfaces']
391 # if 0 was specified for the number of vpp workers, use 1 queue
394 if 'rx_queues' in value:
395 num_rx_queues = value['rx_queues']
396 if 'tx_queues' in value:
397 num_tx_queues = value['tx_queues']
402 # Create the devices string
403 for interface in interfaces:
404 pci_address = interface['pci_address']
# Strip quoting left over from YAML round-tripping.
405 pci_address = pci_address.lstrip("'").rstrip("'")
407 devices += ' dev {} {{ \n'.format(pci_address)
409 devices += ' num-rx-queues {}\n'.format(num_rx_queues)
411 devices += ' num-rx-queues {}\n'.format(1)
413 devices += ' num-tx-queues {}\n'.format(num_tx_queues)
415 devices += ' num-rx-desc {}\n'.format(num_rx_desc)
417 devices += ' num-tx-desc {}\n'.format(num_tx_desc)
# NOTE(review): the comment below disagrees with the code — the condition
# emits num-mbufs only when total_mbufs is nonzero AND GREATER than 16384,
# not "less than the default". TODO confirm intended threshold direction.
420 # If the total mbufs is not 0 or less than the default, set num-bufs
421 logging.debug("Total mbufs: {}".format(total_mbufs))
422 if total_mbufs != 0 and total_mbufs > 16384:
423 devices += '\n num-mbufs {}'.format(total_mbufs)
# NOTE(review): sampled dump — the @staticmethod decorator, the `start`/`end`
# derivation from the numa cpu slice, and the adjustment of `start` when the
# main core is reserved are on missing lines. Attempts to place the requested
# number of workers (and optionally the main core) inside one numa node's cpu
# range, after the cpus reserved for other processes.
428 def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end,
430 reserve_vpp_main_core):
432 Calculate the VPP worker information
434 :param node: Node dictionary
435 :param vpp_workers: List of VPP workers
436 :param numa_node: Numa node
437 :param other_cpus_end: The end of the cpus allocated for cores
439 :param total_vpp_workers: The number of vpp workers needed
440 :param reserve_vpp_main_core: Is there a core needed for
444 :type other_cpus_end: int
445 :type total_vpp_workers: int
446 :type reserve_vpp_main_core: bool
447 :returns: Is a core still needed for the vpp main core
451 # Can we fit the workers in one of these slices
452 cpus = node['cpu']['cpus_per_node'][numa_node]
# Never overlap the cpus reserved for non-VPP processes.
456 if start <= other_cpus_end:
457 start = other_cpus_end + 1
459 if reserve_vpp_main_core:
462 workers_end = start + total_vpp_workers - 1
464 if workers_end <= end:
# The main core sits immediately before the worker range.
465 if reserve_vpp_main_core:
466 node['cpu']['vpp_main_core'] = start - 1
467 reserve_vpp_main_core = False
468 if total_vpp_workers:
469 vpp_workers.append((start, workers_end))
472 # We still need to reserve the main core
# Fallback: no slice fit — place the main core right after the other cpus.
473 if reserve_vpp_main_core:
474 node['cpu']['vpp_main_core'] = other_cpus_end + 1
476 return reserve_vpp_main_core
479 def _calc_desc_and_queues(total_numa_nodes,
480 total_ports_per_numa,
482 ports_per_numa_value):
484 Calculate the number of descriptors and queues
486 :param total_numa_nodes: The total number of numa nodes
487 :param total_ports_per_numa: The total number of ports for this
489 :param total_rx_queues: The total number of rx queues / port
490 :param ports_per_numa_value: The value from the ports_per_numa
492 :type total_numa_nodes: int
493 :type total_ports_per_numa: int
494 :type total_rx_queues: int
495 :type ports_per_numa_value: dict
496 :returns The total number of message buffers
500 # Get the number of rx queues
501 rx_queues = max(1, total_rx_queues)
502 tx_queues = rx_queues * total_numa_nodes + 1
504 # Get the descriptor entries
506 ports_per_numa_value['rx_queues'] = rx_queues
507 total_mbufs = (((rx_queues * desc_entries) +
508 (tx_queues * desc_entries)) *
509 total_ports_per_numa)
514 def _create_ports_per_numa(node, interfaces):
516 Create a dictionary or ports per numa node
517 :param node: Node dictionary
518 :param interfaces: All the interfaces to be used by vpp
520 :type interfaces: dict
521 :returns: The ports per numa dictionary
525 # Make a list of ports by numa node
527 for item in interfaces.items():
529 if i['numa_node'] not in ports_per_numa:
530 ports_per_numa[i['numa_node']] = {'interfaces': []}
531 ports_per_numa[i['numa_node']]['interfaces'].append(i)
533 ports_per_numa[i['numa_node']]['interfaces'].append(i)
534 node['cpu']['ports_per_numa'] = ports_per_numa
536 return ports_per_numa
# NOTE(review): sampled dump — node unpacking (`node = i[1]`), the
# `other_cpus_start` value, `vpp_workers` initialization, `total_main`
# derivation, the numa-node unpacking inside the inner loop, and the
# total_mbufs accumulation are on missing lines. Orchestrates the per-node
# cpu layout: reserves other-process cpus, then allocates VPP main core,
# workers and mbufs per numa node that owns ports.
538 def calculate_cpu_parameters(self):
540 Calculate the cpu configuration.
544 # Calculate the cpu parameters, needed for the
545 # vpp_startup and grub configuration
546 for i in self._nodes.items():
549 # get total number of nic ports
550 interfaces = node['interfaces']
552 # Make a list of ports by numa node
553 ports_per_numa = self._create_ports_per_numa(node, interfaces)
555 # Get the number of cpus to skip, we never use the first cpu
557 other_cpus_end = other_cpus_start + \
558 node['cpu']['total_other_cpus'] - 1
560 if other_cpus_end != 0:
561 other_workers = (other_cpus_start, other_cpus_end)
562 node['cpu']['other_workers'] = other_workers
564 # Allocate the VPP main core and workers
566 reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
567 total_vpp_cpus = node['cpu']['total_vpp_cpus']
568 total_rx_queues = node['cpu']['total_rx_queues']
570 # If total_vpp_cpus is 0 or is less than the numa nodes with ports
571 # then we shouldn't get workers
572 total_workers_node = 0
573 if len(ports_per_numa):
# Integer division: workers are split evenly across numa nodes with ports.
574 total_workers_node = total_vpp_cpus // len(ports_per_numa)
576 if reserve_vpp_main_core:
579 if total_main + total_workers_node != 0:
580 for item in ports_per_numa.items():
584 # Get the number of descriptors and queues
585 mbufs = self._calc_desc_and_queues(
587 len(value['interfaces']), total_rx_queues, value)
590 # Get the VPP workers
591 reserve_vpp_main_core = self._calc_vpp_workers(
592 node, vpp_workers, numa_node,
593 other_cpus_end, total_workers_node,
594 reserve_vpp_main_core)
597 total_mbufs = int(total_mbufs)
602 node['cpu']['vpp_workers'] = vpp_workers
603 node['cpu']['total_mbufs'] = total_mbufs
# NOTE(review): sampled dump — the @staticmethod decorator, the initial
# api-segment string construction, the early return when both session counts
# are zero, and several list/concatenation lines are missing. Builds the
# "session { ... }" / "tcp { ... }" startup fragments sized from the
# requested active/passive open session counts.
609 def _apply_vpp_tcp(node):
611 Apply the VPP Unix config
613 :param node: Node dictionary with cpuinfo.
617 active_open_sessions = node['tcp']['active_open_sessions']
618 aos = int(active_open_sessions)
620 passive_open_sessions = node['tcp']['passive_open_sessions']
621 pos = int(passive_open_sessions)
623 # Always generate the api-segment gid vpp section, even with 0 sessions
# Early exit path: when no sessions were requested only the api-segment
# part is returned (the surrounding lines are missing from this dump).
630 return tcp.rstrip('\n')
633 "# TCP stack-related configuration parameters",
634 "# expecting {:d} client sessions, {:d} server sessions\n".format(
638 " global-size 2000M",
643 " event-queue-length {:d}".format(aos + pos),
644 " preallocated-sessions {:d}".format(aos + pos),
# Bucket counts are sized to a quarter of the total expected sessions.
645 " v4-session-table-buckets {:d}".format((aos + pos) // 4),
646 " v4-session-table-memory 3g\n"
649 tcp = tcp + " v4-halfopen-table-buckets {:d}".format(
650 (aos + pos) // 4) + "\n"
651 tcp = tcp + " v4-halfopen-table-memory 3g\n"
652 tcp = tcp + " local-endpoints-table-buckets {:d}".format(
653 (aos + pos) // 4) + "\n"
654 tcp = tcp + " local-endpoints-table-memory 3g\n"
657 tcp = tcp + "tcp {\n"
658 tcp = tcp + " preallocated-connections {:d}".format(aos + pos) + "\n"
660 tcp = tcp + " preallocated-half-open-connections {:d}".format(
664 return tcp.rstrip('\n')
# NOTE(review): sampled dump — node unpacking, the `if ret != 0:` guards
# after each exec_command, and the remaining format kwargs (cpu=, devices=,
# tcp=) on the stdout.format call are on missing lines. Renders the startup
# template with the generated unix/cpu/devices/tcp fragments and writes it
# back to each node via sudo.
666 def apply_vpp_startup(self):
668 Apply the vpp startup configuration
672 # Apply the VPP startup configuration
673 for i in self._nodes.items():
676 # Get the startup file
677 rootdir = node['rootdir']
678 sfile = rootdir + node['vpp']['startup_config_file']
681 devices = self._apply_vpp_devices(node)
684 cpu = self._apply_vpp_cpu(node)
686 # Get the unix config
687 unix = self._apply_vpp_unix(node)
689 # Get the TCP configuration, if any
690 tcp = self._apply_vpp_tcp(node)
692 # Make a backup if needed
693 self._autoconfig_backup_file(sfile)
# The startup file is generated from a <file>.template next to it.
696 tfile = sfile + '.template'
697 (ret, stdout, stderr) = \
698 VPPUtil.exec_command('cat {}'.format(tfile))
700 raise RuntimeError('Executing cat command failed to node {}'.
701 format(node['host']))
702 startup = stdout.format(unix=unix,
707 (ret, stdout, stderr) = \
708 VPPUtil.exec_command('rm {}'.format(sfile))
710 logging.debug(stderr)
# NOTE(review): writing via a shell heredoc; the config content is locally
# generated so injection risk is low, but a tempfile + sudo cp would be
# more robust.
712 cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
713 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
715 raise RuntimeError('Writing config failed node {}'.
716 format(node['host']))
# NOTE(review): sampled dump — node unpacking, `isolated_cpus`
# initialization, the `if idx > 0:` comma separator, and the `node['grub']`
# dict creation are on missing lines. Builds an isolcpus string from the
# other-process, main-core and worker ranges and applies it to grub.
718 def apply_grub_cmdline(self):
720 Apply the grub cmdline
724 for i in self._nodes.items():
727 # Get the isolated CPUs
728 other_workers = node['cpu']['other_workers']
729 vpp_workers = node['cpu']['vpp_workers']
# NOTE(review): `vpp_main_core` may be referenced below without having
# been assigned when 'vpp_main_core' is absent from node['cpu'] — the
# else/default branch is presumably on a missing line; TODO confirm.
730 if 'vpp_main_core' in node['cpu']:
731 vpp_main_core = node['cpu']['vpp_main_core']
735 if other_workers is not None:
736 all_workers = [other_workers]
737 if vpp_main_core != 0:
738 all_workers += [(vpp_main_core, vpp_main_core)]
739 all_workers += vpp_workers
741 for idx, worker in enumerate(all_workers):
# Single core renders as "N", a range as "N-M".
746 if worker[0] == worker[1]:
747 isolated_cpus += "{}".format(worker[0])
749 isolated_cpus += "{}-{}".format(worker[0], worker[1])
751 vppgrb = VppGrubUtil(node)
752 current_cmdline = vppgrb.get_current_cmdline()
753 if 'grub' not in node:
755 node['grub']['current_cmdline'] = current_cmdline
756 node['grub']['default_cmdline'] = \
757 vppgrb.apply_cmdline(node, isolated_cpus)
# NOTE(review): sampled dump — node unpacking (`node = i[1]`) is on a
# missing line. Queries each node's huge-page state and records it on the
# node dictionary.
761 def get_hugepages(self):
763 Get the hugepage configuration
767 for i in self._nodes.items():
770 hpg = VppHugePageUtil(node)
771 max_map_count, shmmax = hpg.get_huge_page_config()
772 node['hugepages']['max_map_count'] = max_map_count
# NOTE(review): key 'shmax' looks like a typo for 'shmmax', but consumers
# may already rely on this spelling — do not rename without auditing
# readers of node['hugepages'].
773 node['hugepages']['shmax'] = shmmax
774 total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
775 node['hugepages']['actual_total'] = total
776 node['hugepages']['free'] = free
777 node['hugepages']['size'] = size
778 node['hugepages']['memtotal'] = memtotal
779 node['hugepages']['memfree'] = memfree
# NOTE(review): sampled dump — the `def get_grub(self):` line itself, node
# unpacking, `current_iso_cpus` initialization, and the `node['grub']` dict
# creation are missing from this view. Parses isolcpus= out of the current
# kernel cmdline and counts the isolated cpus.
785 Get the grub configuration
789 for i in self._nodes.items():
792 vppgrb = VppGrubUtil(node)
793 current_cmdline = vppgrb.get_current_cmdline()
794 default_cmdline = vppgrb.get_default_cmdline()
796 # Get the total number of isolated CPUs
798 iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
799 iso_cpurl = len(iso_cpur)
801 iso_cpu_str = iso_cpur[0]
# "isolcpus=<list>" -> keep only the comma-separated cpu list.
802 iso_cpu_str = iso_cpu_str.split('=')[1]
803 iso_cpul = iso_cpu_str.split(',')
804 for iso_cpu in iso_cpul:
805 isocpuspl = iso_cpu.split('-')
806 if len(isocpuspl) == 1:
807 current_iso_cpus += 1
809 first = int(isocpuspl[0])
810 second = int(isocpuspl[1])
812 current_iso_cpus += 1
# NOTE(review): a range "a-b" appears to add (b - a) here rather than
# (b - a + 1); the branch structure around lines 811-814 is incomplete in
# this dump, so confirm the off-by-one against the full file.
814 current_iso_cpus += second - first
816 if 'grub' not in node:
818 node['grub']['current_cmdline'] = current_cmdline
819 node['grub']['default_cmdline'] = default_cmdline
820 node['grub']['current_iso_cpus'] = current_iso_cpus
# NOTE(review): sampled dump — the @staticmethod decorator and the
# `node['devices'] = {}` initialization are presumably on missing lines.
# Scans the node's PCI devices and caches the categorized lists on the node.
825 def _get_device(node):
827 Get the device configuration for a single node
829 :param node: Node dictionary with cpuinfo.
834 vpp = VppPCIUtil(node)
835 vpp.get_all_devices()
837 # Save the device information
839 node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
840 node['devices']['kernel_devices'] = vpp.get_kernel_devices()
841 node['devices']['other_devices'] = vpp.get_other_devices()
842 node['devices']['linkup_devices'] = vpp.get_link_up_devices()
# NOTE(review): sampled dump — node unpacking (`node = i[1]`) is on a
# missing line. Refreshes the device inventory for every node.
844 def get_devices_per_node(self):
846 Get the device configuration for all the nodes
850 for i in self._nodes.items():
852 # Update the interface data
854 self._get_device(node)
# NOTE(review): sampled dump — the decorator, the `cmd = 'lscpu -p'`
# assignment, the list initialization/append and the return statement are on
# missing lines. Parses `lscpu -p` output into per-cpu dictionaries.
859 def get_cpu_layout(node):
863 using lscpu -p get the cpu layout.
864 Returns a list with each item representing a single cpu.
866 :param node: Node dictionary.
868 :returns: The cpu layout
873 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
875 raise RuntimeError('{} failed on node {} {}'.
876 format(cmd, node['host'], stderr))
879 lines = stdout.split('\n')
# Skip blank lines and the '#' comment header emitted by lscpu -p.
881 if line == '' or line[0] == '#':
883 linesplit = line.split(',')
884 layout = {'cpu': linesplit[0], 'core': linesplit[1],
885 'socket': linesplit[2], 'node': linesplit[3]}
887 # cpu, core, socket, node
# NOTE(review): sampled dump — the `def get_cpu(self):` line itself and node
# unpacking are missing from this view. Gathers per-node cpu layout and SMT
# state, then blanks cpuinfo so it is not serialized.
894 Get the cpu configuration
899 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
901 for i in self._nodes.items():
905 layout = self.get_cpu_layout(node)
906 node['cpu']['layout'] = layout
908 cpuinfo = node['cpuinfo']
909 smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
910 node['cpu']['smt_enabled'] = smt_enabled
912 # We don't want to write the cpuinfo
# NOTE(review): sampled dump — the def line and the calls to
# self.get_hugepages() / self.get_cpu() / self.get_grub() implied by the
# comments below are missing from this view. Orchestrates full system
# discovery.
920 Get the current system configuration.
924 # Get the Huge Page configuration
927 # Get the device configuration
928 self.get_devices_per_node()
930 # Get the CPU configuration
933 # Get the current grub cmdline
# NOTE(review): sampled dump — some interior lines are missing (the
# yes-answer check after the main-core question, the else-default for
# total_other_cpus). Interactive wizard for the cpu reservation values.
936 def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
938 Ask the user questions related to the cpu configuration.
940 :param node: Node dictionary
941 :param total_cpus: The total number of cpus in the system
942 :param numa_nodes: The list of numa nodes in the system
944 :type total_cpus: int
945 :type numa_nodes: list
948 print("\nYour system has {} core(s) and {} Numa Nodes.".
949 format(total_cpus, len(numa_nodes)))
950 print("To begin, we suggest not reserving any cores for "
951 "VPP or other processes.")
952 print("Then to improve performance start reserving cores and "
953 "adding queues as needed.")
955 # Leave 1 for the general system
# Cap the VPP reservation at 4 cores regardless of system size.
957 max_vpp_cpus = min(total_cpus, 4)
960 question = "\nHow many core(s) shall we reserve for " \
961 "VPP [0-{}][0]? ".format(max_vpp_cpus)
962 total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
963 node['cpu']['total_vpp_cpus'] = total_vpp_cpus
966 max_other_cores = total_cpus - total_vpp_cpus
967 if max_other_cores > 0:
968 question = 'How many core(s) do you want to reserve for ' \
969 'processes other than VPP? [0-{}][0]? '. \
970 format(str(max_other_cores))
971 total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
972 node['cpu']['total_other_cpus'] = total_other_cpus
# NOTE(review): if max_other_cores <= 0, `total_other_cpus` may be unbound
# here — the else/default branch is missing from this dump; TODO confirm.
974 max_main_cpus = total_cpus - total_vpp_cpus - total_other_cpus
975 reserve_vpp_main_core = False
976 if max_main_cpus > 0:
977 question = "Should we reserve 1 core for the VPP Main thread? "
978 question += "[y/N]? "
979 answer = self._ask_user_yn(question, 'n')
981 reserve_vpp_main_core = True
982 node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
983 node['cpu']['vpp_main_core'] = 0
# NOTE(review): .format(max_vpp_cpus) below is a no-op — the string has no
# placeholder; either drop the format call or template the upper bound.
985 question = "How many RX queues per port shall we use for " \
986 "VPP [1-4][1]? ".format(max_vpp_cpus)
987 total_rx_queues = self._ask_user_range(question, 1, 4, 1)
988 node['cpu']['total_rx_queues'] = total_rx_queues
# NOTE(review): sampled dump — node unpacking, `first_node` derivation, the
# break inside the slice-size loop, the totals initialization
# (total_cpus/cores/numa_nodes/cpus_per_node), and `core = cpul['core']` are
# on missing lines. Derives the cpu topology per node and optionally runs the
# interactive questions.
990 def modify_cpu(self, ask_questions=True):
992 Modify the cpu configuration, asking for the user for the values.
994 :param ask_questions: When true ask the user for config parameters
999 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
1001 for i in self._nodes.items():
1004 total_cpus_per_slice = 0
1008 cpu_layout = self.get_cpu_layout(node)
1010 # Assume the number of cpus per slice is always the same as the
# Count contiguous cpus belonging to the first numa node to get the
# slice width.
1013 for cpu in cpu_layout:
1014 if cpu['node'] != first_node:
1016 total_cpus_per_slice += 1
1018 # Get the total number of cpus, cores, and numa nodes from the
1020 for cpul in cpu_layout:
1021 numa_node = cpul['node']
1026 if numa_node not in cpus_per_node:
1027 cpus_per_node[numa_node] = []
# Record one (start, end) tuple per slice boundary.
1028 cpuperslice = int(cpu) % total_cpus_per_slice
1029 if cpuperslice == 0:
1030 cpus_per_node[numa_node].append((int(cpu), int(cpu) +
1031 total_cpus_per_slice - 1))
1032 if numa_node not in numa_nodes:
1033 numa_nodes.append(numa_node)
1034 if core not in cores:
1036 node['cpu']['cpus_per_node'] = cpus_per_node
1038 # Ask the user some questions
# Systems with fewer than 4 cpus skip the interactive tuning entirely.
1039 if ask_questions and total_cpus >= 4:
1040 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1042 # Populate the interfaces with the numa node
1043 if 'interfaces' in node:
1044 ikeys = node['interfaces'].keys()
1045 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1047 # We don't want to write the cpuinfo
1048 node['cpuinfo'] = ""
1051 self._update_auto_config()
# NOTE(review): sampled dump — the yes-answer checks after each _ask_user_yn,
# the `vppd` dict construction, `dvid`/`device` unpacking, and some
# bind_vpp_device argument lines are missing. Two-pass interactive flow:
# first offer to return unused devices to the OS (bind to kernel driver),
# then offer the remainder to VPP (move to dpdk_devices).
1054 def _modify_other_devices(self, node,
1055 other_devices, kernel_devices, dpdk_devices):
1057 Modify the devices configuration, asking for the user for the values.
1061 odevices_len = len(other_devices)
1062 if odevices_len > 0:
1063 print("\nThese device(s) are currently NOT being used "
1064 "by VPP or the OS.\n")
1065 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1066 question = "\nWould you like to give any of these devices"
1067 question += " back to the OS [Y/n]? "
1068 answer = self._ask_user_yn(question, 'Y')
1071 for dit in other_devices.items():
1074 question = "Would you like to use device {} for". \
1076 question += " the OS [y/N]? "
1077 answer = self._ask_user_yn(question, 'n')
# A device can only be re-bound when it reports an unused driver.
1079 if 'unused' in device and len(
1080 device['unused']) != 0 and \
1081 device['unused'][0] != '':
1082 driver = device['unused'][0]
1083 ret = VppPCIUtil.bind_vpp_device(
1087 'Could not bind device {}'.format(dvid))
1090 for dit in vppd.items():
1093 kernel_devices[dvid] = device
1094 del other_devices[dvid]
1096 odevices_len = len(other_devices)
1097 if odevices_len > 0:
1098 print("\nThese device(s) are still NOT being used "
1099 "by VPP or the OS.\n")
1100 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1101 question = "\nWould you like use any of these for VPP [y/N]? "
1102 answer = self._ask_user_yn(question, 'N')
1105 for dit in other_devices.items():
1108 question = "Would you like to use device {} ".format(dvid)
1109 question += "for VPP [y/N]? "
1110 answer = self._ask_user_yn(question, 'n')
1113 for dit in vppd.items():
1116 if 'unused' in device and len(device['unused']) != 0 and \
1117 device['unused'][0] != '':
1118 driver = device['unused'][0]
1120 'Binding device {} to driver {}'.format(dvid,
1122 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1125 'Could not bind device {}'.format(dvid))
1127 dpdk_devices[dvid] = device
1128 del other_devices[dvid]
# NOTE(review): sampled dump — node unpacking, the `interfaces = {}`
# initialization, the device argument to vpp_create_interface, and the final
# _update_auto_config call are presumably on missing lines. Rebuilds each
# node's interface list from the devices named in the config file.
1130 def update_interfaces_config(self):
1132 Modify the interfaces directly from the config file.
1136 for i in self._nodes.items():
1138 devices = node['devices']
# Merge all device categories so any configured pci address resolves.
1139 all_devices = devices['other_devices']
1140 all_devices.update(devices['dpdk_devices'])
1141 all_devices.update(devices['kernel_devices'])
1145 if 'interfaces' in node:
1146 current_ifcs = node['interfaces']
1148 for ifc in current_ifcs.values():
1149 dvid = ifc['pci_address']
1150 if dvid in all_devices:
1151 VppPCIUtil.vpp_create_interface(interfaces, dvid,
1153 node['interfaces'] = interfaces
# NOTE(review): sampled dump — node/dvid/device unpacking lines, the
# yes-answer checks after each prompt, the `vppd`/`vppdl` initialization,
# and several continuation lines are missing. Full interactive device flow:
# handle "other" devices, offer kernel devices to VPP (optionally binding a
# dpdk driver), offer to return dpdk devices to the kernel, then record the
# final dpdk set as VPP interfaces.
1157 def modify_devices(self):
1159 Modify the devices configuration, asking for the user for the values.
1163 for i in self._nodes.items():
1165 devices = node['devices']
1166 other_devices = devices['other_devices']
1167 kernel_devices = devices['kernel_devices']
1168 dpdk_devices = devices['dpdk_devices']
1171 self._modify_other_devices(node, other_devices,
1172 kernel_devices, dpdk_devices)
1174 # Get the devices again for this node
1175 self._get_device(node)
1176 devices = node['devices']
1177 kernel_devices = devices['kernel_devices']
1178 dpdk_devices = devices['dpdk_devices']
1180 klen = len(kernel_devices)
1182 print("\nThese devices are safe to be used with VPP.\n")
1183 VppPCIUtil.show_vpp_devices(kernel_devices)
1184 question = "\nWould you like to use any of these " \
1185 "device(s) for VPP [y/N]? "
1186 answer = self._ask_user_yn(question, 'n')
1189 for dit in kernel_devices.items():
1192 question = "Would you like to use device {} ".format(dvid)
1193 question += "for VPP [y/N]? "
1194 answer = self._ask_user_yn(question, 'n')
1197 for dit in vppd.items():
# Only offer a driver re-bind when an unused driver is reported.
1200 if 'unused' in device and len(
1201 device['unused']) != 0 and device['unused'][
1203 driver = device['unused'][0]
1204 question = "Would you like to bind the driver {} for {} [y/N]? ".format(driver, dvid)
1205 answer = self._ask_user_yn(question, 'n')
1207 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1208 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1210 logging.debug('Could not bind device {}'.format(dvid))
1211 dpdk_devices[dvid] = device
1212 del kernel_devices[dvid]
1214 dlen = len(dpdk_devices)
1216 print("\nThese device(s) are already using DPDK.\n")
1217 VppPCIUtil.show_vpp_devices(dpdk_devices,
1218 show_interfaces=False)
1219 question = "\nWould you like to remove any of "
1220 question += "these device(s) [y/N]? "
1221 answer = self._ask_user_yn(question, 'n')
1224 for dit in dpdk_devices.items():
1227 question = "Would you like to remove {} [y/N]? ". \
1229 answer = self._ask_user_yn(question, 'n')
1231 vppdl[dvid] = device
1232 for dit in vppdl.items():
1235 if 'unused' in device and len(
1236 device['unused']) != 0 and device['unused'][
1238 driver = device['unused'][0]
1240 'Binding device {} to driver {}'.format(
1242 ret = VppPCIUtil.bind_vpp_device(node, driver,
1246 'Could not bind device {}'.format(dvid))
1248 kernel_devices[dvid] = device
1249 del dpdk_devices[dvid]
# Everything left on dpdk drivers becomes a VPP interface.
1252 for dit in dpdk_devices.items():
1255 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1256 node['interfaces'] = interfaces
1258 self._update_auto_config()
# NOTE(review): sampled dump — node unpacking, the divisor continuation after
# line 1277 (presumably `hugesize`), the format argument lines for the two
# prints, and the no-answer early-continue are missing. Interactive huge-page
# sizing, capped at 70% of free memory.
1261 def modify_huge_pages(self):
1263 Modify the huge page configuration, asking for the user for the values.
1267 for i in self._nodes.items():
1270 total = node['hugepages']['actual_total']
1271 free = node['hugepages']['free']
1272 size = node['hugepages']['size']
# memfree/size strings look like "NNN kB"; the unit suffix is dropped.
1273 memfree = node['hugepages']['memfree'].split(' ')[0]
1274 hugesize = int(size.split(' ')[0])
1275 # The max number of huge pages should be no more than
1276 # 70% of total free memory
1277 maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // \
1279 print("\nThere currently {} {} huge pages free.".format(
1281 question = "Do you want to reconfigure the number of " \
1282 "huge pages [y/N]? "
1283 answer = self._ask_user_yn(question, 'n')
1285 node['hugepages']['total'] = total
1288 print("\nThere currently a total of {} huge pages.".
1290 question = "How many huge pages do you want [{} - {}][{}]? ". \
1291 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
# NOTE(review): the literals 1024 below should be MIN_TOTAL_HUGE_PAGES to
# stay in sync with the prompt text above.
1292 answer = self._ask_user_range(question, 1024, maxpages, 1024)
1293 node['hugepages']['total'] = str(answer)
1295 # Update auto-config.yaml
1296 self._update_auto_config()
1298 # Rediscover just the hugepages
1299 self.get_hugepages()
# NOTE(review): intentional no-op placeholder kept for API symmetry with the
# other get_* discovery methods; the body (presumably just `pass`) is on a
# missing line of this sampled dump.
1301 def get_tcp_params(self):
1303 Get the tcp configuration
1306 # maybe nothing to do here?
# NOTE(review): sampled dump — node unpacking and the `answer = 0`
# reset inside each "< 10000" branch are on missing lines. Interactive
# collection of expected TCP session counts, stored per node and persisted.
1309 def acquire_tcp_params(self):
1311 Ask the user for TCP stack configuration parameters
1315 for i in self._nodes.items():
1318 question = "\nHow many active-open / tcp client sessions are " \
1319 "expected [0-10000000][0]? "
1320 answer = self._ask_user_range(question, 0, 10000000, 0)
1321 # Less than 10K is equivalent to 0
1322 if int(answer) < 10000:
1324 node['tcp']['active_open_sessions'] = answer
1326 question = "How many passive-open / tcp server sessions are " \
1327 "expected [0-10000000][0]? "
1328 answer = self._ask_user_range(question, 0, 10000000, 0)
1329 # Less than 10K is equivalent to 0
1330 if int(answer) < 10000:
1332 node['tcp']['passive_open_sessions'] = answer
1334 # Update auto-config.yaml
1335 self._update_auto_config()
1337 # Rediscover tcp parameters
1338 self.get_tcp_params()
# NOTE(review): sampled dump — the @staticmethod decorator is presumably on a
# missing line. Forces a qemu rebuild with the project patches applied.
1341 def patch_qemu(node):
1343 Patch qemu with the correct patches.
1345 :param node: Node dictionary
1349 print('\nWe are patching the node "{}":\n'.format(node['host']))
1350 QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
# NOTE(review): sampled dump — the def line itself, the `item = ...`
# assignments before several prints, the `if item in cpu:` guards, and the
# smt string selection are missing from this view. Pretty-prints selected
# lscpu-style fields plus the VPP thread-to-cpu mapping.
1355 print the CPU information
1359 cpu = CpuUtils.get_cpu_info_per_node(node)
1363 print("{:>20}: {}".format(item, cpu[item]))
1366 print("{:>20}: {}".format(item, cpu[item]))
1367 item = 'Thread(s) per core'
1369 print("{:>20}: {}".format(item, cpu[item]))
1370 item = 'Core(s) per socket'
1372 print("{:>20}: {}".format(item, cpu[item]))
1375 print("{:>20}: {}".format(item, cpu[item]))
1376 item = 'NUMA node(s)'
1379 numa_nodes = int(cpu[item])
1380 for i in range(0, numa_nodes):
1381 item = "NUMA node{} CPU(s)".format(i)
1382 print("{:>20}: {}".format(item, cpu[item]))
1383 item = 'CPU max MHz'
1385 print("{:>20}: {}".format(item, cpu[item]))
1386 item = 'CPU min MHz'
1388 print("{:>20}: {}".format(item, cpu[item]))
1390 if node['cpu']['smt_enabled']:
1394 print("{:>20}: {}".format('SMT', smt))
1397 print("\nVPP Threads: (Name: Cpu Number)")
1398 vpp_processes = cpu['vpp_processes']
1399 for i in vpp_processes.items():
1400 print(" {:10}: {:4}".format(i[0], i[1]))
# NOTE(review): sampled dump — the decorator, the `vpputl = VPPUtil()`
# construction, several `if len(...)`/else guards, the "No devices in use"
# early return, and the name/value unpacking inside the interface loop are
# missing. Prints a categorized device report plus the interfaces VPP is
# actively using.
1403 def device_info(node):
1405 Show the device information.
1409 if 'cpu' in node and 'total_mbufs' in node['cpu']:
1410 total_mbufs = node['cpu']['total_mbufs']
1411 if total_mbufs != 0:
1412 print("Total Number of Buffers: {}".format(total_mbufs))
1414 vpp = VppPCIUtil(node)
1415 vpp.get_all_devices()
1416 linkup_devs = vpp.get_link_up_devices()
1417 if len(linkup_devs):
1418 print("\nDevices with link up (can not be used with VPP):")
1419 vpp.show_vpp_devices(linkup_devs, show_header=False)
1420 # for dev in linkup_devs:
1422 kernel_devs = vpp.get_kernel_devices()
1423 if len(kernel_devs):
1424 print("\nDevices bound to kernel drivers:")
1425 vpp.show_vpp_devices(kernel_devs, show_header=False)
1427 print("\nNo devices bound to kernel drivers")
1429 dpdk_devs = vpp.get_dpdk_devices()
1431 print("\nDevices bound to DPDK drivers:")
1432 vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1435 print("\nNo devices bound to DPDK drivers")
1437 other_devs = vpp.get_other_devices()
1439 print("\nDevices not bound to Kernel or DPDK drivers:")
1440 vpp.show_vpp_devices(other_devs, show_interfaces=True,
1443 print("\nNo devices not bound to Kernel or DPDK drivers")
1446 interfaces = vpputl.get_hardware(node)
1447 if interfaces == {}:
1450 print("\nDevices in use by VPP:")
# 'local0' alone means no real interfaces are attached to VPP.
1452 if len(interfaces.items()) < 2:
1456 print("{:30} {:4} {:4} {:7} {:4} {:7}".
1457 format('Name', 'Numa', 'RXQs',
1458 'RXDescs', 'TXQs', 'TXDescs'))
1459 for intf in sorted(interfaces.items()):
1462 if name == 'local0':
1464 numa = rx_qs = rx_ds = tx_qs = tx_ds = ''
1466 numa = int(value['numa'])
1467 if 'rx queues' in value:
1468 rx_qs = int(value['rx queues'])
1469 if 'rx descs' in value:
1470 rx_ds = int(value['rx descs'])
1471 if 'tx queues' in value:
1472 tx_qs = int(value['tx queues'])
1473 if 'tx descs' in value:
1474 tx_ds = int(value['tx descs'])
1476 print("{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".
1477 format(name, numa, rx_qs, rx_ds, tx_qs, tx_ds))
# NOTE(review): damaged extraction -- stray source line numbers and
# missing lines (decorator, docstring delimiters). Comments only.
# Purpose: print the node's huge page configuration; thin delegation
# to VppHugePageUtil.
1480 def hugepage_info(node):
1482 Show the huge page information.
1486 hpg = VppHugePageUtil(node)
1487 hpg.show_huge_pages()
# NOTE(review): damaged extraction -- stray source line numbers; the
# return statements and docstring delimiters are on missing lines.
# Docstring typo: "tru" should read "true".
# Purpose: predicate -- does this node dictionary have at least one
# configured interface?
1490 def has_interfaces(node):
1492 Check for interfaces, return tru if there is at least one
# Truthiness check: non-empty 'interfaces' entry; the `return True` /
# `return False` lines are missing from this extraction.
1496 if 'interfaces' in node and len(node['interfaces']):
# NOTE(review): damaged extraction -- stray source line numbers, lost
# indentation, and the method's tail (lines 1535-1543, including its
# return) is missing. Comments only; no code tokens changed.
# Purpose: sanity-check that the node has enough CPUs and free memory
# to run VPP; prints a warning when it does not.
1502 def min_system_resources(node):
1504 Check the system for basic minimum resources, return true if
# CPU check: the threshold comparison (against `total_cpus`) sits on a
# missing line between 1514 and 1516.
1513 if 'layout' in node['cpu']:
1514 total_cpus = len(node['cpu']['layout'])
1516 print("\nThere is only {} CPU(s) available on this system. "
1517 "This is not enough to run VPP.".format(total_cpus))
# Memory check: compare memory needed for MIN_TOTAL_HUGE_PAGES huge
# pages against free memory, as a percentage.
1521 if 'free' in node['hugepages'] and \
1522 'memfree' in node['hugepages'] and \
1523 'size' in node['hugepages']:
1524 free = node['hugepages']['free']
# 'memfree' / 'size' look like "NNN kB" strings; take the number.
1525 memfree = float(node['hugepages']['memfree'].split(' ')[0])
1526 hugesize = float(node['hugepages']['size'].split(' ')[0])
1528 memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1529 percentmemhugepages = (memhugepages / memfree) * 100
# NOTE(review): BUG -- `free is '0'` compares object identity with a
# string literal; it should be `free == '0'`. It only happens to work
# under CPython small-string interning. Fix when the full method is in
# view (the block's tail is missing here).
1530 if free is '0' and \
1531 percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
# The print( call that owns the next two literal lines is on a
# missing line (1532).
1533 "\nThe System has only {} of free memory. You will not "
1534 "be able to allocate enough Huge Pages for VPP.".format(
# NOTE(review): fragment of a method whose `def` line fell into a gap of
# this damaged extraction. Presumably `sys_info(self)` -- prints a full
# per-node report (grub, huge pages, devices, VPP status, minimums).
# TODO confirm against the full source.
1544 Print the system information
# One report section per configured node; `node = i[1]` and
# `name = node['host']` presumably occur on missing lines.
1548 for i in self._nodes.items():
1549 print("\n==============================")
1553 print("NODE: {}\n".format(name))
# Grub section: running kernel cmdline vs the configured default.
1560 print("\nGrub Command Line:")
1562 print(" Current: {}".format(
1563 node['grub']['current_cmdline']))
1564 print(" Configured: {}".format(
1565 node['grub']['default_cmdline']))
# Huge page and device sections delegate to the sibling helpers.
1568 print("\nHuge Pages:")
1569 self.hugepage_info(node)
1573 self.device_info(node)
1576 print("\nVPP Service Status:")
1577 state, errors = VPPUtil.status(node)
1578 print(" {}".format(state))
# The `for e in errors:` header for the line below is missing.
1580 print(" {}".format(e))
1582 # Minimum system resources
1583 self.min_system_resources(node)
1585 print("\n==============================")
# NOTE(review): damaged extraction -- stray source line numbers, lost
# indentation, missing lines (docstring delimiters, early-return on
# empty hardware, `continue` for local0, the answer check after 1611,
# and `address = {}`). Comments only; no code tokens changed.
# Docstring typo: ":returns: A list or interfaces" should read
# "A list of interfaces".
1587 def _ipv4_interface_setup_questions(self, node):
1589 Ask the user some questions and get a list of interfaces
1590 and IPv4 addresses associated with those interfaces
1592 :param node: Node dictionary.
1594 :returns: A list or interfaces with ip addresses
# assumes `vpputl` (a VPPUtil instance) is created on a missing line.
1599 interfaces = vpputl.get_hardware(node)
1600 if interfaces == {}:
1603 interfaces_with_ip = []
1604 for intf in sorted(interfaces.items()):
# `name = intf[0]` presumably on a missing line; 'local0' (VPP's
# internal loopback) is skipped.
1606 if name == 'local0':
# Default answer is yes ('y'); ask per interface.
1609 question = "Would you like add address to " \
1610 "interface {} [Y/n]? ".format(name)
1611 answer = self._ask_user_yn(question, 'y')
1614 addr = self._ask_user_ipv4()
1615 address['name'] = name
1616 address['addr'] = addr
1617 interfaces_with_ip.append(address)
1619 return interfaces_with_ip
# NOTE(review): damaged extraction -- stray source line numbers, lost
# indentation, missing lines (node unpacking, keep-configuration early
# continue, `content = ''`, ints unpacking, ret check). Comments only.
# Purpose: interactively assign IPv4 addresses to VPP interfaces and
# bring them up, by generating and executing a vppctl exec script.
1621 def ipv4_interface_setup(self):
1623 After asking the user some questions, get a list of interfaces
1624 and IPv4 addresses associated with those interfaces
1628 for i in self._nodes.items():
# Show what is already configured so the user can keep it.
1631 # Show the current interfaces with IP addresses
1632 current_ints = VPPUtil.get_int_ip(node)
1633 if current_ints != {}:
1634 print("\nThese are the current interfaces with IP addresses:")
1635 for items in sorted(current_ints.items()):
1638 if 'address' not in value:
1641 address = value['address']
1642 print("{:30} {:20} {:10}".format(name, address,
# Keeping the existing configuration presumably skips this node
# (the branch body is on missing lines).
1644 question = "\nWould you like to keep this configuration " \
1646 answer = self._ask_user_yn(question, 'y')
1650 print("\nThere are currently no interfaces with IP "
# Build vppctl commands: one "set int ip address" plus one
# "set int state ... up" per chosen interface.
1653 # Create a script that add the ip addresses to the interfaces
1654 # and brings the interfaces up
1655 ints_with_addrs = self._ipv4_interface_setup_questions(node)
1657 for ints in ints_with_addrs:
1660 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1661 setintupstr = 'set int state {} up\n'.format(name)
1662 content += setipstr + setintupstr
1664 # Write the content to the script
1665 rootdir = node['rootdir']
1666 filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1667 with open(filename, 'w+') as sfile:
1668 sfile.write(content)
# Run the generated script through vppctl immediately...
1670 # Execute the script
1671 cmd = 'vppctl exec {}'.format(filename)
1672 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1674 logging.debug(stderr)
# ...and tell the user how to re-run it later. (Message typo "as been"
# is runtime output; left untouched.)
1676 print("\nA script as been created at {}".format(filename))
1677 print("This script can be run using the following:")
1678 print("vppctl exec {}\n".format(filename))
# NOTE(review): damaged extraction -- stray source line numbers, lost
# indentation, missing lines (ret checks, `inum` counter, local0
# continue, name unpacking). Comments only; no code tokens changed.
# Docstring typo: "A list or interfaces" should read "A list of
# interfaces".
# Purpose: per user choice, create one vhost-user socket per physical
# interface and pair each with a bridge id, for VM connectivity.
1680 def _create_vints_questions(self, node):
1682 Ask the user some questions and get a list of interfaces
1683 and IPv4 addresses associated with those interfaces
1685 :param node: Node dictionary.
1687 :returns: A list or interfaces with ip addresses
1692 interfaces = vpputl.get_hardware(node)
1693 if interfaces == {}:
# Start from a clean slate: remove any pre-existing VirtualEthernet
# (vhost-user) interfaces.
1696 # First delete all the Virtual interfaces
1697 for intf in sorted(interfaces.items()):
1699 if name[:7] == 'Virtual':
1700 cmd = 'vppctl delete vhost-user {}'.format(name)
1701 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1703 logging.debug('{} failed on node {} {}'.format(
1704 cmd, node['host'], stderr))
# Re-read hardware after the deletions.
1706 # Create a virtual interface, for each interface the user wants to use
1707 interfaces = vpputl.get_hardware(node)
1708 if interfaces == {}:
1710 interfaces_with_virtual_interfaces = []
1712 for intf in sorted(interfaces.items()):
1714 if name == 'local0':
# Default answer is yes for each physical interface.
1717 question = "Would you like connect this interface {} to " \
1718 "the VM [Y/n]? ".format(name)
1719 answer = self._ask_user_yn(question, 'y')
# Socket path derives from the interface name; '/' is not valid in
# a filename so it is replaced with '_'. Stale sockets are removed.
1721 sockfilename = '/var/run/vpp/{}.sock'.format(
1722 name.replace('/', '_'))
1723 if os.path.exists(sockfilename):
1724 os.remove(sockfilename)
1725 cmd = 'vppctl create vhost-user socket {} server'.format(
1727 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1730 "Couldn't execute the command {}, {}.".format(cmd,
# vppctl prints the new VirtualEthernet name on stdout.
1732 vintname = stdout.rstrip('\r\n')
# World-writable so the (unprivileged) VM process can open it.
1734 cmd = 'chmod 777 {}'.format(sockfilename)
1735 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1738 "Couldn't execute the command {}, {}.".format(cmd,
# `inum` (the bridge number) is maintained on missing lines --
# presumably incremented per accepted interface. TODO confirm.
1741 interface = {'name': name,
1742 'virtualinterface': '{}'.format(vintname),
1743 'bridge': '{}'.format(inum)}
1745 interfaces_with_virtual_interfaces.append(interface)
1747 return interfaces_with_virtual_interfaces
# NOTE(review): damaged extraction -- stray source line numbers, lost
# indentation, missing lines (node unpacking, keep-config branch,
# `content = ''`, several format arguments). Comments only.
# Purpose: build and run a vppctl script that bridges each chosen
# physical interface to a freshly created vhost-user interface.
1749 def create_and_bridge_virtual_interfaces(self):
1751 After asking the user some questions, create a VM and connect
1752 the interfaces to VPP interfaces
1756 for i in self._nodes.items():
1759 # Show the current bridge and interface configuration
1760 print("\nThis the current bridge configuration:")
1761 VPPUtil.show_bridge(node)
1762 question = "\nWould you like to keep this configuration [Y/n]? "
1763 answer = self._ask_user_yn(question, 'y')
# Keeping the configuration presumably continues to the next node
# (branch body on missing lines).
1767 # Create a script that builds a bridge configuration with
1768 # physical interfaces and virtual interfaces
1769 ints_with_vints = self._create_vints_questions(node)
1771 for intf in ints_with_vints:
# Comment block embedded in the generated vppctl script; doubled
# braces in 1775-1776 are literal { } in the output.
1772 vhoststr = '\n'.join([
1773 'comment { The following command creates the socket }',
1774 'comment { and returns a virtual interface }',
1775 'comment {{ create vhost-user socket '
1776 '/var/run/vpp/sock{}.sock server }}\n'.format(
# Script body: take the physical interface down, put both the
# physical and the virtual interface in the same L2 bridge, then
# bring both up.
1780 setintdnstr = 'set interface state {} down\n'.format(
1783 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1784 intf['name'], intf['bridge'])
1785 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1786 intf['virtualinterface'], intf['bridge'])
1788 # set interface state VirtualEthernet/0/0/0 up
1789 setintvststr = 'set interface state {} up\n'.format(
1790 intf['virtualinterface'])
1792 # set interface state VirtualEthernet/0/0/0 down
1793 setintupstr = 'set interface state {} up\n'.format(
1796 content += vhoststr + setintdnstr + setintbrstr + \
1797 setvintbrstr + setintvststr + setintupstr
1799 # Write the content to the script
1800 rootdir = node['rootdir']
1801 filename = rootdir + \
1802 '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1803 with open(filename, 'w+') as sfile:
1804 sfile.write(content)
# Execute immediately, then tell the user how to re-run. (Message
# typo "as been" is runtime output; left untouched.)
1806 # Execute the script
1807 cmd = 'vppctl exec {}'.format(filename)
1808 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1810 logging.debug(stderr)
1812 print("\nA script as been created at {}".format(filename))
1813 print("This script can be run using the following:")
1814 print("vppctl exec {}\n".format(filename))
# NOTE(review): damaged extraction -- stray source line numbers, lost
# indentation, missing lines (ret checks, `inum`, local0 continue,
# name unpacking, break after first accepted interface). Comments only.
# Purpose: like _create_vints_questions, but for the iperf test VM:
# the user picks ONE interface (default answer is 'n' here) and the
# socket path is remembered on self._sockfilename for create_iperf_vm.
1816 def _iperf_vm_questions(self, node):
1818 Ask the user some questions and get a list of interfaces
1819 and IPv4 addresses associated with those interfaces
1821 :param node: Node dictionary.
1823 :returns: A list or interfaces with ip addresses
1828 interfaces = vpputl.get_hardware(node)
1829 if interfaces == {}:
# Remove any pre-existing VirtualEthernet (vhost-user) interfaces.
1832 # First delete all the Virtual interfaces
1833 for intf in sorted(interfaces.items()):
1835 if name[:7] == 'Virtual':
1836 cmd = 'vppctl delete vhost-user {}'.format(name)
1837 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1839 logging.debug('{} failed on node {} {}'.format(
1840 cmd, node['host'], stderr))
1842 # Create a virtual interface, for each interface the user wants to use
1843 interfaces = vpputl.get_hardware(node)
1844 if interfaces == {}:
1846 interfaces_with_virtual_interfaces = []
1850 print('\nPlease pick one interface to connect to the iperf VM.')
1851 for intf in sorted(interfaces.items()):
1853 if name == 'local0':
# Default 'n': the user opts IN to exactly one interface.
1856 question = "Would you like connect this interface {} to " \
1857 "the VM [y/N]? ".format(name)
1858 answer = self._ask_user_yn(question, 'n')
# Remember the socket path on the instance; create_iperf_vm()
# substitutes it into the libvirt XML template later.
1860 self._sockfilename = '/var/run/vpp/{}.sock'.format(
1861 name.replace('/', '_'))
1862 if os.path.exists(self._sockfilename):
1863 os.remove(self._sockfilename)
1864 cmd = 'vppctl create vhost-user socket {} server'.format(
1866 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1869 "Couldn't execute the command {}, {}.".format(
# vppctl prints the new VirtualEthernet name on stdout.
1871 vintname = stdout.rstrip('\r\n')
# World-writable so the VM process can open the socket.
1873 cmd = 'chmod 777 {}'.format(self._sockfilename)
1874 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1877 "Couldn't execute the command {}, {}.".format(
# `inum` (bridge number) is maintained on missing lines -- TODO
# confirm.
1880 interface = {'name': name,
1881 'virtualinterface': '{}'.format(vintname),
1882 'bridge': '{}'.format(inum)}
1884 interfaces_with_virtual_interfaces.append(interface)
1885 return interfaces_with_virtual_interfaces
# NOTE(review): damaged extraction -- stray source line numbers, lost
# indentation, missing lines (node unpacking, keep-config branch,
# `content = ''`, several format arguments). Comments only.
# Purpose: same script-generation pattern as
# create_and_bridge_virtual_interfaces, but for the single iperf VM
# interface; also records self._sockfilename when the user keeps the
# existing bridge configuration.
1887 def create_and_bridge_iperf_virtual_interface(self):
1889 After asking the user some questions, and create and bridge a
1890 virtual interface to be used with iperf VM
1894 for i in self._nodes.items():
1897 # Show the current bridge and interface configuration
1898 print("\nThis the current bridge configuration:")
1899 ifaces = VPPUtil.show_bridge(node)
1900 question = "\nWould you like to keep this configuration [Y/n]? "
1901 answer = self._ask_user_yn(question, 'y')
# Keeping the configuration: reuse the first bridged interface's
# socket. NOTE(review): ifaces[0] would raise IndexError if
# show_bridge returned an empty list -- TODO confirm callers.
1903 self._sockfilename = '/var/run/vpp/{}.sock'.format(
1904 ifaces[0]['name'].replace('/', '_'))
1905 if os.path.exists(self._sockfilename):
1908 # Create a script that builds a bridge configuration with
1909 # physical interfaces and virtual interfaces
1910 ints_with_vints = self._iperf_vm_questions(node)
1912 for intf in ints_with_vints:
# Comment block embedded in the generated vppctl script; doubled
# braces are literal { } in the output.
1913 vhoststr = '\n'.join([
1914 'comment { The following command creates the socket }',
1915 'comment { and returns a virtual interface }',
1916 'comment {{ create vhost-user socket '
1917 '/var/run/vpp/sock{}.sock server }}\n'.format(
# Down the physical interface, bridge both interfaces, bring both
# up -- mirrors the non-iperf variant.
1921 setintdnstr = 'set interface state {} down\n'.format(
1924 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1925 intf['name'], intf['bridge'])
1926 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1927 intf['virtualinterface'], intf['bridge'])
1929 # set interface state VirtualEthernet/0/0/0 up
1930 setintvststr = 'set interface state {} up\n'.format(
1931 intf['virtualinterface'])
1933 # set interface state VirtualEthernet/0/0/0 down
1934 setintupstr = 'set interface state {} up\n'.format(
1937 content += vhoststr + setintdnstr + setintbrstr + \
1938 setvintbrstr + setintvststr + setintupstr
1940 # Write the content to the script
1941 rootdir = node['rootdir']
1942 filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
1943 with open(filename, 'w+') as sfile:
1944 sfile.write(content)
# Execute immediately, then tell the user how to re-run. (Message
# typo "as been" is runtime output; left untouched.)
1946 # Execute the script
1947 cmd = 'vppctl exec {}'.format(filename)
1948 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1950 logging.debug(stderr)
1952 print("\nA script as been created at {}".format(filename))
1953 print("This script can be run using the following:")
1954 print("vppctl exec {}\n".format(filename))
# NOTE(review): damaged extraction -- stray source line numbers, lost
# indentation, missing lines (decorator, the `virsh list` cmd
# assignment, ret checks, raise statements). Comments only.
# Docstring is a copy-paste from the create method -- this method
# DESTROYS a VM; also "to be be destroyed" is a typo.
# Purpose: if a libvirt domain matching `name` is running, destroy it
# via virsh.
1957 def destroy_iperf_vm(name):
1959 After asking the user some questions, create a VM and connect
1960 the interfaces to VPP interfaces
1962 :param name: The name of the VM to be be destroyed
# The command run here (presumably 'virsh list') is assigned on a
# missing line -- TODO confirm.
1967 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1969 logging.debug(stderr)
1971 "Couldn't execute the command {} : {}".format(cmd, stderr))
# NOTE(review): re.findall treats `name` as a regex pattern, not a
# literal -- a name containing metacharacters could mis-match. A plain
# substring test would be safer.
1973 if re.findall(name, stdout):
1974 cmd = 'virsh destroy {}'.format(name)
1975 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1977 logging.debug(stderr)
1979 "Couldn't execute the command {} : {}".format(
# NOTE(review): damaged extraction -- stray source line numbers, lost
# indentation, missing lines; the method's final lines (after 2023)
# appear to be beyond this chunk. Comments only; no code tokens changed.
# Purpose: render the distro-specific libvirt XML template with the VM
# name, disk image, cloud-init ISO and vhost-user socket path, then
# start the VM with `virsh create`.
1982 def create_iperf_vm(self, vmname):
1984 After asking the user some questions, create a VM and connect
1985 the interfaces to VPP interfaces
# Pick the Ubuntu or CentOS template; the else: and the .format
# arguments (self._rootdir) are on missing lines.
1989 # Read the iperf VM template file
1990 distro = VPPUtil.get_linux_distro()
1991 if distro[0] == 'Ubuntu':
1993 '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(
1997 '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(
2000 with open(tfilename, 'r') as tfile:
2001 tcontents = tfile.read()
# Substitute the template placeholders. _sockfilename must have been
# set earlier by _iperf_vm_questions /
# create_and_bridge_iperf_virtual_interface.
2005 imagename = '{}/vpp/vpp-config/{}'.format(
2006 self._rootdir, IPERFVM_IMAGE)
2007 isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
2008 tcontents = tcontents.format(vmname=vmname, imagename=imagename,
2010 vhostsocketname=self._sockfilename)
# Write the rendered XML next to the other config files...
2013 ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
2014 with open(ifilename, 'w+') as ifile:
2015 ifile.write(tcontents)
# ...and boot the transient domain from it.
2018 cmd = 'virsh create {}'.format(ifilename)
2019 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
2021 logging.debug(stderr)
2023 "Couldn't execute the command {} : {}".format(cmd, stderr))