1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
15 from __future__ import absolute_import, division, print_function
20 from ipaddress import ip_address
24 from vpplib.VPPUtil import VPPUtil
25 from vpplib.VppPCIUtil import VppPCIUtil
26 from vpplib.VppHugePageUtil import VppHugePageUtil
27 from vpplib.CpuUtils import CpuUtils
28 from vpplib.VppGrubUtil import VppGrubUtil
29 from vpplib.QemuUtils import QemuUtils
# Python2/3 compatible
# NOTE(review): the enclosing try/except NameError guard is elided in this
# listing; a bare `input = raw_input` would raise NameError on Python 3.
input = raw_input # noqa
__all__ = ["AutoConfig"]

# Hugepage sizing bounds: never configure fewer pages than this minimum,
# and never let hugepages consume more than this percentage of free memory.
MIN_TOTAL_HUGE_PAGES = 1024
MAX_PERCENT_FOR_HUGE_PAGES = 70

# Assets used to create the iperf test VM.
IPERFVM_XML = 'configs/iperf-vm.xml'
IPERFVM_IMAGE = 'images/xenial-mod.img'
IPERFVM_ISO = 'configs/cloud-config.iso'
class AutoConfig(object):
    """Auto Configuration Tools.

    Discovers and interactively modifies the system configuration
    (cpus, PCI devices, hugepages, TCP stack, grub cmdline) used to
    generate a VPP startup configuration for a set of nodes.
    """
    def __init__(self, rootdir, filename, clean=False):
        """
        The Auto Configure class.

        :param rootdir: The root directory for all the auto configuration files
        :param filename: The autoconfiguration file
        :param clean: When set initialize the nodes from the auto-config file
        :type rootdir: str
        :type filename: str
        :type clean: bool
        """
        # NOTE(review): some initializer lines (e.g. self._metadata,
        # self._nodes, self._clean) are elided in this listing; confirm
        # against the complete source.
        self._autoconfig_filename = rootdir + filename
        self._rootdir = rootdir
        self._vpp_devices_node = {}
        self._hugepage_config = ""
        self._sockfilename = ""
75 Returns the nodes dictionary.
    def _autoconfig_backup_file(filename):
        """
        Backup a file to <filename>.orig before it is modified.

        :param filename: The file to backup
        :type filename: str
        """

        # Does a copy of the file exist, if not create one
        ofile = filename + '.orig'
        (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
        # Only create the .orig copy the first time through; an existing
        # backup is never overwritten.
        if stdout.strip('\n') != ofile:
            cmd = 'sudo cp {} {}'.format(filename, ofile)
            (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
            # NOTE(review): an error-check line (e.g. `if ret != 0:`)
            # appears to be elided before this debug log in this listing.
            logging.debug(stderr)
    # noinspection PyBroadException
    def _ask_user_ipv4():
        """
        Asks the user for an IPv4 address with a prefix length.

        :returns: IP address with cidr (e.g. "10.0.0.1/24")
        :rtype: str
        """
        # NOTE(review): the retry loop and try/except lines are elided in
        # this listing; the trailing print is the invalid-input branch.
        answer = input("Please enter the IPv4 Address [n.n.n.n/n]: ")
        ipinput = answer.split('/')
        ipaddr = ip_address(ipinput[0])
        plen = answer.split('/')[1]
        answer = input("Please enter the netmask [n.n.n.n]: ")
        # NOTE(review): stdlib ipaddress objects have no netmask_bits()
        # method — confirm this helper against the full source.
        plen = ip_address(answer).netmask_bits()
        return '{}/{}'.format(ipaddr, plen)
        print("Please enter a valid IPv4 address.")
    def _ask_user_range(question, first, last, default):
        """
        Asks the user for a number within a range.
        default is returned if return is entered.

        :param question: Text of a question.
        :param first: First number in the range
        :param last: Last number in the range
        :param default: The value returned when return is entered
        :type question: string
        :type first: int
        :type last: int
        :type default: int
        :returns: The answer to the question
        :rtype: int
        """
        # NOTE(review): the retry loop and return statements are elided in
        # this listing; both prints below are truncated continuations.
        answer = input(question)
        # NOTE(review): r'[0-9+]' is a character class that also matches a
        # literal '+'; the intent was probably r'[0-9]+'.
        if re.findall(r'[0-9+]', answer):
            if int(answer) in range(first, last + 1):
                print("Please a value between {} and {} or Return.".
                print("Please a number between {} and {} or Return.".
    def _ask_user_yn(question, default):
        """
        Asks the user for a yes or no question.

        :param question: Text of a question.
        :param default: The value returned when return is entered
        :type question: string
        :type default: string
        :returns: The answer to the question ('y' or 'n')
        """

        default = default.lower()
        while not input_valid:
            answer = input(question)
            # NOTE(review): findall accepts Y/y/N/n appearing anywhere in
            # the reply, not only as the first character.
            if re.findall(r'[YyNn]', answer):
                answer = answer[0].lower()
            # Invalid-input branch (the `else:` line is elided here).
            print("Please answer Y, N or Return.")
    def _loadconfig(self):
        """
        Load the testbed configuration, given the auto configuration file.

        Prefers a previously written system config file unless a clean
        start was requested; otherwise the nodes come from the auto-config.
        """

        # Get the Topology, from the topology layout file
        # NOTE(review): the try/raise lines are elided in this listing.
        with open(self._autoconfig_filename, 'r') as stream:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # and deprecated — prefer yaml.safe_load.
            topo = yaml.load(stream)
            if 'metadata' in topo:
                self._metadata = topo['metadata']
        except yaml.YAMLError as exc:
            # NOTE(review): two format() arguments for one '{}' placeholder;
            # `exc` is silently dropped from the message.
            "Couldn't read the Auto config file {}.".format(
                self._autoconfig_filename, exc))

        systemfile = self._rootdir + self._metadata['system_config_file']
        if self._clean is False and os.path.isfile(systemfile):
            with open(systemfile, 'r') as sysstream:
                # NOTE(review): prefer yaml.safe_load here as well.
                systopo = yaml.load(sysstream)
                if 'nodes' in systopo:
                    self._nodes = systopo['nodes']
            except yaml.YAMLError as sysexc:
                "Couldn't read the System config file {}.".format(
            # Get the nodes from Auto Config
            self._nodes = topo['nodes']

        # Set the root directory in all the nodes
        for i in self._nodes.items():
            node['rootdir'] = self._rootdir
    def updateconfig(self):
        """
        Update the testbed configuration, given the auto configuration file.
        We will write the system configuration file with the current node
        information.
        """

        # Initialize the yaml data
        ydata = {'metadata': self._metadata, 'nodes': self._nodes}

        # Write the system config file
        filename = self._rootdir + self._metadata['system_config_file']
        with open(filename, 'w') as yamlfile:
            yaml.dump(ydata, yamlfile)
    def _update_auto_config(self):
        """
        Write the auto configuration file with the new configuration data,
        merging the user-modified node settings back into the yaml document.
        """

        # Initialize the yaml data
        # NOTE(review): try/except framing lines are elided in this listing.
        with open(self._autoconfig_filename, 'r') as stream:
            # NOTE(review): prefer yaml.safe_load over Loader-less yaml.load.
            ydata = yaml.load(stream)
            nodes = ydata['nodes']
        except yaml.YAMLError as exc:

        # Copy the interface, cpu, tcp and hugepage settings from the live
        # node dictionaries into the yaml document.
        for i in nodes.items():
            node['interfaces'] = {}
            for item in self._nodes[key]['interfaces'].items():
                node['interfaces'][port] = {}
                addr = '{}'.format(interface['pci_address'])
                node['interfaces'][port]['pci_address'] = addr
                if 'mac_address' in interface:
                    node['interfaces'][port]['mac_address'] = \
                        interface['mac_address']

            # cpu settings
            if 'total_other_cpus' in self._nodes[key]['cpu']:
                node['cpu']['total_other_cpus'] = \
                    self._nodes[key]['cpu']['total_other_cpus']
            if 'total_vpp_cpus' in self._nodes[key]['cpu']:
                node['cpu']['total_vpp_cpus'] = \
                    self._nodes[key]['cpu']['total_vpp_cpus']
            if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
                node['cpu']['reserve_vpp_main_core'] = \
                    self._nodes[key]['cpu']['reserve_vpp_main_core']

            # tcp settings
            if 'active_open_sessions' in self._nodes[key]['tcp']:
                node['tcp']['active_open_sessions'] = \
                    self._nodes[key]['tcp']['active_open_sessions']
            if 'passive_open_sessions' in self._nodes[key]['tcp']:
                node['tcp']['passive_open_sessions'] = \
                    self._nodes[key]['tcp']['passive_open_sessions']

            node['hugepages']['total'] = self._nodes[key]['hugepages']['total']

        # Write the auto config config file
        with open(self._autoconfig_filename, 'w') as yamlfile:
            yaml.dump(ydata, yamlfile)
    def apply_huge_pages(self):
        """
        Apply the huge page config on every node.
        """

        for i in self._nodes.items():
            # `node = i[1]` is elided in this listing.
            hpg = VppHugePageUtil(node)
            hpg.hugepages_dryrun_apply()
    def _apply_vpp_unix(node):
        """
        Apply the VPP Unix config.

        :param node: Node dictionary with cpuinfo.
        :type node: dict
        :returns: The rendered unix configuration section text
        """

        # Early-return body of this guard is elided in this listing.
        if 'unix' not in node['vpp']:

        unixv = node['vpp']['unix']
        if 'interactive' in unixv:
            interactive = unixv['interactive']
            if interactive is True:
                unix = ' interactive\n'

        return unix.rstrip('\n')
    def _apply_vpp_cpu(node):
        """
        Apply the VPP cpu config.

        :param node: Node dictionary with cpuinfo.
        :type node: dict
        :returns: The rendered cpu configuration section text
        """

        if 'vpp_main_core' in node['cpu']:
            vpp_main_core = node['cpu']['vpp_main_core']
        # NOTE(review): identity comparison with an int literal relies on
        # CPython small-int caching — use `vpp_main_core != 0`.
        if vpp_main_core is not 0:
            cpu += ' main-core {}\n'.format(vpp_main_core)

        vpp_workers = node['cpu']['vpp_workers']
        vpp_worker_len = len(vpp_workers)
        if vpp_worker_len > 0:
            # Render workers as a comma-separated list of single cores or
            # "first-last" ranges (separator/else lines elided here).
            for i, worker in enumerate(vpp_workers):
                vpp_worker_str += ','
                if worker[0] == worker[1]:
                    vpp_worker_str += "{}".format(worker[0])
                    vpp_worker_str += "{}-{}".format(worker[0], worker[1])
            cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
    def _apply_vpp_devices(node):
        """
        Apply VPP PCI Device configuration to vpp startup.

        :param node: Node dictionary with cpuinfo.
        :type node: dict
        :returns: The rendered dpdk device configuration section text
        """

        ports_per_numa = node['cpu']['ports_per_numa']
        total_mbufs = node['cpu']['total_mbufs']

        for item in ports_per_numa.items():
            interfaces = value['interfaces']

            # if 0 was specified for the number of vpp workers, use 1 queue
            if 'rx_queues' in value:
                num_rx_queues = value['rx_queues']
            if 'tx_queues' in value:
                num_tx_queues = value['tx_queues']

            # Create the devices string
            for interface in interfaces:
                pci_address = interface['pci_address']
                pci_address = pci_address.lstrip("'").rstrip("'")
                devices += ' dev {} {{ \n'.format(pci_address)
                # Guard lines selecting between these branches are elided.
                devices += ' num-rx-queues {}\n'.format(num_rx_queues)
                devices += ' num-rx-queues {}\n'.format(1)
                devices += ' num-tx-queues {}\n'.format(num_tx_queues)
                devices += ' num-rx-desc {}\n'.format(num_rx_desc)
                devices += ' num-tx-desc {}\n'.format(num_tx_desc)

        # If the total mbufs is not 0 or less than the default, set num-bufs
        logging.debug("Total mbufs: {}".format(total_mbufs))
        # NOTE(review): use `total_mbufs != 0` — and note the `!= 0` test is
        # already implied by `> 16384`.
        if total_mbufs is not 0 and total_mbufs > 16384:
            devices += '\n num-mbufs {}'.format(total_mbufs)
    def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end,
                          reserve_vpp_main_core):
        """
        Calculate the VPP worker information.

        :param node: Node dictionary
        :param vpp_workers: List of VPP workers
        :param numa_node: Numa node
        :param other_cpus_end: The end of the cpus allocated for cores
        :param total_vpp_workers: The number of vpp workers needed
        :param reserve_vpp_main_core: Is there a core needed for the main core
        :type other_cpus_end: int
        :type total_vpp_workers: int
        :type reserve_vpp_main_core: bool
        :returns: Is a core still needed for the vpp main core
        :rtype: bool
        """
        # NOTE(review): a `total_vpp_workers,` parameter line appears
        # elided from the signature in this listing.

        # Can we fit the workers in one of these slices
        cpus = node['cpu']['cpus_per_node'][numa_node]
        # Skip past the cpus reserved for non-VPP processes.
        if start <= other_cpus_end:
            start = other_cpus_end + 1

        # Body of this guard (shifting start for the main core) is elided.
        if reserve_vpp_main_core:

        workers_end = start + total_vpp_workers - 1

        if workers_end <= end:
            if reserve_vpp_main_core:
                node['cpu']['vpp_main_core'] = start - 1
                reserve_vpp_main_core = False
            if total_vpp_workers:
                vpp_workers.append((start, workers_end))

        # We still need to reserve the main core
        if reserve_vpp_main_core:
            node['cpu']['vpp_main_core'] = other_cpus_end + 1

        return reserve_vpp_main_core
    def _calc_desc_and_queues(total_numa_nodes,
                              total_ports_per_numa,
                              ports_per_numa_value):
        """
        Calculate the number of descriptors and queues.

        :param total_numa_nodes: The total number of numa nodes
        :param total_ports_per_numa: The total number of ports for this
            numa node
        :param total_rx_queues: The total number of rx queues / port
        :param ports_per_numa_value: The value from the ports_per_numa
            dictionary
        :type total_numa_nodes: int
        :type total_ports_per_numa: int
        :type total_rx_queues: int
        :type ports_per_numa_value: dict
        :returns: The total number of message buffers
        """
        # NOTE(review): a `total_rx_queues` parameter is documented but its
        # signature line appears elided in this listing.

        # Get the number of rx queues
        rx_queues = max(1, total_rx_queues)
        tx_queues = rx_queues * total_numa_nodes + 1

        # Get the descriptor entries
        ports_per_numa_value['rx_queues'] = rx_queues
        total_mbufs = (((rx_queues * desc_entries) +
                        (tx_queues * desc_entries)) *
                       total_ports_per_numa)
        # NOTE(review): self-assignment is a no-op and can be removed.
        total_mbufs = total_mbufs
    def _create_ports_per_numa(node, interfaces):
        """
        Create a dictionary of ports per numa node.

        :param node: Node dictionary
        :param interfaces: All the interfaces to be used by vpp
        :type node: dict
        :type interfaces: dict
        :returns: The ports per numa dictionary
        :rtype: dict
        """

        # Make a list of ports by numa node
        for item in interfaces.items():
            if i['numa_node'] not in ports_per_numa:
                ports_per_numa[i['numa_node']] = {'interfaces': []}
                ports_per_numa[i['numa_node']]['interfaces'].append(i)
                # NOTE(review): the duplicate append below is most likely the
                # body of an elided `else:` branch, not a real double-append.
                ports_per_numa[i['numa_node']]['interfaces'].append(i)
        node['cpu']['ports_per_numa'] = ports_per_numa

        return ports_per_numa
    def calculate_cpu_parameters(self):
        """
        Calculate the cpu configuration.
        """

        # Calculate the cpu parameters, needed for the
        # vpp_startup and grub configuration
        for i in self._nodes.items():
            # get total number of nic ports
            interfaces = node['interfaces']

            # Make a list of ports by numa node
            ports_per_numa = self._create_ports_per_numa(node, interfaces)

            # Get the number of cpus to skip, we never use the first cpu
            other_cpus_end = other_cpus_start + \
                node['cpu']['total_other_cpus'] - 1
            # NOTE(review): use `other_cpus_end != 0`, not `is not 0`.
            if other_cpus_end is not 0:
                other_workers = (other_cpus_start, other_cpus_end)
                node['cpu']['other_workers'] = other_workers

            # Allocate the VPP main core and workers
            reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
            total_vpp_cpus = node['cpu']['total_vpp_cpus']
            total_rx_queues = node['cpu']['total_rx_queues']

            # If total_vpp_cpus is 0 or is less than the numa nodes with ports
            # then we shouldn't get workers
            total_workers_node = 0
            if len(ports_per_numa):
                # NOTE(review): '/' is true division on Python 3; this most
                # likely wants integer division ('//').
                total_workers_node = total_vpp_cpus / len(ports_per_numa)
            # Body of this guard is elided in this listing.
            if reserve_vpp_main_core:
            # NOTE(review): `is not 0` on an arithmetic result is fragile;
            # use `!= 0`.
            if total_main + total_workers_node is not 0:
                for item in ports_per_numa.items():
                    # Get the number of descriptors and queues
                    mbufs = self._calc_desc_and_queues(
                        len(value['interfaces']), total_rx_queues, value)

                    # Get the VPP workers
                    reserve_vpp_main_core = self._calc_vpp_workers(
                        node, vpp_workers, numa_node,
                        other_cpus_end, total_workers_node,
                        reserve_vpp_main_core)

            total_mbufs = int(total_mbufs)
            node['cpu']['vpp_workers'] = vpp_workers
            node['cpu']['total_mbufs'] = total_mbufs
    def _apply_vpp_tcp(node):
        """
        Apply the VPP TCP stack config.

        :param node: Node dictionary with cpuinfo.
        :type node: dict
        :returns: The rendered tcp/session configuration section text
        """

        active_open_sessions = node['tcp']['active_open_sessions']
        aos = int(active_open_sessions)
        passive_open_sessions = node['tcp']['passive_open_sessions']
        pos = int(passive_open_sessions)

        # Generate the api-segment gid vpp section in any case
        # (an early-return guard for aos + pos == 0 is elided here).
        return tcp.rstrip('\n')

        # The list/join framing of these config lines is elided below.
        "# TCP stack-related configuration parameters",
        "# expecting {:d} client sessions, {:d} server sessions\n".format(
        " global-size 2000M",
        " event-queue-length {:d}".format(aos + pos),
        " preallocated-sessions {:d}".format(aos + pos),
        " v4-session-table-buckets {:d}".format((aos + pos) // 4),
        " v4-session-table-memory 3g\n"
        tcp = tcp + " v4-halfopen-table-buckets {:d}".format(
            (aos + pos) // 4) + "\n"
        tcp = tcp + " v4-halfopen-table-memory 3g\n"
        tcp = tcp + " local-endpoints-table-buckets {:d}".format(
            (aos + pos) // 4) + "\n"
        tcp = tcp + " local-endpoints-table-memory 3g\n"
        tcp = tcp + "tcp {\n"
        tcp = tcp + " preallocated-connections {:d}".format(aos + pos) + "\n"
        tcp = tcp + " preallocated-half-open-connections {:d}".format(
        return tcp.rstrip('\n')
    def apply_vpp_startup(self):
        """
        Apply the vpp startup configuration: render each node's startup
        template with the generated sections and write it back.
        """

        # Apply the VPP startup configuration
        # NOTE(review): several `if ret != 0:` guard lines around the raise
        # statements are elided in this listing.
        for i in self._nodes.items():
            # Get the startup file
            rootdir = node['rootdir']
            sfile = rootdir + node['vpp']['startup_config_file']

            # Get the device section
            devices = self._apply_vpp_devices(node)

            # Get the cpu section
            cpu = self._apply_vpp_cpu(node)

            # Get the unix config
            unix = self._apply_vpp_unix(node)

            # Get the TCP configuration, if any
            tcp = self._apply_vpp_tcp(node)

            # Make a backup if needed
            self._autoconfig_backup_file(sfile)

            # Render the startup template with the generated sections.
            tfile = sfile + '.template'
            (ret, stdout, stderr) = \
                VPPUtil.exec_command('cat {}'.format(tfile))
            raise RuntimeError('Executing cat command failed to node {}'.
                               format(node['host']))
            startup = stdout.format(unix=unix,
            # Replace the old startup file with the rendered contents.
            (ret, stdout, stderr) = \
                VPPUtil.exec_command('rm {}'.format(sfile))
            logging.debug(stderr)
            cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
            (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
            raise RuntimeError('Writing config failed node {}'.
                               format(node['host']))
    def apply_grub_cmdline(self):
        """
        Apply the grub cmdline (isolcpus for the VPP cores) on each node.
        """

        for i in self._nodes.items():
            # Get the isolated CPUs
            other_workers = node['cpu']['other_workers']
            vpp_workers = node['cpu']['vpp_workers']
            if 'vpp_main_core' in node['cpu']:
                vpp_main_core = node['cpu']['vpp_main_core']
            if other_workers is not None:
                all_workers = [other_workers]
            # NOTE(review): use `vpp_main_core != 0`, not `is not 0`.
            if vpp_main_core is not 0:
                all_workers += [(vpp_main_core, vpp_main_core)]
            all_workers += vpp_workers

            # Build the isolcpus value: single cores or "first-last" ranges,
            # comma separated (separator/else lines elided here).
            for idx, worker in enumerate(all_workers):
                if worker[0] == worker[1]:
                    isolated_cpus += "{}".format(worker[0])
                    isolated_cpus += "{}-{}".format(worker[0], worker[1])

            vppgrb = VppGrubUtil(node)
            current_cmdline = vppgrb.get_current_cmdline()
            # Body of this guard (creating node['grub']) is elided.
            if 'grub' not in node:
            node['grub']['current_cmdline'] = current_cmdline
            node['grub']['default_cmdline'] = \
                vppgrb.apply_cmdline(node, isolated_cpus)
    def get_hugepages(self):
        """
        Get the hugepage configuration from each node and cache it in the
        node dictionary.
        """

        for i in self._nodes.items():
            hpg = VppHugePageUtil(node)
            max_map_count, shmmax = hpg.get_huge_page_config()
            node['hugepages']['max_map_count'] = max_map_count
            # NOTE(review): key 'shmax' looks like a typo for 'shmmax';
            # confirm against readers of this key before changing it.
            node['hugepages']['shmax'] = shmmax
            total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
            node['hugepages']['actual_total'] = total
            node['hugepages']['free'] = free
            node['hugepages']['size'] = size
            node['hugepages']['memtotal'] = memtotal
            node['hugepages']['memfree'] = memfree
        # NOTE(review): the enclosing `def` line is elided in this listing
        # (this reads as the get_grub method).
        """
        Get the grub configuration
        """

        for i in self._nodes.items():
            vppgrb = VppGrubUtil(node)
            current_cmdline = vppgrb.get_current_cmdline()
            default_cmdline = vppgrb.get_default_cmdline()

            # Get the total number of isolated CPUs
            iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
            iso_cpurl = len(iso_cpur)
            iso_cpu_str = iso_cpur[0]
            iso_cpu_str = iso_cpu_str.split('=')[1]
            iso_cpul = iso_cpu_str.split(',')
            # Count cpus covered by each entry: a single cpu, or an
            # inclusive "first-second" range.
            for iso_cpu in iso_cpul:
                isocpuspl = iso_cpu.split('-')
                # NOTE(review): `is 1` compares identity; use `== 1`.
                if len(isocpuspl) is 1:
                    current_iso_cpus += 1
                    first = int(isocpuspl[0])
                    second = int(isocpuspl[1])
                    current_iso_cpus += 1
                    current_iso_cpus += second - first

            # Body of this guard (creating node['grub']) is elided.
            if 'grub' not in node:
            node['grub']['current_cmdline'] = current_cmdline
            node['grub']['default_cmdline'] = default_cmdline
            node['grub']['current_iso_cpus'] = current_iso_cpus
    def _get_device(node):
        """
        Get the device configuration for a single node.

        :param node: Node dictionary with cpuinfo.
        :type node: dict
        """

        vpp = VppPCIUtil(node)
        vpp.get_all_devices()

        # Save the device information
        node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
        node['devices']['kernel_devices'] = vpp.get_kernel_devices()
        node['devices']['other_devices'] = vpp.get_other_devices()
        node['devices']['linkup_devices'] = vpp.get_link_up_devices()
    def get_devices_per_node(self):
        """
        Get the device configuration for all the nodes.
        """

        for i in self._nodes.items():
            # Update the interface data
            self._get_device(node)
    def get_cpu_layout(node):
        """
        Use `lscpu -p` to get the cpu layout.
        Returns a list with each item representing a single cpu.

        :param node: Node dictionary.
        :type node: dict
        :returns: The cpu layout
        """

        # `cmd` assignment and the `if ret != 0:` guard are elided here.
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        raise RuntimeError('{} failed on node {} {}'.
                           format(cmd, node['host'], stderr))

        lines = stdout.split('\n')
        # Skip blank lines and the '#' comment header emitted by lscpu -p.
        if line == '' or line[0] == '#':
            linesplit = line.split(',')
            # Each csv entry is: cpu, core, socket, node
            layout = {'cpu': linesplit[0], 'core': linesplit[1],
                      'socket': linesplit[2], 'node': linesplit[3]}
        # NOTE(review): the enclosing `def` line is elided in this listing
        # (this reads as the get_cpu method).
        """
        Get the cpu configuration
        """

        # Get the CPU layout
        CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)

        for i in self._nodes.items():
            layout = self.get_cpu_layout(node)
            node['cpu']['layout'] = layout

            cpuinfo = node['cpuinfo']
            smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
            node['cpu']['smt_enabled'] = smt_enabled

            # We don't want to write the cpuinfo
        # NOTE(review): the enclosing `def` line is elided in this listing.
        """
        Get the current system configuration.
        """

        # Get the Huge Page configuration
        # Get the device configuration
        self.get_devices_per_node()

        # Get the CPU configuration
        # Get the current grub cmdline
    def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
        """
        Ask the user questions related to the cpu configuration.

        :param node: Node dictionary
        :param total_cpus: The total number of cpus in the system
        :param numa_nodes: The list of numa nodes in the system
        :type node: dict
        :type total_cpus: int
        :type numa_nodes: list
        """

        print("\nYour system has {} core(s) and {} Numa Nodes.".
              format(total_cpus, len(numa_nodes)))
        print("To begin, we suggest not reserving any cores for "
              "VPP or other processes.")
        print("Then to improve performance start reserving cores and "
              "adding queues as needed.")

        # `max_vpp_cpus` computation is elided in this listing.
        question = "\nHow many core(s) shall we reserve for " \
                   "VPP [0-{}][0]? ".format(max_vpp_cpus)
        total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
        node['cpu']['total_vpp_cpus'] = total_vpp_cpus

        # NOTE(review): '/' is true division on Python 3; '//' is probably
        # intended here.
        max_other_cores = (total_cpus - total_vpp_cpus) / 2
        question = 'How many core(s) do you want to reserve for ' \
                   'processes other than VPP? [0-{}][0]? '. \
            format(str(max_other_cores))
        total_other_cpus = self._ask_user_range(
            question, 0, max_other_cores, 0)
        node['cpu']['total_other_cpus'] = total_other_cpus

        max_main_cpus = max_vpp_cpus + 1 - total_vpp_cpus
        reserve_vpp_main_core = False
        if max_main_cpus > 0:
            question = "Should we reserve 1 core for the VPP Main thread? "
            question += "[y/N]? "
            answer = self._ask_user_yn(question, 'n')
            # The answer check guarding this assignment is elided here.
            reserve_vpp_main_core = True
        node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
        node['cpu']['vpp_main_core'] = 0

        # NOTE(review): this format() call has no placeholder to fill;
        # max_vpp_cpus is unused in the string.
        question = "How many RX queues per port shall we use for " \
                   "VPP [1-4][1]? ".format(max_vpp_cpus)
        total_rx_queues = self._ask_user_range(question, 1, 4, 1)
        node['cpu']['total_rx_queues'] = total_rx_queues
    def modify_cpu(self, ask_questions=True):
        """
        Modify the cpu configuration, asking the user for the values.

        :param ask_questions: When true ask the user for config parameters
        :type ask_questions: bool
        """

        # Get the CPU layout
        CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)

        for i in self._nodes.items():
            total_cpus_per_slice = 0
            cpu_layout = self.get_cpu_layout(node)

            # Assume the number of cpus per slice is always the same as the
            # first slice (the loop break is elided in this listing).
            for cpu in cpu_layout:
                if cpu['node'] != first_node:
                total_cpus_per_slice += 1

            # Get the total number of cpus, cores, and numa nodes from the
            # layout
            for cpul in cpu_layout:
                numa_node = cpul['node']
                if numa_node not in cpus_per_node:
                    cpus_per_node[numa_node] = []
                # Record one (start, end) slice per group of cpus.
                cpuperslice = int(cpu) % total_cpus_per_slice
                if cpuperslice == 0:
                    cpus_per_node[numa_node].append((int(cpu), int(cpu) +
                                                     total_cpus_per_slice - 1))
                if numa_node not in numa_nodes:
                    numa_nodes.append(numa_node)
                if core not in cores:
            node['cpu']['cpus_per_node'] = cpus_per_node

            # Ask the user some questions
            if ask_questions and total_cpus >= 8:
                self._modify_cpu_questions(node, total_cpus, numa_nodes)

            # Populate the interfaces with the numa node
            if 'interfaces' in node:
                ikeys = node['interfaces'].keys()
                VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))

            # We don't want to write the cpuinfo
            node['cpuinfo'] = ""

        # Update auto-config.yaml
        self._update_auto_config()
    def _modify_other_devices(self, node,
                              other_devices, kernel_devices, dpdk_devices):
        """
        Modify the devices configuration, asking the user for the values.

        Devices currently used by neither VPP nor the OS can be given back
        to the OS (bound to a kernel driver) or handed to VPP/DPDK.

        :param node: Node dictionary
        :param other_devices: Devices not bound to kernel or DPDK drivers
        :param kernel_devices: Devices bound to kernel drivers
        :param dpdk_devices: Devices bound to DPDK drivers
        :type node: dict
        """

        odevices_len = len(other_devices)
        if odevices_len > 0:
            print("\nThese device(s) are currently NOT being used "
                  "by VPP or the OS.\n")
            VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
            question = "\nWould you like to give any of these devices"
            question += " back to the OS [Y/n]? "
            answer = self._ask_user_yn(question, 'Y')
            # First pass: offer each device back to the OS.
            for dit in other_devices.items():
                question = "Would you like to use device {} for". \
                question += " the OS [y/N]? "
                answer = self._ask_user_yn(question, 'n')
                    if 'unused' in device and len(
                            device['unused']) != 0 and \
                            device['unused'][0] != '':
                        driver = device['unused'][0]
                        ret = VppPCIUtil.bind_vpp_device(
                            'Could not bind device {}'.format(dvid))
            # Move the rebound devices into the kernel device list.
            for dit in vppd.items():
                kernel_devices[dvid] = device
                del other_devices[dvid]

        odevices_len = len(other_devices)
        if odevices_len > 0:
            print("\nThese device(s) are still NOT being used "
                  "by VPP or the OS.\n")
            VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
            question = "\nWould you like use any of these for VPP [y/N]? "
            answer = self._ask_user_yn(question, 'N')
            # Second pass: offer the remaining devices to VPP/DPDK.
            for dit in other_devices.items():
                question = "Would you like to use device {} ".format(dvid)
                question += "for VPP [y/N]? "
                answer = self._ask_user_yn(question, 'n')
            for dit in vppd.items():
                if 'unused' in device and len(device['unused']) != 0 and \
                        device['unused'][0] != '':
                    driver = device['unused'][0]
                        'Binding device {} to driver {}'.format(dvid,
                    ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
                        'Could not bind device {}'.format(dvid))
                dpdk_devices[dvid] = device
                del other_devices[dvid]
    def update_interfaces_config(self):
        """
        Modify the interfaces directly from the config file, keeping only
        interfaces whose PCI address is still present on the node.
        """

        for i in self._nodes.items():
            devices = node['devices']
            all_devices = devices['other_devices']
            all_devices.update(devices['dpdk_devices'])
            all_devices.update(devices['kernel_devices'])

            if 'interfaces' in node:
                current_ifcs = node['interfaces']
            for ifc in current_ifcs.values():
                dvid = ifc['pci_address']
                if dvid in all_devices:
                    VppPCIUtil.vpp_create_interface(interfaces, dvid,
            node['interfaces'] = interfaces
    def modify_devices(self):
        """
        Modify the devices configuration, asking the user for the values.

        Walks the devices on each node: offers kernel-bound devices to VPP,
        offers DPDK-bound devices back to the kernel, then rebuilds the
        node's interfaces from the final DPDK device list.
        """

        for i in self._nodes.items():
            devices = node['devices']
            other_devices = devices['other_devices']
            kernel_devices = devices['kernel_devices']
            dpdk_devices = devices['dpdk_devices']

            # Handle devices bound to neither kernel nor DPDK drivers.
            self._modify_other_devices(node, other_devices,
                                       kernel_devices, dpdk_devices)

            # Get the devices again for this node
            self._get_device(node)
            devices = node['devices']
            kernel_devices = devices['kernel_devices']
            dpdk_devices = devices['dpdk_devices']

            klen = len(kernel_devices)
            print("\nThese devices are safe to be used with VPP.\n")
            VppPCIUtil.show_vpp_devices(kernel_devices)
            question = "\nWould you like to use any of these " \
                       "device(s) for VPP [y/N]? "
            answer = self._ask_user_yn(question, 'n')
            # Offer each kernel-bound device to VPP.
            for dit in kernel_devices.items():
                question = "Would you like to use device {} ".format(dvid)
                question += "for VPP [y/N]? "
                answer = self._ask_user_yn(question, 'n')
            for dit in vppd.items():
                if 'unused' in device and len(
                        device['unused']) != 0 and device['unused'][
                    driver = device['unused'][0]
                    question = "Would you like to bind the driver {} for {} [y/N]? ".format(driver, dvid)
                    answer = self._ask_user_yn(question, 'n')
                    logging.debug('Binding device {} to driver {}'.format(dvid, driver))
                    ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
                    # Bind failures are logged, not fatal.
                    logging.debug('Could not bind device {}'.format(dvid))
                dpdk_devices[dvid] = device
                del kernel_devices[dvid]

            dlen = len(dpdk_devices)
            print("\nThese device(s) are already using DPDK.\n")
            VppPCIUtil.show_vpp_devices(dpdk_devices,
                                        show_interfaces=False)
            question = "\nWould you like to remove any of "
            question += "these device(s) [y/N]? "
            answer = self._ask_user_yn(question, 'n')
            # Offer each DPDK-bound device back to the kernel.
            for dit in dpdk_devices.items():
                question = "Would you like to remove {} [y/N]? ". \
                answer = self._ask_user_yn(question, 'n')
                vppdl[dvid] = device
            for dit in vppdl.items():
                if 'unused' in device and len(
                        device['unused']) != 0 and device['unused'][
                    driver = device['unused'][0]
                        'Binding device {} to driver {}'.format(
                    ret = VppPCIUtil.bind_vpp_device(node, driver,
                        'Could not bind device {}'.format(dvid))
                kernel_devices[dvid] = device
                del dpdk_devices[dvid]

            # Rebuild the node's interface dictionary from the devices
            # now bound to DPDK.
            for dit in dpdk_devices.items():
                VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
            node['interfaces'] = interfaces

        self._update_auto_config()
    def modify_huge_pages(self):
        """
        Modify the huge page configuration, asking the user for the values.
        """

        for i in self._nodes.items():
            total = node['hugepages']['actual_total']
            free = node['hugepages']['free']
            size = node['hugepages']['size']
            memfree = node['hugepages']['memfree'].split(' ')[0]
            hugesize = int(size.split(' ')[0])
            # The max number of huge pages should be no more than
            # 70% of total free memory
            maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // \
            print("\nThere currently {} {} huge pages free.".format(
            question = "Do you want to reconfigure the number of " \
                       "huge pages [y/N]? "
            answer = self._ask_user_yn(question, 'n')
            # Keep the currently discovered total when the user declines.
            node['hugepages']['total'] = total

            print("\nThere currently a total of {} huge pages.".
            question = "How many huge pages do you want [{} - {}][{}]? ". \
                format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
            # NOTE(review): the literal 1024s duplicate MIN_TOTAL_HUGE_PAGES;
            # keep them in sync (or use the constant).
            answer = self._ask_user_range(question, 1024, maxpages, 1024)
            node['hugepages']['total'] = str(answer)

        # Update auto-config.yaml
        self._update_auto_config()

        # Rediscover just the hugepages
        self.get_hugepages()
    def get_tcp_params(self):
        """
        Get the tcp configuration.

        NOTE: currently a placeholder with no discovery to perform.
        """
        # maybe nothing to do here?
    def acquire_tcp_params(self):
        """
        Ask the user for TCP stack configuration parameters.
        """

        for i in self._nodes.items():
            question = "\nHow many active-open / tcp client sessions are " \
                       "expected [0-10000000][0]? "
            answer = self._ask_user_range(question, 0, 10000000, 0)
            # Less than 10K is equivalent to 0
            # (the `answer = 0` body of this guard is elided here).
            if int(answer) < 10000:
            node['tcp']['active_open_sessions'] = answer

            question = "How many passive-open / tcp server sessions are " \
                       "expected [0-10000000][0]? "
            answer = self._ask_user_range(question, 0, 10000000, 0)
            # Less than 10K is equivalent to 0
            if int(answer) < 10000:
            node['tcp']['passive_open_sessions'] = answer

        # Update auto-config.yaml
        self._update_auto_config()

        # Rediscover tcp parameters
        self.get_tcp_params()
    def patch_qemu(node):
        """
        Patch qemu with the correct patches.

        :param node: Node dictionary
        :type node: dict
        """

        print('\nWe are patching the node "{}":\n'.format(node['host']))
        QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
        # NOTE(review): the enclosing `def` line is elided in this listing
        # (this reads as the cpu_info method).
        """
        print the CPU information
        """

        cpu = CpuUtils.get_cpu_info_per_node(node)

        # Print selected lscpu fields, right-aligned; the `item = ...` and
        # `if item in cpu:` lines for some fields are elided here.
        print("{:>20}: {}".format(item, cpu[item]))
        print("{:>20}: {}".format(item, cpu[item]))
        item = 'Thread(s) per core'
        print("{:>20}: {}".format(item, cpu[item]))
        item = 'Core(s) per socket'
        print("{:>20}: {}".format(item, cpu[item]))
        print("{:>20}: {}".format(item, cpu[item]))
        item = 'NUMA node(s)'
        numa_nodes = int(cpu[item])
        # Print the cpu list of each NUMA node.
        for i in range(0, numa_nodes):
            item = "NUMA node{} CPU(s)".format(i)
            print("{:>20}: {}".format(item, cpu[item]))
        item = 'CPU max MHz'
        print("{:>20}: {}".format(item, cpu[item]))
        item = 'CPU min MHz'
        print("{:>20}: {}".format(item, cpu[item]))

        # Report SMT state (the branch bodies setting `smt` are elided).
        if node['cpu']['smt_enabled']:
        print("{:>20}: {}".format('SMT', smt))

        print("\nVPP Threads: (Name: Cpu Number)")
        vpp_processes = cpu['vpp_processes']
        for i in vpp_processes.items():
            print(" {:10}: {:4}".format(i[0], i[1]))
    def device_info(node):
        """
        Show the device information.

        :param node: Node dictionary
        :type node: dict
        """

        if 'cpu' in node and 'total_mbufs' in node['cpu']:
            total_mbufs = node['cpu']['total_mbufs']
            # NOTE(review): use `total_mbufs != 0` rather than `is not 0`.
            if total_mbufs is not 0:
                print("Total Number of Buffers: {}".format(total_mbufs))

        vpp = VppPCIUtil(node)
        vpp.get_all_devices()
        linkup_devs = vpp.get_link_up_devices()
        if len(linkup_devs):
            print("\nDevices with link up (can not be used with VPP):")
            vpp.show_vpp_devices(linkup_devs, show_header=False)
            # for dev in linkup_devs:

        kernel_devs = vpp.get_kernel_devices()
        if len(kernel_devs):
            print("\nDevices bound to kernel drivers:")
            vpp.show_vpp_devices(kernel_devs, show_header=False)
            # else-branch (the `else:` line is elided here).
            print("\nNo devices bound to kernel drivers")

        dpdk_devs = vpp.get_dpdk_devices()
            print("\nDevices bound to DPDK drivers:")
            vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
            print("\nNo devices bound to DPDK drivers")

        other_devs = vpp.get_other_devices()
            print("\nDevices not bound to Kernel or DPDK drivers:")
            vpp.show_vpp_devices(other_devs, show_interfaces=True,
            print("\nNo devices not bound to Kernel or DPDK drivers")

        # Ask VPP itself which interfaces it owns.
        interfaces = vpputl.get_hardware(node)
        if interfaces == {}:

        print("\nDevices in use by VPP:")
        if len(interfaces.items()) < 2:

        # Header then one row per interface (local0 is skipped).
        print("{:30} {:4} {:4} {:7} {:4} {:7}".
              format('Name', 'Numa', 'RXQs',
                     'RXDescs', 'TXQs', 'TXDescs'))
        for intf in sorted(interfaces.items()):
            if name == 'local0':
            numa = rx_qs = rx_ds = tx_qs = tx_ds = ''
            numa = int(value['numa'])
            if 'rx queues' in value:
                rx_qs = int(value['rx queues'])
            if 'rx descs' in value:
                rx_ds = int(value['rx descs'])
            if 'tx queues' in value:
                tx_qs = int(value['tx queues'])
            if 'tx descs' in value:
                tx_ds = int(value['tx descs'])

            print("{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".
                  format(name, numa, rx_qs, rx_ds, tx_qs, tx_ds))
1478 def hugepage_info(node):
1480 Show the huge page information.
1484 hpg = VppHugePageUtil(node)
1485 hpg.show_huge_pages()
1488 def min_system_resources(node):
1490 Check the system for basic minimum resources, return true if
1500 if 'layout' in node['cpu']:
1501 total_cpus = len(node['cpu']['layout'])
1503 print("\nThere is only {} CPU(s) available on this system. "
1504 "This is not enough to run VPP.".format(total_cpus))
1508 if 'free' in node['hugepages'] and \
1509 'memfree' in node['hugepages'] and \
1510 'size' in node['hugepages']:
1511 free = node['hugepages']['free']
1512 memfree = float(node['hugepages']['memfree'].split(' ')[0])
1513 hugesize = float(node['hugepages']['size'].split(' ')[0])
1515 memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1516 percentmemhugepages = (memhugepages / memfree) * 100
1517 if free is '0' and \
1518 percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1520 "\nThe System has only {} of free memory. You will not "
1521 "be able to allocate enough Huge Pages for VPP.".format(
1531 Print the system information
1535 for i in self._nodes.items():
1536 print("\n==============================")
1540 print("NODE: {}\n".format(name))
1547 print("\nGrub Command Line:")
1549 print(" Current: {}".format(
1550 node['grub']['current_cmdline']))
1551 print(" Configured: {}".format(
1552 node['grub']['default_cmdline']))
1555 print("\nHuge Pages:")
1556 self.hugepage_info(node)
1560 self.device_info(node)
1563 print("\nVPP Service Status:")
1564 state, errors = VPPUtil.status(node)
1565 print(" {}".format(state))
1567 print(" {}".format(e))
1569 # Minimum system resources
1570 self.min_system_resources(node)
1572 print("\n==============================")
1574 def _ipv4_interface_setup_questions(self, node):
1576 Ask the user some questions and get a list of interfaces
1577 and IPv4 addresses associated with those interfaces
1579 :param node: Node dictionary.
1581 :returns: A list or interfaces with ip addresses
1586 interfaces = vpputl.get_hardware(node)
1587 if interfaces == {}:
1590 interfaces_with_ip = []
1591 for intf in sorted(interfaces.items()):
1593 if name == 'local0':
1596 question = "Would you like add address to " \
1597 "interface {} [Y/n]? ".format(name)
1598 answer = self._ask_user_yn(question, 'y')
1601 addr = self._ask_user_ipv4()
1602 address['name'] = name
1603 address['addr'] = addr
1604 interfaces_with_ip.append(address)
1606 return interfaces_with_ip
1608 def ipv4_interface_setup(self):
1610 After asking the user some questions, get a list of interfaces
1611 and IPv4 addresses associated with those interfaces
1615 for i in self._nodes.items():
1618 # Show the current interfaces with IP addresses
1619 current_ints = VPPUtil.get_int_ip(node)
1620 if current_ints is not {}:
1621 print("\nThese are the current interfaces with IP addresses:")
1622 for items in sorted(current_ints.items()):
1625 if 'address' not in value:
1628 address = value['address']
1629 print("{:30} {:20} {:10}".format(name, address,
1631 question = "\nWould you like to keep this configuration " \
1633 answer = self._ask_user_yn(question, 'y')
1637 print("\nThere are currently no interfaces with IP "
1640 # Create a script that add the ip addresses to the interfaces
1641 # and brings the interfaces up
1642 ints_with_addrs = self._ipv4_interface_setup_questions(node)
1644 for ints in ints_with_addrs:
1647 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1648 setintupstr = 'set int state {} up\n'.format(name)
1649 content += setipstr + setintupstr
1651 # Write the content to the script
1652 rootdir = node['rootdir']
1653 filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1654 with open(filename, 'w+') as sfile:
1655 sfile.write(content)
1657 # Execute the script
1658 cmd = 'vppctl exec {}'.format(filename)
1659 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1661 logging.debug(stderr)
1663 print("\nA script as been created at {}".format(filename))
1664 print("This script can be run using the following:")
1665 print("vppctl exec {}\n".format(filename))
1667 def _create_vints_questions(self, node):
1669 Ask the user some questions and get a list of interfaces
1670 and IPv4 addresses associated with those interfaces
1672 :param node: Node dictionary.
1674 :returns: A list or interfaces with ip addresses
1679 interfaces = vpputl.get_hardware(node)
1680 if interfaces == {}:
1683 # First delete all the Virtual interfaces
1684 for intf in sorted(interfaces.items()):
1686 if name[:7] == 'Virtual':
1687 cmd = 'vppctl delete vhost-user {}'.format(name)
1688 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1690 logging.debug('{} failed on node {} {}'.format(
1691 cmd, node['host'], stderr))
1693 # Create a virtual interface, for each interface the user wants to use
1694 interfaces = vpputl.get_hardware(node)
1695 if interfaces == {}:
1697 interfaces_with_virtual_interfaces = []
1699 for intf in sorted(interfaces.items()):
1701 if name == 'local0':
1704 question = "Would you like connect this interface {} to " \
1705 "the VM [Y/n]? ".format(name)
1706 answer = self._ask_user_yn(question, 'y')
1708 sockfilename = '/var/run/vpp/{}.sock'.format(
1709 name.replace('/', '_'))
1710 if os.path.exists(sockfilename):
1711 os.remove(sockfilename)
1712 cmd = 'vppctl create vhost-user socket {} server'.format(
1714 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1717 "Couldn't execute the command {}, {}.".format(cmd,
1719 vintname = stdout.rstrip('\r\n')
1721 cmd = 'chmod 777 {}'.format(sockfilename)
1722 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1725 "Couldn't execute the command {}, {}.".format(cmd,
1728 interface = {'name': name,
1729 'virtualinterface': '{}'.format(vintname),
1730 'bridge': '{}'.format(inum)}
1732 interfaces_with_virtual_interfaces.append(interface)
1734 return interfaces_with_virtual_interfaces
1736 def create_and_bridge_virtual_interfaces(self):
1738 After asking the user some questions, create a VM and connect
1739 the interfaces to VPP interfaces
1743 for i in self._nodes.items():
1746 # Show the current bridge and interface configuration
1747 print("\nThis the current bridge configuration:")
1748 VPPUtil.show_bridge(node)
1749 question = "\nWould you like to keep this configuration [Y/n]? "
1750 answer = self._ask_user_yn(question, 'y')
1754 # Create a script that builds a bridge configuration with
1755 # physical interfaces and virtual interfaces
1756 ints_with_vints = self._create_vints_questions(node)
1758 for intf in ints_with_vints:
1759 vhoststr = '\n'.join([
1760 'comment { The following command creates the socket }',
1761 'comment { and returns a virtual interface }',
1762 'comment {{ create vhost-user socket '
1763 '/var/run/vpp/sock{}.sock server }}\n'.format(
1767 setintdnstr = 'set interface state {} down\n'.format(
1770 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1771 intf['name'], intf['bridge'])
1772 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1773 intf['virtualinterface'], intf['bridge'])
1775 # set interface state VirtualEthernet/0/0/0 up
1776 setintvststr = 'set interface state {} up\n'.format(
1777 intf['virtualinterface'])
1779 # set interface state VirtualEthernet/0/0/0 down
1780 setintupstr = 'set interface state {} up\n'.format(
1783 content += vhoststr + setintdnstr + setintbrstr + \
1784 setvintbrstr + setintvststr + setintupstr
1786 # Write the content to the script
1787 rootdir = node['rootdir']
1788 filename = rootdir + \
1789 '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1790 with open(filename, 'w+') as sfile:
1791 sfile.write(content)
1793 # Execute the script
1794 cmd = 'vppctl exec {}'.format(filename)
1795 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1797 logging.debug(stderr)
1799 print("\nA script as been created at {}".format(filename))
1800 print("This script can be run using the following:")
1801 print("vppctl exec {}\n".format(filename))
1803 def _iperf_vm_questions(self, node):
1805 Ask the user some questions and get a list of interfaces
1806 and IPv4 addresses associated with those interfaces
1808 :param node: Node dictionary.
1810 :returns: A list or interfaces with ip addresses
1815 interfaces = vpputl.get_hardware(node)
1816 if interfaces == {}:
1819 # First delete all the Virtual interfaces
1820 for intf in sorted(interfaces.items()):
1822 if name[:7] == 'Virtual':
1823 cmd = 'vppctl delete vhost-user {}'.format(name)
1824 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1826 logging.debug('{} failed on node {} {}'.format(
1827 cmd, node['host'], stderr))
1829 # Create a virtual interface, for each interface the user wants to use
1830 interfaces = vpputl.get_hardware(node)
1831 if interfaces == {}:
1833 interfaces_with_virtual_interfaces = []
1837 print('\nPlease pick one interface to connect to the iperf VM.')
1838 for intf in sorted(interfaces.items()):
1840 if name == 'local0':
1843 question = "Would you like connect this interface {} to " \
1844 "the VM [y/N]? ".format(name)
1845 answer = self._ask_user_yn(question, 'n')
1847 self._sockfilename = '/var/run/vpp/{}.sock'.format(
1848 name.replace('/', '_'))
1849 if os.path.exists(self._sockfilename):
1850 os.remove(self._sockfilename)
1851 cmd = 'vppctl create vhost-user socket {} server'.format(
1853 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1856 "Couldn't execute the command {}, {}.".format(
1858 vintname = stdout.rstrip('\r\n')
1860 cmd = 'chmod 777 {}'.format(self._sockfilename)
1861 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1864 "Couldn't execute the command {}, {}.".format(
1867 interface = {'name': name,
1868 'virtualinterface': '{}'.format(vintname),
1869 'bridge': '{}'.format(inum)}
1871 interfaces_with_virtual_interfaces.append(interface)
1872 return interfaces_with_virtual_interfaces
1874 def create_and_bridge_iperf_virtual_interface(self):
1876 After asking the user some questions, and create and bridge a
1877 virtual interface to be used with iperf VM
1881 for i in self._nodes.items():
1884 # Show the current bridge and interface configuration
1885 print("\nThis the current bridge configuration:")
1886 ifaces = VPPUtil.show_bridge(node)
1887 question = "\nWould you like to keep this configuration [Y/n]? "
1888 answer = self._ask_user_yn(question, 'y')
1890 self._sockfilename = '/var/run/vpp/{}.sock'.format(
1891 ifaces[0]['name'].replace('/', '_'))
1892 if os.path.exists(self._sockfilename):
1895 # Create a script that builds a bridge configuration with
1896 # physical interfaces and virtual interfaces
1897 ints_with_vints = self._iperf_vm_questions(node)
1899 for intf in ints_with_vints:
1900 vhoststr = '\n'.join([
1901 'comment { The following command creates the socket }',
1902 'comment { and returns a virtual interface }',
1903 'comment {{ create vhost-user socket '
1904 '/var/run/vpp/sock{}.sock server }}\n'.format(
1908 setintdnstr = 'set interface state {} down\n'.format(
1911 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1912 intf['name'], intf['bridge'])
1913 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1914 intf['virtualinterface'], intf['bridge'])
1916 # set interface state VirtualEthernet/0/0/0 up
1917 setintvststr = 'set interface state {} up\n'.format(
1918 intf['virtualinterface'])
1920 # set interface state VirtualEthernet/0/0/0 down
1921 setintupstr = 'set interface state {} up\n'.format(
1924 content += vhoststr + setintdnstr + setintbrstr + \
1925 setvintbrstr + setintvststr + setintupstr
1927 # Write the content to the script
1928 rootdir = node['rootdir']
1929 filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
1930 with open(filename, 'w+') as sfile:
1931 sfile.write(content)
1933 # Execute the script
1934 cmd = 'vppctl exec {}'.format(filename)
1935 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1937 logging.debug(stderr)
1939 print("\nA script as been created at {}".format(filename))
1940 print("This script can be run using the following:")
1941 print("vppctl exec {}\n".format(filename))
1944 def destroy_iperf_vm(name):
1946 After asking the user some questions, create a VM and connect
1947 the interfaces to VPP interfaces
1949 :param name: The name of the VM to be be destroyed
1954 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1956 logging.debug(stderr)
1958 "Couldn't execute the command {} : {}".format(cmd, stderr))
1960 if re.findall(name, stdout):
1961 cmd = 'virsh destroy {}'.format(name)
1962 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1964 logging.debug(stderr)
1966 "Couldn't execute the command {} : {}".format(
1969 def create_iperf_vm(self, vmname):
1971 After asking the user some questions, create a VM and connect
1972 the interfaces to VPP interfaces
1976 # Read the iperf VM template file
1977 distro = VPPUtil.get_linux_distro()
1978 if distro[0] == 'Ubuntu':
1980 '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(
1984 '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(
1987 with open(tfilename, 'r') as tfile:
1988 tcontents = tfile.read()
1992 imagename = '{}/vpp/vpp-config/{}'.format(
1993 self._rootdir, IPERFVM_IMAGE)
1994 isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
1995 tcontents = tcontents.format(vmname=vmname, imagename=imagename,
1997 vhostsocketname=self._sockfilename)
2000 ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
2001 with open(ifilename, 'w+') as ifile:
2002 ifile.write(tcontents)
2005 cmd = 'virsh create {}'.format(ifilename)
2006 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
2008 logging.debug(stderr)
2010 "Couldn't execute the command {} : {}".format(cmd, stderr))