1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
15 from __future__ import absolute_import, division, print_function
20 from ipaddress import ip_address
24 from vpplib.VPPUtil import VPPUtil
25 from vpplib.VppPCIUtil import VppPCIUtil
26 from vpplib.VppHugePageUtil import VppHugePageUtil
27 from vpplib.CpuUtils import CpuUtils
28 from vpplib.VppGrubUtil import VppGrubUtil
29 from vpplib.QemuUtils import QemuUtils
31 # Python2/3 compatible
# NOTE(review): the raw_input alias is presumably wrapped in a
# try/except NameError guard in the full file (lines 32/34-35 are not in
# this excerpt); as shown, this line would raise NameError on Python 3.
33 input = raw_input  # noqa
# Public API of this module.
37 __all__ = ["AutoConfig"]
# Hugepage sizing bounds: never configure fewer than 1024 pages, and never
# let hugepages consume more than 70% of free memory (see modify_huge_pages).
41 MIN_TOTAL_HUGE_PAGES = 1024
42 MAX_PERCENT_FOR_HUGE_PAGES = 70
# Artifacts used to create the iperf test VM (paths relative to rootdir).
44 IPERFVM_XML = 'configs/iperf-vm.xml'
45 IPERFVM_IMAGE = 'images/xenial-mod.img'
46 IPERFVM_ISO = 'configs/cloud-config.iso'
49 class AutoConfig(object):
50 """Auto Configuration Tools"""
# Constructor: records the auto-config file path and root directory and
# initializes per-instance state. Several original lines (55, 59-62, 65-66,
# 69-70, 72+) are missing from this excerpt — e.g. the assignments of
# self._clean, self._metadata and self._nodes referenced by other methods
# are presumably among them; verify against the full source.
52 def __init__(self, rootdir, filename, clean=False):
54 The Auto Configure class.
56 :param rootdir: The root directory for all the auto configuration files
57 :param filename: The autoconfiguration file
58 :param clean: When set initialize the nodes from the auto-config file
# Full path of the auto-config YAML file (simple concatenation, not
# os.path.join — rootdir is expected to end with a separator).
63 self._autoconfig_filename = rootdir + filename
64 self._rootdir = rootdir
67 self._vpp_devices_node = {}
68 self._hugepage_config = ""
71 self._sockfilename = ""
75 Returns the nodes dictionary.
# Make a one-time '<filename>.orig' backup copy of a config file before it
# is overwritten. The @staticmethod decorator (original line 83) and some
# docstring/status-check lines (85-87, 89-91, 95-96, 100, 102) are not in
# this excerpt.
84 def _autoconfig_backup_file(filename):
88 :param filename: The file to backup
92 # Does a copy of the file exist, if not create one
93 ofile = filename + '.orig'
# 'ls' is used as an existence probe; stdout equals the path iff the
# backup already exists.
94 (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
97 if stdout.strip('\n') != ofile:
98 cmd = 'sudo cp {} {}'.format(filename, ofile)
99 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
# Only logs stderr; copy failures are presumably non-fatal by design.
101 logging.debug(stderr)
103 # noinspection PyBroadException
# Interactively prompt for an IPv4 address in CIDR form; if the user gives
# a bare address, follow up by asking for a netmask. Loops until valid
# input (the enclosing while/try/except lines 113-114, 116, 119, 121, 125
# are missing from this excerpt).
105 def _ask_user_ipv4():
107 Asks the user for a number within a range.
108 default is returned if return is entered.
110 :returns: IP address with cidr
115 answer = input("Please enter the IPv4 Address [n.n.n.n/n]: ")
117 ipinput = answer.split('/')
# ip_address() raises ValueError on bad input — presumably caught by the
# missing except clause, which falls through to the retry message below.
118 ipaddr = ip_address(ipinput[0])
120 plen = answer.split('/')[1]
122 answer = input("Please enter the netmask [n.n.n.n]: ")
# NOTE(review): netmask_bits() is not a stdlib ipaddress method — confirm
# it exists in the targeted Python version/back-port being used.
123 plen = ip_address(answer).netmask_bits()
124 return '{}/{}'.format(ipaddr, plen)
126 print("Please enter a valid IPv4 address.")
# Prompt for an integer within [first, last]; Return accepts `default`.
# The loop/return lines (145-146, 148-150, 153-154, 156-157, 159-161) are
# missing from this excerpt.
129 def _ask_user_range(question, first, last, default):
131 Asks the user for a number within a range.
132 default is returned if return is entered.
134 :param question: Text of a question.
135 :param first: First number in the range
136 :param last: Last number in the range
137 :param default: The value returned when return is entered
138 :type question: string
142 :returns: The answer to the question
147 answer = input(question)
# NOTE(review): the character class r'[0-9+]' also matches a literal '+',
# and findall only checks that SOME digit appears — int(answer) below can
# still raise on mixed input. Worth tightening to a full-match in the
# complete source.
151 if re.findall(r'[0-9+]', answer):
152 if int(answer) in range(first, last + 1):
# NOTE(review): both messages below are missing the word "enter"
# ("Please a value..."); runtime strings, so left untouched here.
155 print("Please a value between {} and {} or Return.".
158 print("Please a number between {} and {} or Return.".
# Prompt for a yes/no answer; Return accepts `default`. Returns the
# lower-cased first letter ('y' or 'n'). Several lines (165, 167, 173-176,
# 178, 181-182, 184, 186, 188-189) are missing from this excerpt,
# including the input_valid initialization and the return statement.
164 def _ask_user_yn(question, default):
166 Asks the user for a yes or no question.
168 :param question: Text of a question.
169 :param default: The value returned when return is entered
170 :type question: string
171 :type default: string
172 :returns: The answer to the question
177 default = default.lower()
179 while not input_valid:
180 answer = input(question)
# Accept any answer containing a y/n character; only its first letter
# is used.
183 if re.findall(r'[YyNn]', answer):
185 answer = answer[0].lower()
187 print("Please answer Y, N or Return.")
# Load topology/metadata from the auto-config YAML, then prefer the nodes
# from the previously written system-config file unless `clean` was
# requested. The try: lines (200, 212), raise lines (205, 217-220), the
# else branch (222), and the loop unpack (227, presumably
# `node = i[1]`) are missing from this excerpt.
191 def _loadconfig(self):
193 Load the testbed configuration, given the auto configuration file.
197 # Get the Topology, from the topology layout file
199 with open(self._autoconfig_filename, 'r') as stream:
# NOTE(review): yaml.load without an explicit Loader is deprecated and
# unsafe on untrusted input — yaml.safe_load would be preferable if the
# config uses only plain YAML types.
201 topo = yaml.load(stream)
202 if 'metadata' in topo:
203 self._metadata = topo['metadata']
204 except yaml.YAMLError as exc:
206 "Couldn't read the Auto config file {}.".format(
207 self._autoconfig_filename, exc))
# System config reflects the last discovered state; reuse it unless the
# caller asked for a clean start.
209 systemfile = self._rootdir + self._metadata['system_config_file']
210 if self._clean is False and os.path.isfile(systemfile):
211 with open(systemfile, 'r') as sysstream:
213 systopo = yaml.load(sysstream)
214 if 'nodes' in systopo:
215 self._nodes = systopo['nodes']
216 except yaml.YAMLError as sysexc:
218 "Couldn't read the System config file {}.".format(
221 # Get the nodes from Auto Config
223 self._nodes = topo['nodes']
225 # Set the root directory in all the nodes
226 for i in self._nodes.items():
228 node['rootdir'] = self._rootdir
# Persist the current in-memory metadata + nodes to the system config
# YAML file (path taken from metadata). Docstring closing lines 234-237
# are missing from this excerpt.
230 def updateconfig(self):
232 Update the testbed configuration, given the auto configuration file.
233 We will write the system configuration file with the current node
238 # Initialize the yaml data
239 ydata = {'metadata': self._metadata, 'nodes': self._nodes}
241 # Write the system config file
242 filename = self._rootdir + self._metadata['system_config_file']
243 with open(filename, 'w') as yamlfile:
244 yaml.dump(ydata, yamlfile)
# Re-read the auto-config YAML, copy the user-selected values (interfaces,
# cpu, tcp, hugepages) from self._nodes into it, and write it back — so
# the auto-config file reflects interactive choices. Missing from this
# excerpt: the try:/raise lines (256, 261-263), loop unpacks (265-268,
# presumably key = i[0]; node = i[1], and 271-273 for port/interface),
# and assorted blank/structural lines.
246 def _update_auto_config(self):
248 Write the auto configuration file with the new configuration data,
253 # Initialize the yaml data
255 with open(self._autoconfig_filename, 'r') as stream:
# NOTE(review): yaml.load without Loader — see _loadconfig.
257 ydata = yaml.load(stream)
259 nodes = ydata['nodes']
260 except yaml.YAMLError as exc:
264 for i in nodes.items():
# Rebuild the interfaces section from the discovered state.
269 node['interfaces'] = {}
270 for item in self._nodes[key]['interfaces'].items():
274 node['interfaces'][port] = {}
275 addr = '{}'.format(interface['pci_address'])
276 node['interfaces'][port]['pci_address'] = addr
277 if 'mac_address' in interface:
278 node['interfaces'][port]['mac_address'] = \
279 interface['mac_address']
# Copy over the CPU reservation answers, if present.
281 if 'total_other_cpus' in self._nodes[key]['cpu']:
282 node['cpu']['total_other_cpus'] = \
283 self._nodes[key]['cpu']['total_other_cpus']
284 if 'total_vpp_cpus' in self._nodes[key]['cpu']:
285 node['cpu']['total_vpp_cpus'] = \
286 self._nodes[key]['cpu']['total_vpp_cpus']
287 if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
288 node['cpu']['reserve_vpp_main_core'] = \
289 self._nodes[key]['cpu']['reserve_vpp_main_core']
# Copy over the TCP session counts, if present.
292 if 'active_open_sessions' in self._nodes[key]['tcp']:
293 node['tcp']['active_open_sessions'] = \
294 self._nodes[key]['tcp']['active_open_sessions']
295 if 'passive_open_sessions' in self._nodes[key]['tcp']:
296 node['tcp']['passive_open_sessions'] = \
297 self._nodes[key]['tcp']['passive_open_sessions']
300 node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
302 # Write the auto config config file
303 with open(self._autoconfig_filename, 'w') as yamlfile:
304 yaml.dump(ydata, yamlfile)
# Apply the hugepage configuration on every node via VppHugePageUtil.
# The loop unpack (presumably `node = i[1]`, original line ~313-314) is
# missing from this excerpt.
306 def apply_huge_pages(self):
308 Apply the huge page config
312 for i in self._nodes.items():
315 hpg = VppHugePageUtil(node)
# NOTE(review): despite the method name, this calls the *dry-run* apply —
# confirm against the full source whether that is intended.
316 hpg.hugepages_dryrun_apply()
# Build the body of the VPP startup "unix { ... }" section from
# node['vpp']['unix']. The @staticmethod decorator, the initial
# `unix = ...` assignment, and the early-return line (~329-330) are
# missing from this excerpt.
319 def _apply_vpp_unix(node):
321 Apply the VPP Unix config
323 :param node: Node dictionary with cpuinfo.
328 if 'unix' not in node['vpp']:
331 unixv = node['vpp']['unix']
332 if 'interactive' in unixv:
333 interactive = unixv['interactive']
334 if interactive is True:
# NOTE(review): as excerpted this *overwrites* unix rather than
# appending — verify the original uses '+=' or seeds the value earlier.
335 unix = ' interactive\n'
337 return unix.rstrip('\n')
# Build the body of the VPP startup "cpu { ... }" section: main-core plus
# a corelist-workers range string like "2,4-7". The @staticmethod
# decorator, the `cpu = ''` seed, the else branch for single-core
# entries' separators, and the return line (~371-372) are missing from
# this excerpt.
340 def _apply_vpp_cpu(node):
342 Apply the VPP cpu config
344 :param node: Node dictionary with cpuinfo.
350 if 'vpp_main_core' in node['cpu']:
351 vpp_main_core = node['cpu']['vpp_main_core']
# main-core 0 means "not reserved", so it is omitted from the config.
354 if vpp_main_core != 0:
355 cpu += ' main-core {}\n'.format(vpp_main_core)
358 vpp_workers = node['cpu']['vpp_workers']
359 vpp_worker_len = len(vpp_workers)
360 if vpp_worker_len > 0:
# Each worker entry is a (start, end) core tuple; collapse start==end
# to a single number, otherwise emit "start-end".
362 for i, worker in enumerate(vpp_workers):
364 vpp_worker_str += ','
365 if worker[0] == worker[1]:
366 vpp_worker_str += "{}".format(worker[0])
368 vpp_worker_str += "{}-{}".format(worker[0], worker[1])
370 cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
# Build the body of the VPP startup "dpdk { ... }" section: one
# "dev <pci> { ... }" stanza per interface, with queue/descriptor counts,
# plus num-mbufs when the computed total exceeds the 16384 default.
# Missing from this excerpt: the @staticmethod decorator, the
# `devices = ''` seed, the loop unpack for (numa, value), the rx/tx
# queue/desc default initializations, the conditional guards around the
# num-rx/num-tx emission lines, the closing ' }' of each stanza, and the
# return statement.
375 def _apply_vpp_devices(node):
377 Apply VPP PCI Device configuration to vpp startup.
379 :param node: Node dictionary with cpuinfo.
384 ports_per_numa = node['cpu']['ports_per_numa']
385 total_mbufs = node['cpu']['total_mbufs']
387 for item in ports_per_numa.items():
389 interfaces = value['interfaces']
391 # if 0 was specified for the number of vpp workers, use 1 queue
394 if 'rx_queues' in value:
395 num_rx_queues = value['rx_queues']
396 if 'tx_queues' in value:
397 num_tx_queues = value['tx_queues']
402 # Create the devices string
403 for interface in interfaces:
404 pci_address = interface['pci_address']
# PCI addresses may carry stray quote characters from YAML round-trips.
405 pci_address = pci_address.lstrip("'").rstrip("'")
407 devices += ' dev {} {{ \n'.format(pci_address)
409 devices += ' num-rx-queues {}\n'.format(num_rx_queues)
411 devices += ' num-rx-queues {}\n'.format(1)
413 devices += ' num-tx-queues {}\n'.format(num_tx_queues)
415 devices += ' num-rx-desc {}\n'.format(num_rx_desc)
417 devices += ' num-tx-desc {}\n'.format(num_tx_desc)
420 # If the total mbufs is not 0 or less than the default, set num-bufs
421 logging.debug("Total mbufs: {}".format(total_mbufs))
# 16384 is the DPDK default mbuf count; only override above that.
422 if total_mbufs != 0 and total_mbufs > 16384:
423 devices += '\n num-mbufs {}'.format(total_mbufs)
# Place total_vpp_workers contiguous cores on numa_node's cpu slice,
# skipping the cores already reserved for "other" processes, and reserve
# one extra leading core for the VPP main thread if still needed.
# Returns True if the main core could NOT be placed here (caller retries
# on another numa node, then the fallback at the bottom kicks in).
# Missing from this excerpt: the @staticmethod decorator, the
# (start, end) unpack from `cpus`, the `start += 1` under
# reserve_vpp_main_core, and some docstring/type lines.
428 def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end,
430 reserve_vpp_main_core):
432 Calculate the VPP worker information
434 :param node: Node dictionary
435 :param vpp_workers: List of VPP workers
436 :param numa_node: Numa node
437 :param other_cpus_end: The end of the cpus allocated for cores
439 :param total_vpp_workers: The number of vpp workers needed
440 :param reserve_vpp_main_core: Is there a core needed for
444 :type other_cpus_end: int
445 :type total_vpp_workers: int
446 :type reserve_vpp_main_core: bool
447 :returns: Is a core still needed for the vpp main core
451 # Can we fit the workers in one of these slices
452 cpus = node['cpu']['cpus_per_node'][numa_node]
# Never overlap the cores reserved for non-VPP processes.
456 if start <= other_cpus_end:
457 start = other_cpus_end + 1
459 if reserve_vpp_main_core:
462 workers_end = start + total_vpp_workers - 1
464 if workers_end <= end:
# Main core sits immediately before the worker range.
465 if reserve_vpp_main_core:
466 node['cpu']['vpp_main_core'] = start - 1
467 reserve_vpp_main_core = False
468 if total_vpp_workers:
469 vpp_workers.append((start, workers_end))
472 # We still need to reserve the main core
473 if reserve_vpp_main_core:
474 node['cpu']['vpp_main_core'] = other_cpus_end + 1
476 return reserve_vpp_main_core
# Derive rx/tx queue counts and descriptor totals for one numa node's
# ports, store them into ports_per_numa_value, and return the mbuf count
# needed to back them. Missing from this excerpt: the @staticmethod
# decorator, the desc_entries assignment (line ~505), the tx_queues /
# desc storage lines, and the return statement.
479 def _calc_desc_and_queues(total_numa_nodes,
480 total_ports_per_numa,
482 ports_per_numa_value):
484 Calculate the number of descriptors and queues
486 :param total_numa_nodes: The total number of numa nodes
487 :param total_ports_per_numa: The total number of ports for this
489 :param total_rx_queues: The total number of rx queues / port
490 :param ports_per_numa_value: The value from the ports_per_numa
492 :type total_numa_nodes: int
493 :type total_ports_per_numa: int
494 :type total_rx_queues: int
495 :type ports_per_numa_value: dict
496 :returns The total number of message buffers
500 # Get the number of rx queues
# At least one rx queue even when the user asked for zero workers.
501 rx_queues = max(1, total_rx_queues)
502 tx_queues = rx_queues * total_numa_nodes + 1
504 # Get the descriptor entries
506 ports_per_numa_value['rx_queues'] = rx_queues
507 total_mbufs = (((rx_queues * desc_entries) +
508 (tx_queues * desc_entries)) *
509 total_ports_per_numa)
# NOTE(review): self-assignment below is a no-op as excerpted — in the
# full source this is presumably a multiplier (e.g. * 2.5); confirm.
510 total_mbufs = total_mbufs
# Group vpp-bound interfaces by their numa node into
# {numa: {'interfaces': [...]}} and cache the result on the node dict.
# The @staticmethod decorator, the `ports_per_numa = {}` seed, the loop
# unpack (`i = item[1]`), and the smt/else structure around the duplicated
# append are missing from this excerpt.
515 def _create_ports_per_numa(node, interfaces):
517 Create a dictionary or ports per numa node
518 :param node: Node dictionary
519 :param interfaces: All the interfaces to be used by vpp
521 :type interfaces: dict
522 :returns: The ports per numa dictionary
526 # Make a list of ports by numa node
528 for item in interfaces.items():
530 if i['numa_node'] not in ports_per_numa:
531 ports_per_numa[i['numa_node']] = {'interfaces': []}
532 ports_per_numa[i['numa_node']]['interfaces'].append(i)
# NOTE(review): line 534 repeats the append of line 532 — in the full
# source these presumably sit on different branches (e.g. if/else);
# as excerpted it would double-count each interface. Confirm.
534 ports_per_numa[i['numa_node']]['interfaces'].append(i)
535 node['cpu']['ports_per_numa'] = ports_per_numa
537 return ports_per_numa
# Top-level CPU planning: for each node, group ports by numa node, carve
# out the "other processes" core range, split total_vpp_cpus evenly across
# numa nodes with ports, and compute per-numa queues/descriptors/mbufs.
# Missing from this excerpt: loop unpacks (node = i[1]; numa_node/value
# from ports_per_numa.items()), the other_cpus_start assignment, the
# total_main / total_mbufs / vpp_workers initializations, and the
# accumulation of mbufs into total_mbufs.
539 def calculate_cpu_parameters(self):
541 Calculate the cpu configuration.
545 # Calculate the cpu parameters, needed for the
546 # vpp_startup and grub configuration
547 for i in self._nodes.items():
550 # get total number of nic ports
551 interfaces = node['interfaces']
553 # Make a list of ports by numa node
554 ports_per_numa = self._create_ports_per_numa(node, interfaces)
556 # Get the number of cpus to skip, we never use the first cpu
558 other_cpus_end = other_cpus_start + \
559 node['cpu']['total_other_cpus'] - 1
561 if other_cpus_end != 0:
562 other_workers = (other_cpus_start, other_cpus_end)
563 node['cpu']['other_workers'] = other_workers
565 # Allocate the VPP main core and workers
567 reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
568 total_vpp_cpus = node['cpu']['total_vpp_cpus']
569 total_rx_queues = node['cpu']['total_rx_queues']
571 # If total_vpp_cpus is 0 or is less than the numa nodes with ports
572 # then we shouldn't get workers
573 total_workers_node = 0
574 if len(ports_per_numa):
# Integer division: workers are split evenly across numa nodes.
575 total_workers_node = total_vpp_cpus // len(ports_per_numa)
577 if reserve_vpp_main_core:
580 if total_main + total_workers_node != 0:
581 for item in ports_per_numa.items():
585 # Get the number of descriptors and queues
586 mbufs = self._calc_desc_and_queues(
588 len(value['interfaces']), total_rx_queues, value)
591 # Get the VPP workers
592 reserve_vpp_main_core = self._calc_vpp_workers(
593 node, vpp_workers, numa_node,
594 other_cpus_end, total_workers_node,
595 reserve_vpp_main_core)
598 total_mbufs = int(total_mbufs)
# Cache the computed plan on the node for the startup/grub writers.
603 node['cpu']['vpp_workers'] = vpp_workers
604 node['cpu']['total_mbufs'] = total_mbufs
# Build the VPP startup session/tcp sections sized from the requested
# active/passive open session counts; returns '' (the early return at
# line 631) when both counts are zero. Missing from this excerpt: the
# @staticmethod decorator, the zero-count guard around line 631, the
# "\n".join(...) that assembles the list literal at 634-647, and the
# closing braces of the session/tcp stanzas.
610 def _apply_vpp_tcp(node):
612 Apply the VPP Unix config
614 :param node: Node dictionary with cpuinfo.
618 active_open_sessions = node['tcp']['active_open_sessions']
619 aos = int(active_open_sessions)
621 passive_open_sessions = node['tcp']['passive_open_sessions']
622 pos = int(passive_open_sessions)
# (typo in original comment: "sheit" — presumably "section")
624 # Generate the api-segment gid vpp sheit in any case
631 return tcp.rstrip('\n')
634 "# TCP stack-related configuration parameters",
635 "# expecting {:d} client sessions, {:d} server sessions\n".format(
639 " global-size 2000M",
# Tables are sized at (aos+pos)/4 buckets, a 4-sessions-per-bucket
# heuristic repeated below for the half-open and endpoint tables.
644 " event-queue-length {:d}".format(aos + pos),
645 " preallocated-sessions {:d}".format(aos + pos),
646 " v4-session-table-buckets {:d}".format((aos + pos) // 4),
647 " v4-session-table-memory 3g\n"
650 tcp = tcp + " v4-halfopen-table-buckets {:d}".format(
651 (aos + pos) // 4) + "\n"
652 tcp = tcp + " v4-halfopen-table-memory 3g\n"
653 tcp = tcp + " local-endpoints-table-buckets {:d}".format(
654 (aos + pos) // 4) + "\n"
655 tcp = tcp + " local-endpoints-table-memory 3g\n"
658 tcp = tcp + "tcp {\n"
659 tcp = tcp + " preallocated-connections {:d}".format(aos + pos) + "\n"
661 tcp = tcp + " preallocated-half-open-connections {:d}".format(
665 return tcp.rstrip('\n')
# Write each node's VPP startup.conf: read the .template file, substitute
# the unix/cpu/devices/tcp sections built by the _apply_vpp_* helpers,
# back up the old file, and write the result via a sudo heredoc.
# Missing from this excerpt: the loop unpack (node = i[1]), the
# `if ret != 0:` guards before each raise/debug, the remaining
# stdout.format keyword arguments (cpu=, devices=, tcp=), and the
# heredoc 'EOF' terminator line.
667 def apply_vpp_startup(self):
669 Apply the vpp startup configration
673 # Apply the VPP startup configruation
674 for i in self._nodes.items():
677 # Get the startup file
678 rootdir = node['rootdir']
679 sfile = rootdir + node['vpp']['startup_config_file']
682 devices = self._apply_vpp_devices(node)
685 cpu = self._apply_vpp_cpu(node)
687 # Get the unix config
688 unix = self._apply_vpp_unix(node)
690 # Get the TCP configuration, if any
691 tcp = self._apply_vpp_tcp(node)
693 # Make a backup if needed
694 self._autoconfig_backup_file(sfile)
# The template is the startup file with {unix}/{cpu}/... placeholders.
697 tfile = sfile + '.template'
698 (ret, stdout, stderr) = \
699 VPPUtil.exec_command('cat {}'.format(tfile))
701 raise RuntimeError('Executing cat command failed to node {}'.
702 format(node['host']))
703 startup = stdout.format(unix=unix,
708 (ret, stdout, stderr) = \
709 VPPUtil.exec_command('rm {}'.format(sfile))
711 logging.debug(stderr)
# Heredoc write so the new file is created with sudo privileges.
713 cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
714 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
716 raise RuntimeError('Writing config failed node {}'.
717 format(node['host']))
# Build the isolcpus list (other workers + vpp main core + vpp workers)
# and push it into the grub cmdline via VppGrubUtil. Missing from this
# excerpt: the loop unpack (node = i[1]), the vpp_main_core=0 else branch,
# the isolated_cpus seed and per-entry ',' separator logic, and the
# `node['grub'] = {}` initialization under the 'grub' membership test.
719 def apply_grub_cmdline(self):
721 Apply the grub cmdline
725 for i in self._nodes.items():
728 # Get the isolated CPUs
729 other_workers = node['cpu']['other_workers']
730 vpp_workers = node['cpu']['vpp_workers']
731 if 'vpp_main_core' in node['cpu']:
732 vpp_main_core = node['cpu']['vpp_main_core']
736 if other_workers is not None:
737 all_workers = [other_workers]
# main-core 0 means "not reserved" and is excluded from isolation.
738 if vpp_main_core != 0:
739 all_workers += [(vpp_main_core, vpp_main_core)]
740 all_workers += vpp_workers
742 for idx, worker in enumerate(all_workers):
747 if worker[0] == worker[1]:
748 isolated_cpus += "{}".format(worker[0])
750 isolated_cpus += "{}-{}".format(worker[0], worker[1])
752 vppgrb = VppGrubUtil(node)
753 current_cmdline = vppgrb.get_current_cmdline()
754 if 'grub' not in node:
756 node['grub']['current_cmdline'] = current_cmdline
757 node['grub']['default_cmdline'] = \
758 vppgrb.apply_cmdline(node, isolated_cpus)
# Discover each node's hugepage state (sysctl limits + /proc counts) and
# cache it under node['hugepages']. The loop unpack (node = i[1]) is
# missing from this excerpt.
762 def get_hugepages(self):
764 Get the hugepage configuration
768 for i in self._nodes.items():
771 hpg = VppHugePageUtil(node)
772 max_map_count, shmmax = hpg.get_huge_page_config()
773 node['hugepages']['max_map_count'] = max_map_count
# NOTE(review): key is 'shmax' (single m) — likely a typo for 'shmmax',
# but other code may read 'shmax'; left as-is.
774 node['hugepages']['shmax'] = shmmax
775 total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
776 node['hugepages']['actual_total'] = total
777 node['hugepages']['free'] = free
778 node['hugepages']['size'] = size
779 node['hugepages']['memtotal'] = memtotal
780 node['hugepages']['memfree'] = memfree
# Fragment of the grub-discovery method (its `def` header, ~line 784, is
# not in this excerpt): reads current/default cmdlines and counts the
# CPUs already isolated via the isolcpus= parameter. Also missing: the
# loop unpack (node = i[1]), the current_iso_cpus = 0 seed, the
# `if iso_cpurl > 0:` guard, the else before line 815, and the
# `node['grub'] = {}` initialization.
786 Get the grub configuration
790 for i in self._nodes.items():
793 vppgrb = VppGrubUtil(node)
794 current_cmdline = vppgrb.get_current_cmdline()
795 default_cmdline = vppgrb.get_default_cmdline()
797 # Get the total number of isolated CPUs
# isolcpus accepts comma-separated singles and a-b ranges.
799 iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
800 iso_cpurl = len(iso_cpur)
802 iso_cpu_str = iso_cpur[0]
803 iso_cpu_str = iso_cpu_str.split('=')[1]
804 iso_cpul = iso_cpu_str.split(',')
805 for iso_cpu in iso_cpul:
806 isocpuspl = iso_cpu.split('-')
807 if len(isocpuspl) == 1:
808 current_iso_cpus += 1
810 first = int(isocpuspl[0])
811 second = int(isocpuspl[1])
813 current_iso_cpus += 1
# NOTE(review): range count as excerpted is `second - first`
# (exclusive); an isolcpus range a-b isolates b-a+1 cpus — the missing
# surrounding lines may compensate; confirm.
815 current_iso_cpus += second - first
817 if 'grub' not in node:
819 node['grub']['current_cmdline'] = current_cmdline
820 node['grub']['default_cmdline'] = default_cmdline
821 node['grub']['current_iso_cpus'] = current_iso_cpus
# Scan one node's PCI devices with VppPCIUtil and cache the four device
# categories under node['devices']. The @staticmethod decorator and the
# `node['devices'] = {}` initialization (~line 839) are missing from
# this excerpt.
826 def _get_device(node):
828 Get the device configuration for a single node
830 :param node: Node dictionary with cpuinfo.
835 vpp = VppPCIUtil(node)
836 vpp.get_all_devices()
838 # Save the device information
840 node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
841 node['devices']['kernel_devices'] = vpp.get_kernel_devices()
842 node['devices']['other_devices'] = vpp.get_other_devices()
843 node['devices']['linkup_devices'] = vpp.get_link_up_devices()
# Run the per-node device scan (_get_device) for every node. The loop
# unpack (node = i[1]) and any post-loop update call (~lines 856+) are
# missing from this excerpt.
845 def get_devices_per_node(self):
847 Get the device configuration for all the nodes
851 for i in self._nodes.items():
853 # Update the interface data
855 self._get_device(node)
# Parse `lscpu -p`-style output into a list of per-cpu dicts with keys
# cpu/core/socket/node. Missing from this excerpt: the @staticmethod
# decorator, the cmd assignment (~line 873), the `if ret != 0:` guard,
# the cpu_layout = [] seed, the loop over lines with the continue under
# the comment/blank check, the append of `layout`, and the return.
860 def get_cpu_layout(node):
864 using lscpu -p get the cpu layout.
865 Returns a list with each item representing a single cpu.
867 :param node: Node dictionary.
869 :returns: The cpu layout
874 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
876 raise RuntimeError('{} failed on node {} {}'.
877 format(cmd, node['host'], stderr))
880 lines = stdout.split('\n')
# lscpu -p prefixes its header with '#'; skip it and blank lines.
882 if line == '' or line[0] == '#':
884 linesplit = line.split(',')
885 layout = {'cpu': linesplit[0], 'core': linesplit[1],
886 'socket': linesplit[2], 'node': linesplit[3]}
888 # cpu, core, socket, node
# Fragment of the cpu-discovery method (its `def` header, ~line 893, is
# not in this excerpt): gathers the layout per node and records whether
# SMT is enabled. The loop unpack (node = i[1]) and the lines following
# the trailing comment (presumably clearing node['cpuinfo']) are missing.
895 Get the cpu configuration
900 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
902 for i in self._nodes.items():
906 layout = self.get_cpu_layout(node)
907 node['cpu']['layout'] = layout
909 cpuinfo = node['cpuinfo']
910 smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
911 node['cpu']['smt_enabled'] = smt_enabled
913 # We don't want to write the cpuinfo
# Fragment of the system-discovery method (its `def` header is not in
# this excerpt): runs the individual discovery steps — hugepages
# (call at ~line 926), devices, cpu (~line 932), grub (~line 935).
921 Get the current system configuration.
925 # Get the Huge Page configuration
928 # Get the device configuration
929 self.get_devices_per_node()
931 # Get the CPU configuration
934 # Get the current grub cmdline
# Interactive wizard for the CPU-related settings on one node: cores for
# VPP, cores for other processes, whether to reserve a main core, and rx
# queues per port. Results are stored into node['cpu']. Missing from
# this excerpt: docstring/type lines, the max_vpp_cpus guard structure
# around line 958, the total_other_cpus = 0 default when
# max_other_cores <= 0, and the answer check before setting
# reserve_vpp_main_core.
937 def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
939 Ask the user questions related to the cpu configuration.
941 :param node: Node dictionary
942 :param total_cpus: The total number of cpus in the system
943 :param numa_nodes: The list of numa nodes in the system
945 :type total_cpus: int
946 :type numa_nodes: list
949 print("\nYour system has {} core(s) and {} Numa Nodes.".
950 format(total_cpus, len(numa_nodes)))
951 print("To begin, we suggest not reserving any cores for "
952 "VPP or other processes.")
953 print("Then to improve performance start reserving cores and "
954 "adding queues as needed.")
956 # Leave 1 for the general system
# VPP gets at most 4 cores by this wizard's policy.
958 max_vpp_cpus = min(total_cpus, 4)
961 question = "\nHow many core(s) shall we reserve for " \
962 "VPP [0-{}][0]? ".format(max_vpp_cpus)
963 total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
964 node['cpu']['total_vpp_cpus'] = total_vpp_cpus
967 max_other_cores = total_cpus - total_vpp_cpus
968 if max_other_cores > 0:
969 question = 'How many core(s) do you want to reserve for ' \
970 'processes other than VPP? [0-{}][0]? '. \
971 format(str(max_other_cores))
972 total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
973 node['cpu']['total_other_cpus'] = total_other_cpus
975 max_main_cpus = total_cpus - total_vpp_cpus - total_other_cpus
976 reserve_vpp_main_core = False
977 if max_main_cpus > 0:
978 question = "Should we reserve 1 core for the VPP Main thread? "
979 question += "[y/N]? "
980 answer = self._ask_user_yn(question, 'n')
982 reserve_vpp_main_core = True
983 node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
# Main core is assigned later by _calc_vpp_workers; 0 = unassigned.
984 node['cpu']['vpp_main_core'] = 0
# NOTE(review): the .format() below has no placeholder to fill —
# harmless but pointless; runtime string left untouched.
986 question = "How many RX queues per port shall we use for " \
987 "VPP [1-4][1]? ".format(max_vpp_cpus)
988 total_rx_queues = self._ask_user_range(question, 1, 4, 1)
989 node['cpu']['total_rx_queues'] = total_rx_queues
# Discover the CPU topology (per-numa cpu slices, total cpus, cores,
# numa nodes), optionally run the interactive questions, tag interfaces
# with their numa node, and persist everything to the auto-config file.
# Missing from this excerpt: the loop unpack (node = i[1]), the seeds for
# total_cpus/cpus_per_node/numa_nodes/cores, the first_node assignment,
# the break in the slice-size loop, the cpu/core extraction at 1023-1026,
# the cores.append at 1036, and the final get_cpu()/update call region.
991 def modify_cpu(self, ask_questions=True):
993 Modify the cpu configuration, asking for the user for the values.
995 :param ask_questions: When true ask the user for config parameters
1000 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
1002 for i in self._nodes.items():
1005 total_cpus_per_slice = 0
1009 cpu_layout = self.get_cpu_layout(node)
1011 # Assume the number of cpus per slice is always the same as the
# Count contiguous entries belonging to the first numa node to get
# the slice width.
1014 for cpu in cpu_layout:
1015 if cpu['node'] != first_node:
1017 total_cpus_per_slice += 1
1019 # Get the total number of cpus, cores, and numa nodes from the
1021 for cpul in cpu_layout:
1022 numa_node = cpul['node']
1027 if numa_node not in cpus_per_node:
1028 cpus_per_node[numa_node] = []
# Record each slice as an inclusive (first, last) cpu tuple.
1029 cpuperslice = int(cpu) % total_cpus_per_slice
1030 if cpuperslice == 0:
1031 cpus_per_node[numa_node].append((int(cpu), int(cpu) +
1032 total_cpus_per_slice - 1))
1033 if numa_node not in numa_nodes:
1034 numa_nodes.append(numa_node)
1035 if core not in cores:
1037 node['cpu']['cpus_per_node'] = cpus_per_node
1039 # Ask the user some questions
# Wizard is skipped on boxes with fewer than 4 cpus.
1040 if ask_questions and total_cpus >= 4:
1041 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1043 # Populate the interfaces with the numa node
1044 if 'interfaces' in node:
1045 ikeys = node['interfaces'].keys()
1046 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1048 # We don't want to write the cpuinfo
1049 node['cpuinfo'] = ""
1052 self._update_auto_config()
# Interactive triage of devices bound to neither the kernel nor DPDK:
# first offer to rebind them to the OS (kernel driver), then offer the
# remainder to VPP (dpdk). Missing from this excerpt: docstring/param
# lines, the vppd = {} accumulators and their fill loops (1070-1079ff),
# loop unpacks (dvid = dit[0]; device = dit[1]), the answer checks after
# each _ask_user_yn, the `if ret:` guards before the debug lines, and
# the per-branch logging setup.
1055 def _modify_other_devices(self, node,
1056 other_devices, kernel_devices, dpdk_devices):
1058 Modify the devices configuration, asking for the user for the values.
1062 odevices_len = len(other_devices)
1063 if odevices_len > 0:
1064 print("\nThese device(s) are currently NOT being used "
1065 "by VPP or the OS.\n")
1066 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1067 question = "\nWould you like to give any of these devices"
1068 question += " back to the OS [Y/n]? "
1069 answer = self._ask_user_yn(question, 'Y')
1072 for dit in other_devices.items():
1075 question = "Would you like to use device {} for". \
1077 question += " the OS [y/N]? "
1078 answer = self._ask_user_yn(question, 'n')
# Rebind using the first "unused" (available but not bound) driver.
1080 if 'unused' in device and len(
1081 device['unused']) != 0 and \
1082 device['unused'][0] != '':
1083 driver = device['unused'][0]
1084 ret = VppPCIUtil.bind_vpp_device(
1088 'Could not bind device {}'.format(dvid))
# Move the successfully rebound devices into the kernel bucket.
1091 for dit in vppd.items():
1094 kernel_devices[dvid] = device
1095 del other_devices[dvid]
1097 odevices_len = len(other_devices)
1098 if odevices_len > 0:
1099 print("\nThese device(s) are still NOT being used "
1100 "by VPP or the OS.\n")
1101 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1102 question = "\nWould you like use any of these for VPP [y/N]? "
1103 answer = self._ask_user_yn(question, 'N')
1106 for dit in other_devices.items():
1109 question = "Would you like to use device {} ".format(dvid)
1110 question += "for VPP [y/N]? "
1111 answer = self._ask_user_yn(question, 'n')
1114 for dit in vppd.items():
1117 if 'unused' in device and len(device['unused']) != 0 and \
1118 device['unused'][0] != '':
1119 driver = device['unused'][0]
1121 'Binding device {} to driver {}'.format(dvid,
1123 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1126 'Could not bind device {}'.format(dvid))
1128 dpdk_devices[dvid] = device
1129 del other_devices[dvid]
# Non-interactive path: rebuild node['interfaces'] directly from the
# devices named in the config file, matching each configured interface's
# PCI address against the discovered device set. Missing from this
# excerpt: the loop unpack (node = i[1]), the interfaces = {} seed, the
# device argument to vpp_create_interface (line ~1153), and the final
# update/save call (~1156).
1131 def update_interfaces_config(self):
1133 Modify the interfaces directly from the config file.
1137 for i in self._nodes.items():
1139 devices = node['devices']
# Union of all discovered device categories, keyed by PCI id.
1140 all_devices = devices['other_devices']
1141 all_devices.update(devices['dpdk_devices'])
1142 all_devices.update(devices['kernel_devices'])
1146 if 'interfaces' in node:
1147 current_ifcs = node['interfaces']
1149 for ifc in current_ifcs.values():
1150 dvid = ifc['pci_address']
1151 if dvid in all_devices:
1152 VppPCIUtil.vpp_create_interface(interfaces, dvid,
1154 node['interfaces'] = interfaces
# Main interactive device workflow: triage "other" devices, then offer
# kernel-bound devices to VPP (bind to a DPDK-capable driver), then offer
# DPDK-bound devices back to the kernel, and finally rebuild
# node['interfaces'] from whatever remains on DPDK. Missing from this
# excerpt: loop unpacks (node = i[1]; dvid/device from dit), the
# `if klen > 0:` / `if dlen > 0:` guards, the answer checks after each
# _ask_user_yn, the vppd/vppdl accumulator seeds, the interfaces = {}
# seed, and the `if ret:` guards.
1158 def modify_devices(self):
1160 Modify the devices configuration, asking for the user for the values.
1164 for i in self._nodes.items():
1166 devices = node['devices']
1167 other_devices = devices['other_devices']
1168 kernel_devices = devices['kernel_devices']
1169 dpdk_devices = devices['dpdk_devices']
1172 self._modify_other_devices(node, other_devices,
1173 kernel_devices, dpdk_devices)
1175 # Get the devices again for this node
# Re-scan: _modify_other_devices may have rebound devices.
1176 self._get_device(node)
1177 devices = node['devices']
1178 kernel_devices = devices['kernel_devices']
1179 dpdk_devices = devices['dpdk_devices']
1181 klen = len(kernel_devices)
1183 print("\nThese devices are safe to be used with VPP.\n")
1184 VppPCIUtil.show_vpp_devices(kernel_devices)
1185 question = "\nWould you like to use any of these " \
1186 "device(s) for VPP [y/N]? "
1187 answer = self._ask_user_yn(question, 'n')
1190 for dit in kernel_devices.items():
1193 question = "Would you like to use device {} ".format(dvid)
1194 question += "for VPP [y/N]? "
1195 answer = self._ask_user_yn(question, 'n')
1198 for dit in vppd.items():
# Bind each selected device using its first available driver.
1201 if 'unused' in device and len(
1202 device['unused']) != 0 and device['unused'][
1204 driver = device['unused'][0]
1205 question = "Would you like to bind the driver {} for {} [y/N]? ".format(driver, dvid)
1206 answer = self._ask_user_yn(question, 'n')
1208 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1209 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1211 logging.debug('Could not bind device {}'.format(dvid))
1212 dpdk_devices[dvid] = device
1213 del kernel_devices[dvid]
1215 dlen = len(dpdk_devices)
1217 print("\nThese device(s) are already using DPDK.\n")
1218 VppPCIUtil.show_vpp_devices(dpdk_devices,
1219 show_interfaces=False)
1220 question = "\nWould you like to remove any of "
1221 question += "these device(s) [y/N]? "
1222 answer = self._ask_user_yn(question, 'n')
1225 for dit in dpdk_devices.items():
1228 question = "Would you like to remove {} [y/N]? ". \
1230 answer = self._ask_user_yn(question, 'n')
1232 vppdl[dvid] = device
1233 for dit in vppdl.items():
1236 if 'unused' in device and len(
1237 device['unused']) != 0 and device['unused'][
1239 driver = device['unused'][0]
1241 'Binding device {} to driver {}'.format(
1243 ret = VppPCIUtil.bind_vpp_device(node, driver,
1247 'Could not bind device {}'.format(dvid))
1249 kernel_devices[dvid] = device
1250 del dpdk_devices[dvid]
# Whatever is on DPDK now becomes the VPP interface set.
1253 for dit in dpdk_devices.items():
1256 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1257 node['interfaces'] = interfaces
1259 self._update_auto_config()
# Interactive hugepage sizing: show current totals, cap the askable
# maximum at 70% of free memory, record the chosen total, persist it,
# and re-read actual hugepage state. Missing from this excerpt: the loop
# unpack (node = i[1]), the hugesize divisor on line ~1279, the answer
# check + continue after the y/N question, and the format arguments of
# the prints at 1280-1281 and 1289-1290.
1262 def modify_huge_pages(self):
1264 Modify the huge page configuration, asking for the user for the values.
1268 for i in self._nodes.items():
1271 total = node['hugepages']['actual_total']
1272 free = node['hugepages']['free']
1273 size = node['hugepages']['size']
# memfree/size are strings like "1024 kB"; take the numeric part.
1274 memfree = node['hugepages']['memfree'].split(' ')[0]
1275 hugesize = int(size.split(' ')[0])
1276 # The max number of huge pages should be no more than
1277 # 70% of total free memory
1278 maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // \
1280 print("\nThere currently {} {} huge pages free.".format(
1282 question = "Do you want to reconfigure the number of " \
1283 "huge pages [y/N]? "
1284 answer = self._ask_user_yn(question, 'n')
1286 node['hugepages']['total'] = total
1289 print("\nThere currently a total of {} huge pages.".
1291 question = "How many huge pages do you want [{} - {}][{}]? ". \
1292 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
# NOTE(review): literals 1024 duplicate MIN_TOTAL_HUGE_PAGES; keeping
# them in sync manually is fragile.
1293 answer = self._ask_user_range(question, 1024, maxpages, 1024)
1294 node['hugepages']['total'] = str(answer)
1296 # Update auto-config.yaml
1297 self._update_auto_config()
1299 # Rediscover just the hugepages
1300 self.get_hugepages()
# Placeholder: TCP parameters live only in the config, so discovery is a
# no-op (the docstring and trailing comment are all that remain).
1302 def get_tcp_params(self):
1304 Get the tcp configuration
1307 # maybe nothing to do here?
# Interactive TCP sizing: ask for expected active-open (client) and
# passive-open (server) session counts per node; values under 10000 are
# normalized to 0 (the `answer = 0` lines under the guards, ~1324/1332,
# are missing from this excerpt, as is the loop unpack node = i[1]).
1310 def acquire_tcp_params(self):
1312 Ask the user for TCP stack configuration parameters
1316 for i in self._nodes.items():
1319 question = "\nHow many active-open / tcp client sessions are " \
1320 "expected [0-10000000][0]? "
1321 answer = self._ask_user_range(question, 0, 10000000, 0)
1322 # Less than 10K is equivalent to 0
1323 if int(answer) < 10000:
1325 node['tcp']['active_open_sessions'] = answer
1327 question = "How many passive-open / tcp server sessions are " \
1328 "expected [0-10000000][0]? "
1329 answer = self._ask_user_range(question, 0, 10000000, 0)
1330 # Less than 10K is equivalent to 0
1331 if int(answer) < 10000:
1333 node['tcp']['passive_open_sessions'] = answer
1335 # Update auto-config.yaml
1336 self._update_auto_config()
1338 # Rediscover tcp parameters
1339 self.get_tcp_params()
# Force-rebuild QEMU on the node with the project's patches applied
# (used for vhost-user testing). The @staticmethod decorator (~line
# 1341) is missing from this excerpt.
1342 def patch_qemu(node):
1344 Patch qemu with the correct patches.
1346 :param node: Node dictionary
1350 print('\nWe are patching the node "{}":\n'.format(node['host']))
1351 QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
# Fragment of the CPU reporting method (its `def` header, ~line 1354, is
# not in this excerpt): pretty-prints selected lscpu fields, per-numa
# cpu lists, SMT state, and VPP thread placement. Missing: the item
# assignments before most prints (e.g. 'Model name', 'CPU(s)'), the
# `if item in cpu:` guards, and the smt = 'Enabled'/'Disabled' branch.
1356 print the CPU information
1360 cpu = CpuUtils.get_cpu_info_per_node(node)
1364 print("{:>20}: {}".format(item, cpu[item]))
1367 print("{:>20}: {}".format(item, cpu[item]))
1368 item = 'Thread(s) per core'
1370 print("{:>20}: {}".format(item, cpu[item]))
1371 item = 'Core(s) per socket'
1373 print("{:>20}: {}".format(item, cpu[item]))
1376 print("{:>20}: {}".format(item, cpu[item]))
1377 item = 'NUMA node(s)'
1380 numa_nodes = int(cpu[item])
# One line per numa node listing its cpu set.
1381 for i in range(0, numa_nodes):
1382 item = "NUMA node{} CPU(s)".format(i)
1383 print("{:>20}: {}".format(item, cpu[item]))
1384 item = 'CPU max MHz'
1386 print("{:>20}: {}".format(item, cpu[item]))
1387 item = 'CPU min MHz'
1389 print("{:>20}: {}".format(item, cpu[item]))
1391 if node['cpu']['smt_enabled']:
1395 print("{:>20}: {}".format('SMT', smt))
1398 print("\nVPP Threads: (Name: Cpu Number)")
1399 vpp_processes = cpu['vpp_processes']
1400 for i in vpp_processes.items():
1401 print(" {:10}: {:4}".format(i[0], i[1]))
# Report device/interface usage on `node`:
#   1. total mbuf count when configured (node['cpu']['total_mbufs'] != 0);
#   2. PCI device inventory via VppPCIUtil -- link-up devices (unusable by
#      VPP), kernel-bound, DPDK-bound, and unbound devices;
#   3. interfaces currently claimed by VPP (vpputl.get_hardware) with their
#      NUMA node and RX/TX queue/descriptor counts.
# NOTE(review): sampling gaps hide several lines here (e.g. the
# `vpputl = VPPUtil()` binding before embedded line 1447, `else:` branches
# between 1426/1428 and 1433/1436, and the early-return when get_hardware
# yields {}) -- do not infer control flow solely from what is visible.
1404 def device_info(node):
1406 Show the device information.
1410 if 'cpu' in node and 'total_mbufs' in node['cpu']:
1411 total_mbufs = node['cpu']['total_mbufs']
1412 if total_mbufs != 0:
1413 print("Total Number of Buffers: {}".format(total_mbufs))
1415 vpp = VppPCIUtil(node)
1416 vpp.get_all_devices()
1417 linkup_devs = vpp.get_link_up_devices()
1418 if len(linkup_devs):
1419 print("\nDevices with link up (can not be used with VPP):")
1420 vpp.show_vpp_devices(linkup_devs, show_header=False)
1421 # for dev in linkup_devs:
1423 kernel_devs = vpp.get_kernel_devices()
1424 if len(kernel_devs):
1425 print("\nDevices bound to kernel drivers:")
1426 vpp.show_vpp_devices(kernel_devs, show_header=False)
1428 print("\nNo devices bound to kernel drivers")
1430 dpdk_devs = vpp.get_dpdk_devices()
1432 print("\nDevices bound to DPDK drivers:")
1433 vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1436 print("\nNo devices bound to DPDK drivers")
1438 other_devs = vpp.get_other_devices()
1440 print("\nDevices not bound to Kernel or DPDK drivers:")
1441 vpp.show_vpp_devices(other_devs, show_interfaces=True,
1444 print("\nNo devices not bound to Kernel or DPDK drivers")
1447 interfaces = vpputl.get_hardware(node)
1448 if interfaces == {}:
1451 print("\nDevices in use by VPP:")
1453 if len(interfaces.items()) < 2:
# Table header: columns align with the row format string at line 1477.
1457 print("{:30} {:4} {:4} {:7} {:4} {:7}".
1458 format('Name', 'Numa', 'RXQs',
1459 'RXDescs', 'TXQs', 'TXDescs'))
1460 for intf in sorted(interfaces.items()):
# `name`/`value` unpacking from `intf` happens in missing lines
# (gap 1460 -> 1463) -- presumably name, value = intf[0], intf[1].
1463 if name == 'local0':
# Default every column to '' so missing per-interface keys print blank.
1465 numa = rx_qs = rx_ds = tx_qs = tx_ds = ''
1467 numa = int(value['numa'])
1468 if 'rx queues' in value:
1469 rx_qs = int(value['rx queues'])
1470 if 'rx descs' in value:
1471 rx_ds = int(value['rx descs'])
1472 if 'tx queues' in value:
1473 tx_qs = int(value['tx queues'])
1474 if 'tx descs' in value:
1475 tx_ds = int(value['tx descs'])
1477 print("{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".
1478 format(name, numa, rx_qs, rx_ds, tx_qs, tx_ds))
# Thin wrapper: delegate huge-page reporting for `node` to
# VppHugePageUtil.show_huge_pages(). (Docstring delimiters and the
# presumed @staticmethod decorator fall in sampling gaps.)
1481 def hugepage_info(node):
1483 Show the huge page information.
1487 hpg = VppHugePageUtil(node)
1488 hpg.show_huge_pages()
# Predicate: does `node` declare at least one interface?  Only the test is
# visible; the `return True` / `return False` lines fall in sampling gaps
# (embedded numbers jump 1497 -> 1503) -- confirm against the full file.
# (Docstring typo "tru" left untouched: this dump's docstrings have lost
# their delimiters, so the text may be runtime-significant here.)
1491 def has_interfaces(node):
1493 Check for interfaces, return tru if there is at least one
1497 if 'interfaces' in node and len(node['interfaces']):
# Validate minimum host resources for VPP: enough CPUs (threshold line is in
# a sampling gap between 1515 and 1517) and enough free memory to allocate
# MIN_TOTAL_HUGE_PAGES huge pages without exceeding
# MAX_PERCENT_FOR_HUGE_PAGES percent of free memory.
# NOTE(review): BUG at embedded line 1531 -- `free is '0'` compares object
# identity against a str literal, which is implementation-dependent (and a
# SyntaxWarning on modern CPython); it should be `free == '0'`.  Not fixed
# here because adjacent lines (the print/format tail after 1535, the return
# statements) are missing from this sampled view.
1503 def min_system_resources(node):
1505 Check the system for basic minimum resources, return true if
1514 if 'layout' in node['cpu']:
1515 total_cpus = len(node['cpu']['layout'])
1517 print("\nThere is only {} CPU(s) available on this system. "
1518 "This is not enough to run VPP.".format(total_cpus))
1522 if 'free' in node['hugepages'] and \
1523 'memfree' in node['hugepages'] and \
1524 'size' in node['hugepages']:
# 'memfree'/'size' look like "NNNN kB"-style strings: take the leading
# numeric field. Units are assumed consistent between the two -- confirm.
1525 free = node['hugepages']['free']
1526 memfree = float(node['hugepages']['memfree'].split(' ')[0])
1527 hugesize = float(node['hugepages']['size'].split(' ')[0])
1529 memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1530 percentmemhugepages = (memhugepages / memfree) * 100
1531 if free is '0' and \
1532 percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1534 "\nThe System has only {} of free memory. You will not "
1535 "be able to allocate enough Huge Pages for VPP.".format(
# NOTE(review): the enclosing `def` line is in a sampling gap (numbers jump
# 1535 -> 1545); this is the per-node system-summary loop of the AutoConfig
# class. For each node it prints: host banner, grub current/configured
# command lines, huge-page info, device info, VPP service status (with any
# error lines), then runs the min_system_resources check.
1545 Print the system information
1549 for i in self._nodes.items():
# `name`/`node` bindings from `i` occur in missing lines (gap 1550 -> 1554).
1550 print("\n==============================")
1554 print("NODE: {}\n".format(name))
1561 print("\nGrub Command Line:")
1563 print(" Current: {}".format(
1564 node['grub']['current_cmdline']))
1565 print(" Configured: {}".format(
1566 node['grub']['default_cmdline']))
1569 print("\nHuge Pages:")
1570 self.hugepage_info(node)
1574 self.device_info(node)
1577 print("\nVPP Service Status:")
1578 state, errors = VPPUtil.status(node)
1579 print(" {}".format(state))
# The `for e in errors:` header is in a gap (1579 -> 1581) -- presumably
# this line iterates the `errors` list returned by VPPUtil.status.
1581 print(" {}".format(e))
1583 # Minimum system resources
1584 self.min_system_resources(node)
1586 print("\n==============================")
# Interactively build the list of VPP interfaces to address: for every
# hardware interface except 'local0', ask the user whether to assign an IPv4
# address; on yes, prompt for the address (self._ask_user_ipv4) and collect
# {'name': ..., 'addr': ...} dicts.
# NOTE(review): sampling gaps hide the `vpputl = VPPUtil()` binding, the
# early return when get_hardware yields {}, the intf unpacking, the
# `if answer == 'y':` guard, and the `address = {}` initialisation --
# confirm against the full file.
1588 def _ipv4_interface_setup_questions(self, node):
1590 Ask the user some questions and get a list of interfaces
1591 and IPv4 addresses associated with those interfaces
1593 :param node: Node dictionary.
1595 :returns: A list or interfaces with ip addresses
1600 interfaces = vpputl.get_hardware(node)
1601 if interfaces == {}:
1604 interfaces_with_ip = []
1605 for intf in sorted(interfaces.items()):
1607 if name == 'local0':
1610 question = "Would you like add address to " \
1611 "interface {} [Y/n]? ".format(name)
1612 answer = self._ask_user_yn(question, 'y')
1615 addr = self._ask_user_ipv4()
1616 address['name'] = name
1617 address['addr'] = addr
1618 interfaces_with_ip.append(address)
1620 return interfaces_with_ip
# For every node: show current interface IPs, optionally keep them, else
# interview the user (_ipv4_interface_setup_questions), then write a vppctl
# exec script assigning the chosen IPv4 addresses and bringing the
# interfaces up, execute it, and tell the user how to re-run it.
# NOTE(review): sampling gaps hide the node binding, the keep-configuration
# early-continue after the [Y/n] answer, `content = ''` initialisation, the
# name/addr unpacking inside the loop, and the `if ret != 0:` guard before
# logging.debug -- confirm against the full file.
1622 def ipv4_interface_setup(self):
1624 After asking the user some questions, get a list of interfaces
1625 and IPv4 addresses associated with those interfaces
1629 for i in self._nodes.items():
1632 # Show the current interfaces with IP addresses
1633 current_ints = VPPUtil.get_int_ip(node)
1634 if current_ints != {}:
1635 print("\nThese are the current interfaces with IP addresses:")
1636 for items in sorted(current_ints.items()):
1639 if 'address' not in value:
1642 address = value['address']
1643 print("{:30} {:20} {:10}".format(name, address,
1645 question = "\nWould you like to keep this configuration " \
1647 answer = self._ask_user_yn(question, 'y')
1651 print("\nThere are currently no interfaces with IP "
1654 # Create a script that add the ip addresses to the interfaces
1655 # and brings the interfaces up
1656 ints_with_addrs = self._ipv4_interface_setup_questions(node)
1658 for ints in ints_with_addrs:
1661 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1662 setintupstr = 'set int state {} up\n'.format(name)
1663 content += setipstr + setintupstr
1665 # Write the content to the script
1666 rootdir = node['rootdir']
1667 filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1668 with open(filename, 'w+') as sfile:
1669 sfile.write(content)
1671 # Execute the script
1672 cmd = 'vppctl exec {}'.format(filename)
1673 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1675 logging.debug(stderr)
# Typo in user-facing string ("as been" -> "has been") left as-is:
# runtime strings must stay byte-identical in a doc-only pass.
1677 print("\nA script as been created at {}".format(filename))
1678 print("This script can be run using the following:")
1679 print("vppctl exec {}\n".format(filename))
# Interactively create vhost-user interfaces: first delete every existing
# 'Virtual*' interface, then for each remaining hardware interface (except
# 'local0') ask whether to connect it to the VM; on yes, create a
# server-mode vhost-user socket under /var/run/vpp/, chmod it 777, and
# record {'name', 'virtualinterface', 'bridge'} for the caller.
# NOTE(review): sampling gaps hide the vpputl binding, early returns on
# empty get_hardware, intf/name unpacking, `inum` initialisation/increment,
# the `if answer == 'y':` guard, and the `if ret != 0:` checks before the
# raise/logging lines -- confirm against the full file.
1688 def _create_vints_questions(self, node):
1683 Ask the user some questions and get a list of interfaces
1684 and IPv4 addresses associated with those interfaces
1686 :param node: Node dictionary.
1688 :returns: A list or interfaces with ip addresses
1693 interfaces = vpputl.get_hardware(node)
1694 if interfaces == {}:
1697 # First delete all the Virtual interfaces
1698 for intf in sorted(interfaces.items()):
1700 if name[:7] == 'Virtual':
1701 cmd = 'vppctl delete vhost-user {}'.format(name)
1702 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1704 logging.debug('{} failed on node {} {}'.format(
1705 cmd, node['host'], stderr))
1707 # Create a virtual interface, for each interface the user wants to use
1708 interfaces = vpputl.get_hardware(node)
1709 if interfaces == {}:
1711 interfaces_with_virtual_interfaces = []
1713 for intf in sorted(interfaces.items()):
1715 if name == 'local0':
1718 question = "Would you like connect this interface {} to " \
1719 "the VM [Y/n]? ".format(name)
1720 answer = self._ask_user_yn(question, 'y')
# Recreate the socket path from scratch; '/' in interface names is not
# filesystem-safe, hence the replace.
1722 sockfilename = '/var/run/vpp/{}.sock'.format(
1723 name.replace('/', '_'))
1724 if os.path.exists(sockfilename):
1725 os.remove(sockfilename)
1726 cmd = 'vppctl create vhost-user socket {} server'.format(
1728 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1731 "Couldn't execute the command {}, {}.".format(cmd,
# vppctl prints the new interface name on stdout.
1733 vintname = stdout.rstrip('\r\n')
1735 cmd = 'chmod 777 {}'.format(sockfilename)
1736 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1739 "Couldn't execute the command {}, {}.".format(cmd,
1742 interface = {'name': name,
1743 'virtualinterface': '{}'.format(vintname),
1744 'bridge': '{}'.format(inum)}
1746 interfaces_with_virtual_interfaces.append(interface)
1748 return interfaces_with_virtual_interfaces
# For every node: show the current L2 bridge configuration, optionally keep
# it; otherwise interview the user (_create_vints_questions) and emit a
# vppctl exec script that bridges each chosen physical interface with its
# vhost-user twin and brings both up, then run the script.
# NOTE(review): sampling gaps hide the node binding, the keep-configuration
# continue, `content = ''`, parts of the vhost comment format argument, the
# format args of setintdnstr/setintupstr, and the `if ret != 0:` guard --
# confirm against the full file.
1750 def create_and_bridge_virtual_interfaces(self):
1752 After asking the user some questions, create a VM and connect
1753 the interfaces to VPP interfaces
1757 for i in self._nodes.items():
1760 # Show the current bridge and interface configuration
1761 print("\nThis the current bridge configuration:")
1762 VPPUtil.show_bridge(node)
1763 question = "\nWould you like to keep this configuration [Y/n]? "
1764 answer = self._ask_user_yn(question, 'y')
1768 # Create a script that builds a bridge configuration with
1769 # physical interfaces and virtual interfaces
1770 ints_with_vints = self._create_vints_questions(node)
1772 for intf in ints_with_vints:
# The 'comment { ... }' lines are vppctl script comments; the doubled
# braces escape str.format's placeholder syntax.
1773 vhoststr = '\n'.join([
1774 'comment { The following command creates the socket }',
1775 'comment { and returns a virtual interface }',
1776 'comment {{ create vhost-user socket '
1777 '/var/run/vpp/sock{}.sock server }}\n'.format(
1781 setintdnstr = 'set interface state {} down\n'.format(
1784 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1785 intf['name'], intf['bridge'])
1786 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1787 intf['virtualinterface'], intf['bridge'])
1789 # set interface state VirtualEthernet/0/0/0 up
1790 setintvststr = 'set interface state {} up\n'.format(
1791 intf['virtualinterface'])
1793 # set interface state VirtualEthernet/0/0/0 down
1794 setintupstr = 'set interface state {} up\n'.format(
1797 content += vhoststr + setintdnstr + setintbrstr + \
1798 setvintbrstr + setintvststr + setintupstr
1800 # Write the content to the script
1801 rootdir = node['rootdir']
1802 filename = rootdir + \
1803 '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1804 with open(filename, 'w+') as sfile:
1805 sfile.write(content)
1807 # Execute the script
1808 cmd = 'vppctl exec {}'.format(filename)
1809 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1811 logging.debug(stderr)
1813 print("\nA script as been created at {}".format(filename))
1814 print("This script can be run using the following:")
1815 print("vppctl exec {}\n".format(filename))
# iperf-VM variant of _create_vints_questions: delete existing 'Virtual*'
# interfaces, then ask the user to pick ONE interface (default answer 'n')
# to connect to the iperf VM; create its server-mode vhost-user socket,
# remember the socket path in self._sockfilename (used later by
# create_iperf_vm), chmod it 777, and return the single-entry interface
# list.
# NOTE(review): sampling gaps hide the vpputl binding, early returns, the
# name/inum bindings, the `if answer == 'y':` guard, and the `if ret != 0:`
# checks before the raise/logging lines -- confirm against the full file.
1817 def _iperf_vm_questions(self, node):
1819 Ask the user some questions and get a list of interfaces
1820 and IPv4 addresses associated with those interfaces
1822 :param node: Node dictionary.
1824 :returns: A list or interfaces with ip addresses
1829 interfaces = vpputl.get_hardware(node)
1830 if interfaces == {}:
1833 # First delete all the Virtual interfaces
1834 for intf in sorted(interfaces.items()):
1836 if name[:7] == 'Virtual':
1837 cmd = 'vppctl delete vhost-user {}'.format(name)
1838 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1840 logging.debug('{} failed on node {} {}'.format(
1841 cmd, node['host'], stderr))
1843 # Create a virtual interface, for each interface the user wants to use
1844 interfaces = vpputl.get_hardware(node)
1845 if interfaces == {}:
1847 interfaces_with_virtual_interfaces = []
1851 print('\nPlease pick one interface to connect to the iperf VM.')
1852 for intf in sorted(interfaces.items()):
1854 if name == 'local0':
1857 question = "Would you like connect this interface {} to " \
1858 "the VM [y/N]? ".format(name)
1859 answer = self._ask_user_yn(question, 'n')
# Socket path kept on the instance so create_iperf_vm can template it
# into the libvirt XML later.
1861 self._sockfilename = '/var/run/vpp/{}.sock'.format(
1862 name.replace('/', '_'))
1863 if os.path.exists(self._sockfilename):
1864 os.remove(self._sockfilename)
1865 cmd = 'vppctl create vhost-user socket {} server'.format(
1867 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1870 "Couldn't execute the command {}, {}.".format(
1872 vintname = stdout.rstrip('\r\n')
1874 cmd = 'chmod 777 {}'.format(self._sockfilename)
1875 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1878 "Couldn't execute the command {}, {}.".format(
1881 interface = {'name': name,
1882 'virtualinterface': '{}'.format(vintname),
1883 'bridge': '{}'.format(inum)}
1885 interfaces_with_virtual_interfaces.append(interface)
1886 return interfaces_with_virtual_interfaces
1888 def create_and_bridge_iperf_virtual_interface(self):
1890 After asking the user some questions, and create and bridge a
1891 virtual interface to be used with iperf VM
1895 for i in self._nodes.items():
1898 # Show the current bridge and interface configuration
1899 print("\nThis the current bridge configuration:")
1900 ifaces = VPPUtil.show_bridge(node)
1901 question = "\nWould you like to keep this configuration [Y/n]? "
1902 answer = self._ask_user_yn(question, 'y')
1904 self._sockfilename = '/var/run/vpp/{}.sock'.format(
1905 ifaces[0]['name'].replace('/', '_'))
1906 if os.path.exists(self._sockfilename):
1909 # Create a script that builds a bridge configuration with
1910 # physical interfaces and virtual interfaces
1911 ints_with_vints = self._iperf_vm_questions(node)
1913 for intf in ints_with_vints:
1914 vhoststr = '\n'.join([
1915 'comment { The following command creates the socket }',
1916 'comment { and returns a virtual interface }',
1917 'comment {{ create vhost-user socket '
1918 '/var/run/vpp/sock{}.sock server }}\n'.format(
1922 setintdnstr = 'set interface state {} down\n'.format(
1925 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1926 intf['name'], intf['bridge'])
1927 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1928 intf['virtualinterface'], intf['bridge'])
1930 # set interface state VirtualEthernet/0/0/0 up
1931 setintvststr = 'set interface state {} up\n'.format(
1932 intf['virtualinterface'])
1934 # set interface state VirtualEthernet/0/0/0 down
1935 setintupstr = 'set interface state {} up\n'.format(
1938 content += vhoststr + setintdnstr + setintbrstr + \
1939 setvintbrstr + setintvststr + setintupstr
1941 # Write the content to the script
1942 rootdir = node['rootdir']
1943 filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
1944 with open(filename, 'w+') as sfile:
1945 sfile.write(content)
1947 # Execute the script
1948 cmd = 'vppctl exec {}'.format(filename)
1949 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1951 logging.debug(stderr)
1953 print("\nA script as been created at {}".format(filename))
1954 print("This script can be run using the following:")
1955 print("vppctl exec {}\n".format(filename))
# Static helper: list running libvirt domains (the `cmd = 'virsh list'`
# assignment is in a sampling gap before embedded line 1968) and, if `name`
# appears in the output, `virsh destroy` it. Raises RuntimeError (presumably
# -- the raise keyword lines are in gaps) when either virsh command fails.
# NOTE(review): re.findall(name, stdout) treats `name` as a regex and
# matches substrings, so a VM named 'vm1' also matches 'vm10' -- worth
# confirming/tightening in the full file.
1958 def destroy_iperf_vm(name):
1960 After asking the user some questions, create a VM and connect
1961 the interfaces to VPP interfaces
1963 :param name: The name of the VM to be be destroyed
1968 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1970 logging.debug(stderr)
1972 "Couldn't execute the command {} : {}".format(cmd, stderr))
1974 if re.findall(name, stdout):
1975 cmd = 'virsh destroy {}'.format(name)
1976 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1978 logging.debug(stderr)
1980 "Couldn't execute the command {} : {}".format(
# Create the iperf VM: pick the Ubuntu or CentOS libvirt XML template,
# fill in vmname, the image/ISO paths (IPERFVM_IMAGE / IPERFVM_ISO under
# self._rootdir) and the vhost socket recorded earlier in
# self._sockfilename, write the rendered XML to IPERFVM_XML, then boot the
# domain with `virsh create`.
# NOTE(review): sampling gaps hide the else-branch header for the CentOS
# template, the tfilename assignments' start, the isoname kwarg line, and
# the `if ret != 0:` guard; the block may also continue past this chunk.
1983 def create_iperf_vm(self, vmname):
1985 After asking the user some questions, create a VM and connect
1986 the interfaces to VPP interfaces
1990 # Read the iperf VM template file
1991 distro = VPPUtil.get_linux_distro()
1992 if distro[0] == 'Ubuntu':
1994 '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(
1998 '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(
2001 with open(tfilename, 'r') as tfile:
2002 tcontents = tfile.read()
2006 imagename = '{}/vpp/vpp-config/{}'.format(
2007 self._rootdir, IPERFVM_IMAGE)
2008 isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
2009 tcontents = tcontents.format(vmname=vmname, imagename=imagename,
2011 vhostsocketname=self._sockfilename)
2014 ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
2015 with open(ifilename, 'w+') as ifile:
2016 ifile.write(tcontents)
2019 cmd = 'virsh create {}'.format(ifilename)
2020 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
2022 logging.debug(stderr)
2024 "Couldn't execute the command {} : {}".format(cmd, stderr))