1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
20 from netaddr import IPAddress
22 from vpplib.VPPUtil import VPPUtil
23 from vpplib.VppPCIUtil import VppPCIUtil
24 from vpplib.VppHugePageUtil import VppHugePageUtil
25 from vpplib.CpuUtils import CpuUtils
26 from vpplib.VppGrubUtil import VppGrubUtil
27 from vpplib.QemuUtils import QemuUtils
# Public API of this module: only the AutoConfig class is exported.
29 __all__ = ["AutoConfig"]
# Hugepage sizing policy: floor used when asking the user for a page count (see modify_huge_pages)...
33 MIN_TOTAL_HUGE_PAGES = 1024
# ...and ceiling as a percentage of free memory (maxpages computation in modify_huge_pages).
34 MAX_PERCENT_FOR_HUGE_PAGES = 70
# Artifacts for the iperf test VM; presumably paths relative to the root directory — TODO confirm.
36 IPERFVM_XML = 'configs/iperf-vm.xml'
37 IPERFVM_IMAGE = 'images/xenial-mod.img'
38 IPERFVM_ISO = 'configs/cloud-config.iso'
41 class AutoConfig(object):
42 """Auto Configuration Tools"""
# Constructor: records where the auto-config YAML lives and initializes
# per-instance state. NOTE(review): the embedded line numbering shows gaps
# (45, 47, 51-54, 57-58, 61-62, ...) — assignments such as self._clean,
# self._nodes and self._metadata (read by _loadconfig/updateconfig) appear
# to have been elided from this listing; confirm against the full file.
44 def __init__(self, rootdir, filename, clean=False):
46 The Auto Configure class.
48 :param rootdir: The root directory for all the auto configuration files
49 :param filename: The autoconfiguration file
50 :param clean: When set initialize the nodes from the auto-config file
# Full path of the auto-config YAML = rootdir + filename (simple concatenation).
55 self._autoconfig_filename = rootdir + filename
56 self._rootdir = rootdir
59 self._vpp_devices_node = {}
60 self._hugepage_config = ""
63 self._sockfilename = ""
# NOTE(review): the line below is a docstring fragment of a following
# accessor (likely get_nodes) whose def line was elided from this listing.
67 Returns the nodes dictionary.
# Make a one-time backup copy of a config file as '<filename>.orig' so the
# original can be restored later. Uses 'ls' to test for the backup's
# existence and 'sudo cp' to create it. NOTE(review): lines 87-88 and 92-93
# (presumably the ret-code checks after each exec_command) are elided here.
76 def _autoconfig_backup_file(filename):
80 :param filename: The file to backup
84 # Does a copy of the file exist, if not create one
85 ofile = filename + '.orig'
86 (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
# Only copy when 'ls' did not echo the backup path back (backup absent).
89 if stdout.strip('\n') != ofile:
90 cmd = 'sudo cp {} {}'.format(filename, ofile)
91 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
# Interactively prompt for an IPv4 address in CIDR form. If the user gives
# "a.b.c.d" without "/n", a second prompt asks for a dotted netmask which is
# converted to a prefix length via netaddr. Returns 'addr/plen'.
# NOTE(review): the def line, the retry loop and the try/except that makes
# the trailing "Please enter a valid IPv4 address." reachable are elided
# from this listing (gaps at 96-98, 101-106, 108, 111, 113, 117).
95 # noinspection PyBroadException
99 Asks the user for a number within a range.
100 default is returned if return is entered.
102 :returns: IP address with cidr
107 answer = raw_input("Please enter the IPv4 Address [n.n.n.n/n]: ")
109 ipinput = answer.split('/')
# IPAddress() raises on malformed input; presumably caught by the elided except.
110 ipaddr = IPAddress(ipinput[0])
112 plen = answer.split('/')[1]
114 answer = raw_input("Please enter the netmask [n.n.n.n]: ")
115 plen = IPAddress(answer).netmask_bits()
116 return '{}/{}'.format(ipaddr, plen)
118 print "Please enter a valid IPv4 address."
# Prompt until the user enters an integer in [first, last]; plain Return
# yields `default`. NOTE(review): the while loop, the default/return paths
# and the .format(...) continuations of both print statements are elided
# (gaps at 131-133, 135-138, 140-142, 145-146, 148-149, 151-154).
121 def _ask_user_range(question, first, last, default):
123 Asks the user for a number within a range.
124 default is returned if return is entered.
126 :param question: Text of a question.
127 :param first: First number in the range
128 :param last: Last number in the range
129 :param default: The value returned when return is entered
130 :type question: string
134 :returns: The answer to the question
139 answer = raw_input(question)
# NOTE(review): r'[0-9+]' matches a single digit OR a literal '+' anywhere
# in the string — it does not validate the whole answer is numeric; the
# int(answer) below can still raise on e.g. "1a". Verify intent.
143 if re.findall(r'[0-9+]', answer):
# range(first, last + 1) makes `last` inclusive, matching the docstring.
144 if int(answer) in range(first, last + 1):
147 print "Please a value between {} and {} or Return.". \
150 print "Please a number between {} and {} or Return.". \
# Prompt for a yes/no answer; plain Return yields `default` (lower-cased).
# Only the first character of the reply is significant. NOTE(review): the
# input_valid initialization, the Return/default branch and the final
# return are elided from this listing (gaps at 165-168, 170, 173-174, 176,
# 178, 180-181).
156 def _ask_user_yn(question, default):
158 Asks the user for a yes or no question.
160 :param question: Text of a question.
161 :param default: The value returned when return is entered
162 :type question: string
163 :type default: string
164 :returns: The answer to the question
169 default = default.lower()
171 while not input_valid:
172 answer = raw_input(question)
# Accepts any reply containing y/Y/n/N; first character decides.
175 if re.findall(r'[YyNn]', answer):
177 answer = answer[0].lower()
179 print "Please answer Y, N or Return."
# Load the testbed topology from the auto-config YAML; when not cleaning
# and a system config file already exists, prefer its 'nodes' section,
# otherwise take nodes from the auto-config topology. Finally stamp every
# node with the root directory. NOTE(review): the try: lines, the else:
# branch before line 211, and the loop body line assigning node (likely
# `node = i[1]`) are elided from this listing.
183 def _loadconfig(self):
185 Load the testbed configuration, given the auto configuration file.
189 # Get the Topology, from the topology layout file
191 with open(self._autoconfig_filename, 'r') as stream:
# NOTE(review): yaml.load without an explicit Loader is unsafe on
# untrusted input and deprecated in PyYAML >= 5.1 — consider safe_load.
193 topo = yaml.load(stream)
194 if 'metadata' in topo:
195 self._metadata = topo['metadata']
196 except yaml.YAMLError as exc:
# NOTE(review): the format string has one placeholder but two arguments;
# `exc` is silently discarded from the message.
197 raise RuntimeError("Couldn't read the Auto config file {}.".format(self._autoconfig_filename, exc))
199 systemfile = self._rootdir + self._metadata['system_config_file']
200 if self._clean is False and os.path.isfile(systemfile):
201 with open(systemfile, 'r') as sysstream:
203 systopo = yaml.load(sysstream)
204 if 'nodes' in systopo:
205 self._nodes = systopo['nodes']
206 except yaml.YAMLError as sysexc:
# NOTE(review): same discarded-exception .format() pattern as above.
207 raise RuntimeError("Couldn't read the System config file {}.".format(systemfile, sysexc))
209 # Get the nodes from Auto Config
211 self._nodes = topo['nodes']
213 # Set the root directory in all the nodes
214 for i in self._nodes.items():
216 node['rootdir'] = self._rootdir
def updateconfig(self):
    """
    Update the testbed configuration, given the auto configuration file.
    We will write the system configuration file with the current node
    information.
    """

    # Persist the current metadata and node state to the system config file.
    sysfile = self._rootdir + self._metadata['system_config_file']
    with open(sysfile, 'w') as sysstream:
        yaml.dump({'metadata': self._metadata, 'nodes': self._nodes}, sysstream)
# Re-read the auto-config YAML and merge the live self._nodes state
# (interfaces, cpu tuning, tcp session counts, hugepage total) back into
# it, then rewrite the file in place. NOTE(review): the try: lines, the
# key/node/port/interface unpacking lines (e.g. around 253-256, 259-261)
# and the exception body after line 248 are elided from this listing.
234 def _update_auto_config(self):
236 Write the auto configuration file with the new configuration data,
241 # Initialize the yaml data
243 with open(self._autoconfig_filename, 'r') as stream:
# NOTE(review): yaml.load without Loader — consider safe_load (PyYAML >= 5.1).
245 ydata = yaml.load(stream)
247 nodes = ydata['nodes']
248 except yaml.YAMLError as exc:
252 for i in nodes.items():
# Rebuild the interfaces map from the discovered state (pci + mac only).
257 node['interfaces'] = {}
258 for item in self._nodes[key]['interfaces'].items():
262 node['interfaces'][port] = {}
263 addr = '{}'.format(interface['pci_address'])
264 node['interfaces'][port]['pci_address'] = addr
265 if 'mac_address' in interface:
266 node['interfaces'][port]['mac_address'] = \
267 interface['mac_address']
# Copy over only the cpu keys the user can have set interactively.
269 if 'total_other_cpus' in self._nodes[key]['cpu']:
270 node['cpu']['total_other_cpus'] = \
271 self._nodes[key]['cpu']['total_other_cpus']
272 if 'total_vpp_cpus' in self._nodes[key]['cpu']:
273 node['cpu']['total_vpp_cpus'] = \
274 self._nodes[key]['cpu']['total_vpp_cpus']
275 if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
276 node['cpu']['reserve_vpp_main_core'] = \
277 self._nodes[key]['cpu']['reserve_vpp_main_core']
# TCP session expectations gathered by acquire_tcp_params.
280 if 'active_open_sessions' in self._nodes[key]['tcp']:
281 node['tcp']['active_open_sessions'] = \
282 self._nodes[key]['tcp']['active_open_sessions']
283 if 'passive_open_sessions' in self._nodes[key]['tcp']:
284 node['tcp']['passive_open_sessions'] = \
285 self._nodes[key]['tcp']['passive_open_sessions']
288 node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
290 # Write the auto config config file
291 with open(self._autoconfig_filename, 'w') as yamlfile:
292 yaml.dump(ydata, yamlfile)
# Apply the hugepage configuration on every node via VppHugePageUtil's
# dry-run apply. NOTE(review): the loop-body line binding `node` from the
# items() tuple (likely `node = i[1]`, orig lines 301-302) is elided here.
294 def apply_huge_pages(self):
296 Apply the huge page config
300 for i in self._nodes.items():
303 hpg = VppHugePageUtil(node)
304 hpg.hugepages_dryrun_apply()
# Build the "unix { ... }" fragment of the VPP startup config from
# node['vpp']['unix']; currently only the 'interactive' flag is handled.
# NOTE(review): the initialization of `unix` and the early return for the
# no-'unix' case (orig 312-318, 324) are elided from this listing.
307 def _apply_vpp_unix(node):
309 Apply the VPP Unix config
311 :param node: Node dictionary with cpuinfo.
316 if 'unix' not in node['vpp']:
319 unixv = node['vpp']['unix']
320 if 'interactive' in unixv:
321 interactive = unixv['interactive']
322 if interactive is True:
323 unix = ' interactive\n'
325 return unix.rstrip('\n')
# Build the "cpu { ... }" fragment for VPP startup: main-core plus a
# corelist-workers string built from (start, end) worker tuples.
# NOTE(review): the initialization of `cpu` / `vpp_worker_str` and the
# final return are elided from this listing (gaps at 333-337, 340-341,
# 344-345, 349, 351, 355, 357, 359-362).
328 def _apply_vpp_cpu(node):
330 Apply the VPP cpu config
332 :param node: Node dictionary with cpuinfo.
338 if 'vpp_main_core' in node['cpu']:
339 vpp_main_core = node['cpu']['vpp_main_core']
# NOTE(review): `is not 0` tests object identity against a small-int
# literal; works on CPython by interning but `!= 0` is the correct test.
342 if vpp_main_core is not 0:
343 cpu += ' main-core {}\n'.format(vpp_main_core)
346 vpp_workers = node['cpu']['vpp_workers']
347 vpp_worker_len = len(vpp_workers)
348 if vpp_worker_len > 0:
350 for i, worker in enumerate(vpp_workers):
# Comma-separate ranges; a (n, n) tuple renders as "n", otherwise "a-b".
352 vpp_worker_str += ','
353 if worker[0] == worker[1]:
354 vpp_worker_str += "{}".format(worker[0])
356 vpp_worker_str += "{}-{}".format(worker[0], worker[1])
358 cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
# Build the dpdk "dev <pci> { ... }" stanzas for the VPP startup config:
# per-interface rx/tx queue counts and descriptor counts, plus an optional
# num-mbufs line when total_mbufs exceeds the 16384 default.
# NOTE(review): initializations of `devices`, num_rx/tx_queues and
# num_rx/tx_desc, the else:/closing-brace lines, and the final return are
# elided from this listing (gaps at 374, 376, 378, 380-381, 386-389, 394,
# 396, 398, 400, 402, 404, 406-407, 412-414).
363 def _apply_vpp_devices(node):
365 Apply VPP PCI Device configuration to vpp startup.
367 :param node: Node dictionary with cpuinfo.
372 ports_per_numa = node['cpu']['ports_per_numa']
373 total_mbufs = node['cpu']['total_mbufs']
375 for item in ports_per_numa.items():
377 interfaces = value['interfaces']
379 # if 0 was specified for the number of vpp workers, use 1 queue
382 if 'rx_queues' in value:
383 num_rx_queues = value['rx_queues']
384 if 'tx_queues' in value:
385 num_tx_queues = value['tx_queues']
390 # Create the devices string
391 for interface in interfaces:
392 pci_address = interface['pci_address']
# Strip stray quoting that may surround the PCI address in the YAML.
393 pci_address = pci_address.lstrip("'").rstrip("'")
395 devices += ' dev {} {{ \n'.format(pci_address)
397 devices += ' num-rx-queues {}\n'.format(num_rx_queues)
399 devices += ' num-rx-queues {}\n'.format(1)
401 devices += ' num-tx-queues {}\n'.format(num_tx_queues)
403 devices += ' num-rx-desc {}\n'.format(num_rx_desc)
405 devices += ' num-tx-desc {}\n'.format(num_tx_desc)
408 # If the total mbufs is not 0 or less than the default, set num-bufs
409 logging.debug("Total mbufs: {}".format(total_mbufs))
# NOTE(review): `is not 0` identity test against an int literal; `!= 0`
# (or just the > 16384 check alone, which subsumes it) is the safe form.
410 if total_mbufs is not 0 and total_mbufs > 16384:
411 devices += '\n num-mbufs {}'.format(total_mbufs)
# Place the requested number of VPP workers (and, if still pending, the
# main core) into the cpu slice of one numa node, appending a
# (start, workers_end) tuple to vpp_workers. Returns whether the main core
# still needs to be reserved (False once it has been placed).
# NOTE(review): the unpacking of `start`/`end` from the numa cpu slice
# (orig 440-442), the `start += 1` under reserve_vpp_main_core (447-448)
# and several doc lines are elided from this listing.
416 def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end, total_vpp_workers,
417 reserve_vpp_main_core):
419 Calculate the VPP worker information
421 :param node: Node dictionary
422 :param vpp_workers: List of VPP workers
423 :param numa_node: Numa node
424 :param other_cpus_end: The end of the cpus allocated for cores
426 :param total_vpp_workers: The number of vpp workers needed
427 :param reserve_vpp_main_core: Is there a core needed for
431 :type other_cpus_end: int
432 :type total_vpp_workers: int
433 :type reserve_vpp_main_core: bool
434 :returns: Is a core still needed for the vpp main core
438 # Can we fit the workers in one of these slices
439 cpus = node['cpu']['cpus_per_node'][numa_node]
# Never overlap the cpus already reserved for non-VPP processes.
443 if start <= other_cpus_end:
444 start = other_cpus_end + 1
446 if reserve_vpp_main_core:
449 workers_end = start + total_vpp_workers - 1
# Only commit this slice if all workers fit before the slice's end.
451 if workers_end <= end:
452 if reserve_vpp_main_core:
# Main core sits immediately before the worker range.
453 node['cpu']['vpp_main_core'] = start - 1
454 reserve_vpp_main_core = False
455 if total_vpp_workers:
456 vpp_workers.append((start, workers_end))
459 # We still need to reserve the main core
460 if reserve_vpp_main_core:
461 node['cpu']['vpp_main_core'] = other_cpus_end + 1
463 return reserve_vpp_main_core
# Derive rx/tx queue counts and the resulting mbuf requirement for one
# numa node's ports, recording the queue counts into the
# ports_per_numa_value dict. NOTE(review): the `desc_entries` assignment
# (orig ~492), the tx_queues/desc recording lines and the return statement
# (orig 498-500) are elided from this listing.
466 def _calc_desc_and_queues(total_numa_nodes,
467 total_ports_per_numa,
469 ports_per_numa_value):
471 Calculate the number of descriptors and queues
473 :param total_numa_nodes: The total number of numa nodes
474 :param total_ports_per_numa: The total number of ports for this
476 :param total_rx_queues: The total number of rx queues / port
477 :param ports_per_numa_value: The value from the ports_per_numa
479 :type total_numa_nodes: int
480 :type total_ports_per_numa: int
481 :type total_rx_queues: int
482 :type ports_per_numa_value: dict
483 :returns The total number of message buffers
487 # Get the number of rx queues
# At least one rx queue even when the caller asked for zero.
488 rx_queues = max(1, total_rx_queues)
489 tx_queues = rx_queues * total_numa_nodes + 1
491 # Get the descriptor entries
493 ports_per_numa_value['rx_queues'] = rx_queues
# mbufs = (rx + tx descriptors) per port, times the ports on this numa.
494 total_mbufs = (((rx_queues * desc_entries) +
495 (tx_queues * desc_entries)) *
496 total_ports_per_numa)
# NOTE(review): self-assignment is a no-op; likely a remnant of an edit.
497 total_mbufs = total_mbufs
# Group the VPP interfaces by the numa node they live on, store the map in
# node['cpu']['ports_per_numa'] and return it. NOTE(review): the
# `ports_per_numa = {}` init, the `i = item[1]` unpacking and the `else:`
# between the two identical append lines (orig 514, 516, 520) are elided —
# lines 519/521 are almost certainly the two arms of an if/else, not a
# double append.
502 def _create_ports_per_numa(node, interfaces):
504 Create a dictionary or ports per numa node
505 :param node: Node dictionary
506 :param interfaces: All the interfaces to be used by vpp
508 :type interfaces: dict
509 :returns: The ports per numa dictionary
513 # Make a list of ports by numa node
515 for item in interfaces.items():
517 if i['numa_node'] not in ports_per_numa:
518 ports_per_numa[i['numa_node']] = {'interfaces': []}
519 ports_per_numa[i['numa_node']]['interfaces'].append(i)
521 ports_per_numa[i['numa_node']]['interfaces'].append(i)
522 node['cpu']['ports_per_numa'] = ports_per_numa
524 return ports_per_numa
# Top-level cpu planning for each node: group ports by numa node, reserve
# the "other" (non-VPP) cpu range, then per numa node compute queue/mbuf
# needs and allocate VPP workers/main core. Results are written back into
# node['cpu']. NOTE(review): several lines are elided from this listing,
# including the node/key unpacking, `other_cpus_start`, `vpp_workers`/
# `total_mbufs` initializations, the numa_node/value unpacking in the
# inner loop, and the total_mbufs accumulation (orig 535-536, 544, 552,
# 562, 564-565, 568-570, 574-575, 580-586).
526 def calculate_cpu_parameters(self):
528 Calculate the cpu configuration.
532 # Calculate the cpu parameters, needed for the
533 # vpp_startup and grub configuration
534 for i in self._nodes.items():
537 # get total number of nic ports
538 interfaces = node['interfaces']
540 # Make a list of ports by numa node
541 ports_per_numa = self._create_ports_per_numa(node, interfaces)
543 # Get the number of cpus to skip, we never use the first cpu
545 other_cpus_end = other_cpus_start + node['cpu']['total_other_cpus'] - 1
# NOTE(review): `is not 0` identity test on an int; `!= 0` is the safe form.
547 if other_cpus_end is not 0:
548 other_workers = (other_cpus_start, other_cpus_end)
549 node['cpu']['other_workers'] = other_workers
551 # Allocate the VPP main core and workers
553 reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
554 total_vpp_cpus = node['cpu']['total_vpp_cpus']
555 total_rx_queues = node['cpu']['total_rx_queues']
557 # If total_vpp_cpus is 0 or is less than the numa nodes with ports
558 # then we shouldn't get workers
559 total_workers_node = 0
560 if len(ports_per_numa):
# Python 2 integer division: workers are split evenly across numa nodes.
561 total_workers_node = total_vpp_cpus / len(ports_per_numa)
563 if reserve_vpp_main_core:
# NOTE(review): `is not 0` on a computed sum relies on small-int interning.
566 if total_main + total_workers_node is not 0:
567 for item in ports_per_numa.items():
571 # Get the number of descriptors and queues
572 mbufs = self._calc_desc_and_queues(len(ports_per_numa),
573 len(value['interfaces']), total_rx_queues, value)
576 # Get the VPP workers
577 reserve_vpp_main_core = self._calc_vpp_workers(node, vpp_workers, numa_node,
578 other_cpus_end, total_workers_node,
579 reserve_vpp_main_core)
582 total_mbufs = int(total_mbufs)
587 node['cpu']['vpp_workers'] = vpp_workers
588 node['cpu']['total_mbufs'] = total_mbufs
# Build the api-segment / session / tcp stanzas of the VPP startup config
# from the expected active (client) and passive (server) session counts.
# When both counts are zero only the minimal api-segment block is emitted.
# NOTE(review): the condition guarding the early return (presumably
# `if (aos + pos) == 0:`, orig 612) and the closing "}" lines of each
# stanza (orig 614 area, 621-622, 628, 634-635, 638, 640-641) are elided
# from this listing.
594 def _apply_vpp_tcp(node):
596 Apply the VPP Unix config
598 :param node: Node dictionary with cpuinfo.
602 active_open_sessions = node['tcp']['active_open_sessions']
603 aos = int(active_open_sessions)
605 passive_open_sessions = node['tcp']['passive_open_sessions']
606 pos = int(passive_open_sessions)
608 # Generate the api-segment gid vpp sheit in any case
610 tcp = "api-segment {\n"
611 tcp = tcp + "  gid vpp\n"
613 return tcp.rstrip('\n')
615 tcp = "# TCP stack-related configuration parameters\n"
616 tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
617 tcp = tcp + "heapsize 4g\n\n"
618 tcp = tcp + "api-segment {\n"
619 tcp = tcp + "  global-size 2000M\n"
620 tcp = tcp + "  api-size 1G\n"
623 tcp = tcp + "session {\n"
624 tcp = tcp + " event-queue-length " + "{:d}".format(aos + pos) + "\n"
625 tcp = tcp + " preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
# Bucket counts are sized at a quarter of the expected total sessions.
626 tcp = tcp + " v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
627 tcp = tcp + " v4-session-table-memory 3g\n"
629 tcp = tcp + " v4-halfopen-table-buckets " + \
630 "{:d}".format((aos + pos) / 4) + "\n"
631 tcp = tcp + " v4-halfopen-table-memory 3g\n"
632 tcp = tcp + " local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
633 tcp = tcp + " local-endpoints-table-memory 3g\n"
636 tcp = tcp + "tcp {\n"
637 tcp = tcp + " preallocated-connections " + "{:d}".format(aos + pos) + "\n"
# Half-open preallocation only covers the client (active-open) side.
639 tcp = tcp + " preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
642 return tcp.rstrip('\n')
# Render the VPP startup config for every node: gather the devices, cpu,
# unix and tcp fragments, back up the existing startup file, fill the
# '<startup>.template' file's {placeholders} via str.format, then write
# the result through a sudo'd heredoc. NOTE(review): the node unpacking,
# the `if ret != 0:` guards before each raise, and the remaining format
# kwargs after `unix=unix` (orig 681-684) are elided from this listing.
644 def apply_vpp_startup(self):
646 Apply the vpp startup configration
650 # Apply the VPP startup configruation
651 for i in self._nodes.items():
654 # Get the startup file
655 rootdir = node['rootdir']
656 sfile = rootdir + node['vpp']['startup_config_file']
659 devices = self._apply_vpp_devices(node)
662 cpu = self._apply_vpp_cpu(node)
664 # Get the unix config
665 unix = self._apply_vpp_unix(node)
667 # Get the TCP configuration, if any
668 tcp = self._apply_vpp_tcp(node)
670 # Make a backup if needed
671 self._autoconfig_backup_file(sfile)
674 tfile = sfile + '.template'
675 (ret, stdout, stderr) = \
676 VPPUtil.exec_command('cat {}'.format(tfile))
678 raise RuntimeError('Executing cat command failed to node {}'.
679 format(node['host']))
# The template's literal {unix}/{cpu}/... markers are expanded here.
680 startup = stdout.format(unix=unix,
685 (ret, stdout, stderr) = \
686 VPPUtil.exec_command('rm {}'.format(sfile))
688 logging.debug(stderr)
# Heredoc write so the multi-line config lands in a root-owned file.
690 cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
691 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
693 raise RuntimeError('Writing config failed node {}'.
694 format(node['host']))
# Build the isolcpus string from the other-process range, the VPP main
# core and the worker ranges, then hand it to VppGrubUtil to rewrite the
# grub cmdline; the previous cmdline is saved under node['grub'].
# NOTE(review): the node unpacking, the `isolated_cpus` initialization,
# the else branch for no other_workers, the comma separator between
# ranges, and the `node['grub'] = {}` line (orig 732) are elided from
# this listing.
696 def apply_grub_cmdline(self):
698 Apply the grub cmdline
702 for i in self._nodes.items():
705 # Get the isolated CPUs
706 other_workers = node['cpu']['other_workers']
707 vpp_workers = node['cpu']['vpp_workers']
708 if 'vpp_main_core' in node['cpu']:
709 vpp_main_core = node['cpu']['vpp_main_core']
713 if other_workers is not None:
714 all_workers = [other_workers]
# NOTE(review): `is not 0` identity test on an int; `!= 0` is the safe form.
715 if vpp_main_core is not 0:
716 all_workers += [(vpp_main_core, vpp_main_core)]
717 all_workers += vpp_workers
719 for idx, worker in enumerate(all_workers):
# A (n, n) range renders as "n", otherwise as "a-b".
724 if worker[0] == worker[1]:
725 isolated_cpus += "{}".format(worker[0])
727 isolated_cpus += "{}-{}".format(worker[0], worker[1])
729 vppgrb = VppGrubUtil(node)
730 current_cmdline = vppgrb.get_current_cmdline()
731 if 'grub' not in node:
733 node['grub']['current_cmdline'] = current_cmdline
734 node['grub']['default_cmdline'] = \
735 vppgrb.apply_cmdline(node, isolated_cpus)
# Discover each node's current hugepage state (kernel limits plus actual
# totals/free/size and memory figures) and record it under
# node['hugepages']. NOTE(review): the node unpacking lines are elided.
739 def get_hugepages(self):
741 Get the hugepage configuration
745 for i in self._nodes.items():
748 hpg = VppHugePageUtil(node)
749 max_map_count, shmmax = hpg.get_huge_page_config()
750 node['hugepages']['max_map_count'] = max_map_count
# NOTE(review): key is spelled 'shmax' (one m) — looks like a typo for
# 'shmmax'; any reader of this key must use the same spelling.
751 node['hugepages']['shmax'] = shmmax
752 total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
753 node['hugepages']['actual_total'] = total
754 node['hugepages']['free'] = free
755 node['hugepages']['size'] = size
756 node['hugepages']['memtotal'] = memtotal
757 node['hugepages']['memfree'] = memfree
# Grub discovery: read the current and default kernel cmdlines and count
# the cpus currently isolated via isolcpus=, recording all three under
# node['grub']. NOTE(review): the def line of this method (likely
# `def get_grub(self):`), the node unpacking, the `current_iso_cpus = 0`
# init, the `if iso_cpurl > 0:` guard, the single/range else branches and
# `node['grub'] = {}` are elided from this listing.
763 Get the grub configuration
767 for i in self._nodes.items():
770 vppgrb = VppGrubUtil(node)
771 current_cmdline = vppgrb.get_current_cmdline()
772 default_cmdline = vppgrb.get_default_cmdline()
774 # Get the total number of isolated CPUs
776 iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
777 iso_cpurl = len(iso_cpur)
779 iso_cpu_str = iso_cpur[0]
# "isolcpus=1-3,5" -> "1-3,5"
780 iso_cpu_str = iso_cpu_str.split('=')[1]
781 iso_cpul = iso_cpu_str.split(',')
782 for iso_cpu in iso_cpul:
783 isocpuspl = iso_cpu.split('-')
# NOTE(review): `is 1` identity test on an int; `== 1` is the safe form.
784 if len(isocpuspl) is 1:
785 current_iso_cpus += 1
787 first = int(isocpuspl[0])
788 second = int(isocpuspl[1])
790 current_iso_cpus += 1
# NOTE(review): counts a range "a-b" as b-a cpus (not b-a+1); confirm
# whether the off-by-one is intentional before reusing this count.
792 current_iso_cpus += second - first
794 if 'grub' not in node:
796 node['grub']['current_cmdline'] = current_cmdline
797 node['grub']['default_cmdline'] = default_cmdline
798 node['grub']['current_iso_cpus'] = current_iso_cpus
# Scan one node's PCI devices via VppPCIUtil and cache the dpdk/kernel/
# other/link-up device maps under node['devices']. NOTE(review): the
# `node['devices'] = {}` initialization (orig ~816) is elided from this
# listing.
803 def _get_device(node):
805 Get the device configuration for a single node
807 :param node: Node dictionary with cpuinfo.
812 vpp = VppPCIUtil(node)
813 vpp.get_all_devices()
815 # Save the device information
817 node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
818 node['devices']['kernel_devices'] = vpp.get_kernel_devices()
819 node['devices']['other_devices'] = vpp.get_other_devices()
820 node['devices']['linkup_devices'] = vpp.get_link_up_devices()
# Refresh the device maps for every node by delegating to _get_device.
# NOTE(review): the node unpacking line is elided from this listing.
822 def get_devices_per_node(self):
824 Get the device configuration for all the nodes
828 for i in self._nodes.items():
830 # Update the interface data
832 self._get_device(node)
# Run `lscpu -p` on the node and parse its CSV output into per-cpu dicts
# with 'cpu', 'core', 'socket' and 'node' fields. NOTE(review): the
# `cmd = 'lscpu -p'` assignment, the `if ret != 0:` guard, the loop over
# lines, the append into the result list and the return are elided from
# this listing.
837 def get_cpu_layout(node):
841 using lscpu -p get the cpu layout.
842 Returns a list with each item representing a single cpu.
844 :param node: Node dictionary.
846 :returns: The cpu layout
851 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
853 raise RuntimeError('{} failed on node {} {}'.
854 format(cmd, node['host'], stderr))
857 lines = stdout.split('\n')
# lscpu -p prefixes its header with '#'; skip it and blank lines.
859 if line == '' or line[0] == '#':
861 linesplit = line.split(',')
862 layout = {'cpu': linesplit[0], 'core': linesplit[1],
863 'socket': linesplit[2], 'node': linesplit[3]}
865 # cpu, core, socket, node
# CPU discovery: collect the layout from all nodes (via CpuUtils), then
# per node record the lscpu layout and whether SMT/hyperthreading is on.
# NOTE(review): this method's def line (likely `def get_cpu(self):`) and
# the node unpacking are elided from this listing.
872 Get the cpu configuration
877 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
879 for i in self._nodes.items():
883 layout = self.get_cpu_layout(node)
884 node['cpu']['layout'] = layout
886 cpuinfo = node['cpuinfo']
887 smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
888 node['cpu']['smt_enabled'] = smt_enabled
890 # We don't want to write the cpuinfo
# System discovery driver: gather hugepages, devices, cpu and grub state
# for all nodes. NOTE(review): this method's def line and the actual
# calls for hugepages/cpu/grub (orig 903, 909, 912) are elided; only the
# device call survived in this listing.
898 Get the current system configuration.
902 # Get the Huge Page configuration
905 # Get the device configuration
906 self.get_devices_per_node()
908 # Get the CPU configuration
911 # Get the current grub cmdline
# Interactive cpu tuning: ask how many cores to give VPP, how many to
# reserve for other processes, whether to pin a VPP main core, and how
# many RX queues per port; answers are stored in node['cpu'].
# NOTE(review): the max_vpp_cpus computation, the `if answer == 'y':`
# guard before setting reserve_vpp_main_core, and assorted docstring/blank
# lines are elided from this listing.
914 def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
916 Ask the user questions related to the cpu configuration.
918 :param node: Node dictionary
919 :param total_cpus: The total number of cpus in the system
920 :param numa_nodes: The list of numa nodes in the system
922 :type total_cpus: int
923 :type numa_nodes: list
926 print "\nYour system has {} core(s) and {} Numa Nodes.". \
927 format(total_cpus, len(numa_nodes))
928 print "To begin, we suggest not reserving any cores for VPP or other processes."
929 print "Then to improve performance start reserving cores and adding queues as needed. "
934 question = "\nHow many core(s) shall we reserve for VPP [0-{}][0]? ".format(max_vpp_cpus)
935 total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
936 node['cpu']['total_vpp_cpus'] = total_vpp_cpus
# Cap "other" cores at half of what remains after VPP's allocation.
938 max_other_cores = (total_cpus - total_vpp_cpus) / 2
939 question = 'How many core(s) do you want to reserve for processes other than VPP? [0-{}][0]? '. \
940 format(str(max_other_cores))
941 total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
942 node['cpu']['total_other_cpus'] = total_other_cpus
944 max_main_cpus = max_vpp_cpus + 1 - total_vpp_cpus
945 reserve_vpp_main_core = False
946 if max_main_cpus > 0:
947 question = "Should we reserve 1 core for the VPP Main thread? "
948 question += "[y/N]? "
949 answer = self._ask_user_yn(question, 'n')
951 reserve_vpp_main_core = True
952 node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
# Main core placement is decided later by calculate_cpu_parameters.
953 node['cpu']['vpp_main_core'] = 0
955 question = "How many RX queues per port shall we use for VPP [1-4][1]? ". \
957 total_rx_queues = self._ask_user_range(question, 1, 4, 1)
958 node['cpu']['total_rx_queues'] = total_rx_queues
# Build the per-node cpu model from the lscpu layout (total cpus, cores,
# numa nodes, cpu slices per node), optionally run the interactive cpu
# questions (only on systems with >= 8 cpus), tag interfaces with their
# numa node, and persist everything back to the auto-config file.
# NOTE(review): many lines are elided from this listing, including the
# node unpacking, total_cpus/cores/numa_nodes/cpus_per_node inits,
# the first_node assignment and break, the cpu/core extraction in the
# second loop, and the cores.append line.
960 def modify_cpu(self, ask_questions=True):
962 Modify the cpu configuration, asking for the user for the values.
964 :param ask_questions: When true ask the user for config parameters
969 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
971 for i in self._nodes.items():
974 total_cpus_per_slice = 0
978 cpu_layout = self.get_cpu_layout(node)
980 # Assume the number of cpus per slice is always the same as the
# Count how many consecutive layout entries share the first numa node.
983 for cpu in cpu_layout:
984 if cpu['node'] != first_node:
986 total_cpus_per_slice += 1
988 # Get the total number of cpus, cores, and numa nodes from the
990 for cpul in cpu_layout:
991 numa_node = cpul['node']
996 if numa_node not in cpus_per_node:
997 cpus_per_node[numa_node] = []
# Record a (start, end) slice each time we hit a slice boundary.
998 cpuperslice = int(cpu) % total_cpus_per_slice
1000 cpus_per_node[numa_node].append((int(cpu), int(cpu) +
1001 total_cpus_per_slice - 1))
1002 if numa_node not in numa_nodes:
1003 numa_nodes.append(numa_node)
1004 if core not in cores:
1006 node['cpu']['cpus_per_node'] = cpus_per_node
1008 # Ask the user some questions
1009 if ask_questions and total_cpus >= 8:
1010 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1012 # Populate the interfaces with the numa node
1013 if 'interfaces' in node:
1014 ikeys = node['interfaces'].keys()
1015 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1017 # We don't want to write the cpuinfo
# cpuinfo is bulky discovery output; blank it so it never lands in YAML.
1018 node['cpuinfo'] = ""
1021 self._update_auto_config()
# Interactive triage of devices that are bound to neither the kernel nor
# DPDK: first offer to give them back to the OS (rebind to their unused
# kernel driver, move to kernel_devices), then offer the remainder to VPP
# (move to dpdk_devices). NOTE(review): the `if answer == 'y':` guards,
# the dvid/device unpacking from items() tuples, the vppd accumulation
# dicts and several else branches are elided from this listing.
1024 def _modify_other_devices(self, node,
1025 other_devices, kernel_devices, dpdk_devices):
1027 Modify the devices configuration, asking for the user for the values.
1031 odevices_len = len(other_devices)
1032 if odevices_len > 0:
1033 print "\nThese device(s) are currently NOT being used",
1034 print "by VPP or the OS.\n"
1035 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1036 question = "\nWould you like to give any of these devices"
1037 question += " back to the OS [Y/n]? "
1038 answer = self._ask_user_yn(question, 'Y')
1041 for dit in other_devices.items():
1044 question = "Would you like to use device {} for". \
1046 question += " the OS [y/N]? "
1047 answer = self._ask_user_yn(question, 'n')
# A device can only be rebound if it advertises an unused kernel driver.
1049 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1050 driver = device['unused'][0]
1051 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1053 logging.debug('Could not bind device {}'.format(dvid))
# Migrate the successfully rebound devices into the kernel map.
1055 for dit in vppd.items():
1058 kernel_devices[dvid] = device
1059 del other_devices[dvid]
1061 odevices_len = len(other_devices)
1062 if odevices_len > 0:
1063 print "\nThese device(s) are still NOT being used ",
1064 print "by VPP or the OS.\n"
1065 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1066 question = "\nWould you like use any of these for VPP [y/N]? "
1067 answer = self._ask_user_yn(question, 'N')
1070 for dit in other_devices.items():
1073 question = "Would you like to use device {} ".format(dvid)
1074 question += "for VPP [y/N]? "
1075 answer = self._ask_user_yn(question, 'n')
1078 for dit in vppd.items():
1081 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1082 driver = device['unused'][0]
1083 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1084 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1086 logging.debug('Could not bind device {}'.format(dvid))
1087 dpdk_devices[dvid] = device
1088 del other_devices[dvid]
# Non-interactive variant: rebuild node['interfaces'] straight from the
# devices already listed in the config file (other + dpdk + kernel maps),
# keeping only entries whose PCI id is actually present on the node.
# NOTE(review): the node unpacking, the `interfaces = {}` init and the
# device argument of vpp_create_interface (orig 1112) are elided from
# this listing.
1090 def update_interfaces_config(self):
1092 Modify the interfaces directly from the config file.
1096 for i in self._nodes.items():
1098 devices = node['devices']
# Union of every discovered device class, keyed by PCI id.
1099 all_devices = devices['other_devices']
1100 all_devices.update(devices['dpdk_devices'])
1101 all_devices.update(devices['kernel_devices'])
1105 if 'interfaces' in node:
1106 current_ifcs = node['interfaces']
1108 for ifc in current_ifcs.values():
1109 dvid = ifc['pci_address']
1110 if dvid in all_devices:
1111 VppPCIUtil.vpp_create_interface(interfaces, dvid,
1113 node['interfaces'] = interfaces
# Interactive device assignment: triage "other" devices, then offer
# kernel-bound devices to VPP (rebind to their unused/DPDK driver), then
# offer to return DPDK-bound devices to the kernel, finally rebuild
# node['interfaces'] from the resulting dpdk set and persist the config.
# NOTE(review): the node unpacking, the `if answer == 'y':` guards, the
# dvid/device unpacking, the vppd accumulation dicts and the
# `interfaces = {}` init before orig 1201 are elided from this listing.
1117 def modify_devices(self):
1119 Modify the devices configuration, asking for the user for the values.
1123 for i in self._nodes.items():
1125 devices = node['devices']
1126 other_devices = devices['other_devices']
1127 kernel_devices = devices['kernel_devices']
1128 dpdk_devices = devices['dpdk_devices']
1131 self._modify_other_devices(node, other_devices,
1132 kernel_devices, dpdk_devices)
1134 # Get the devices again for this node
1135 self._get_device(node)
1136 devices = node['devices']
1137 kernel_devices = devices['kernel_devices']
1138 dpdk_devices = devices['dpdk_devices']
1140 klen = len(kernel_devices)
1142 print "\nThese devices have kernel interfaces, but",
1143 print "appear to be safe to use with VPP.\n"
1144 VppPCIUtil.show_vpp_devices(kernel_devices)
1145 question = "\nWould you like to use any of these "
1146 question += "device(s) for VPP [y/N]? "
1147 answer = self._ask_user_yn(question, 'n')
1150 for dit in kernel_devices.items():
1153 question = "Would you like to use device {} ". \
1155 question += "for VPP [y/N]? "
1156 answer = self._ask_user_yn(question, 'n')
1159 for dit in vppd.items():
# Rebinding requires an advertised unused driver (the DPDK-capable one).
1162 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1163 driver = device['unused'][0]
1164 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1165 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1167 logging.debug('Could not bind device {}'.format(dvid))
1168 dpdk_devices[dvid] = device
1169 del kernel_devices[dvid]
1171 dlen = len(dpdk_devices)
1173 print "\nThese device(s) will be used by VPP.\n"
1174 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1175 question = "\nWould you like to remove any of "
1176 question += "these device(s) [y/N]? "
1177 answer = self._ask_user_yn(question, 'n')
1180 for dit in dpdk_devices.items():
1183 question = "Would you like to remove {} [y/N]? ". \
1185 answer = self._ask_user_yn(question, 'n')
1188 for dit in vppd.items():
1191 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1192 driver = device['unused'][0]
1193 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1194 VppPCIUtil.bind_vpp_device(node, driver, dvid)
1196 logging.debug('Could not bind device {}'.format(dvid))
# Returned devices go back into the kernel map.
1197 kernel_devices[dvid] = device
1198 del dpdk_devices[dvid]
# Whatever remains DPDK-bound becomes the VPP interface set.
1201 for dit in dpdk_devices.items():
1204 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1205 node['interfaces'] = interfaces
1207 print "\nThese device(s) will be used by VPP, please",
1208 print "rerun this option if this is incorrect.\n"
1209 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1211 self._update_auto_config()
# Interactive hugepage sizing: show current state, cap the allowed count
# at MAX_PERCENT_FOR_HUGE_PAGES% of free memory, ask for a new total
# (floor MIN_TOTAL_HUGE_PAGES), persist the answer and re-discover.
# NOTE(review): the node unpacking, the `if answer == 'y':` branch
# selection and the .format(...) continuations of the two summary prints
# (orig 1232, 1241) are elided from this listing.
1214 def modify_huge_pages(self):
1216 Modify the huge page configuration, asking for the user for the values.
1220 for i in self._nodes.items():
1223 total = node['hugepages']['actual_total']
1224 free = node['hugepages']['free']
1225 size = node['hugepages']['size']
# memfree/size strings look like "12345 kB"; take the number part.
1226 memfree = node['hugepages']['memfree'].split(' ')[0]
1227 hugesize = int(size.split(' ')[0])
1228 # The max number of huge pages should be no more than
1229 # 70% of total free memory
1230 maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
1231 print "\nThere currently {} {} huge pages free.". \
1233 question = "Do you want to reconfigure the number of "
1234 question += "huge pages [y/N]? "
1235 answer = self._ask_user_yn(question, 'n')
# Keep the currently observed total if the user declines to change it.
1237 node['hugepages']['total'] = total
1240 print "\nThere currently a total of {} huge pages.". \
1242 question = "How many huge pages do you want [{} - {}][{}]? ". \
1243 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
# NOTE(review): the literals 1024 duplicate MIN_TOTAL_HUGE_PAGES; keep
# them in sync with the constant used in the prompt above.
1244 answer = self._ask_user_range(question, 1024, maxpages, 1024)
1245 node['hugepages']['total'] = str(answer)
1247 # Update auto-config.yaml
1248 self._update_auto_config()
1250 # Rediscover just the hugepages
1251 self.get_hugepages()
# Placeholder for TCP parameter discovery; intentionally does nothing yet
# (kept so acquire_tcp_params has a rediscovery hook to call).
1253 def get_tcp_params(self):
1255 Get the tcp configuration
1258 # maybe nothing to do here?
# Ask for the expected active-open (client) and passive-open (server) TCP
# session counts per node; answers under 10000 are treated as zero
# (presumably by an elided `answer = 0` line). Persists to the auto-config
# file and re-runs (the currently empty) tcp discovery. NOTE(review): the
# node unpacking and the bodies of both `if int(answer) < 10000:` branches
# (orig 1275, 1283) are elided from this listing.
1261 def acquire_tcp_params(self):
1263 Ask the user for TCP stack configuration parameters
1267 for i in self._nodes.items():
1270 question = "\nHow many active-open / tcp client sessions are expected "
1271 question = question + "[0-10000000][0]? "
1272 answer = self._ask_user_range(question, 0, 10000000, 0)
1273 # Less than 10K is equivalent to 0
1274 if int(answer) < 10000:
1276 node['tcp']['active_open_sessions'] = answer
1278 question = "How many passive-open / tcp server sessions are expected "
1279 question = question + "[0-10000000][0]? "
1280 answer = self._ask_user_range(question, 0, 10000000, 0)
1281 # Less than 10K is equivalent to 0
1282 if int(answer) < 10000:
1284 node['tcp']['passive_open_sessions'] = answer
1286 # Update auto-config.yaml
1287 self._update_auto_config()
1289 # Rediscover tcp parameters
1290 self.get_tcp_params()
1293 def patch_qemu(node):
1295 Patch qemu with the correct patches.
1297 :param node: Node dictionary
1301 print '\nWe are patching the node "{}":\n'.format(node['host'])
1302 QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
# Pretty-print a node's CPU facts (model, threads/cores/sockets, per-numa
# cpu lists, clock range, SMT state) and the running VPP threads with
# their cpu pinning. NOTE(review): this method's def line (likely a
# static `def cpu_info(node):`), the `item = ...` assignments before most
# prints, and the smt enabled/disabled string selection (orig 1343-1345)
# are elided from this listing.
1307 print the CPU information
1311 cpu = CpuUtils.get_cpu_info_per_node(node)
1315 print "{:>20}: {}".format(item, cpu[item])
1318 print "{:>20}: {}".format(item, cpu[item])
1319 item = 'Thread(s) per core'
1321 print "{:>20}: {}".format(item, cpu[item])
1322 item = 'Core(s) per socket'
1324 print "{:>20}: {}".format(item, cpu[item])
1327 print "{:>20}: {}".format(item, cpu[item])
1328 item = 'NUMA node(s)'
1331 numa_nodes = int(cpu[item])
# One line of cpu ids per numa node, keyed "NUMA node<i> CPU(s)".
1332 for i in xrange(0, numa_nodes):
1333 item = "NUMA node{} CPU(s)".format(i)
1334 print "{:>20}: {}".format(item, cpu[item])
1335 item = 'CPU max MHz'
1337 print "{:>20}: {}".format(item, cpu[item])
1338 item = 'CPU min MHz'
1340 print "{:>20}: {}".format(item, cpu[item])
1342 if node['cpu']['smt_enabled']:
1346 print "{:>20}: {}".format('SMT', smt)
1349 print "\nVPP Threads: (Name: Cpu Number)"
1350 vpp_processes = cpu['vpp_processes']
1351 for i in vpp_processes.items():
1352 print " {:10}: {:4}".format(i[0], i[1])
@staticmethod
def device_info(node):
    """
    Show the device information.

    :param node: Node dictionary
    :type node: dict
    """

    if 'cpu' in node and 'total_mbufs' in node['cpu']:
        total_mbufs = node['cpu']['total_mbufs']
        # Bug fix: was "total_mbufs is not 0" — identity comparison with
        # an int literal is an interpreter implementation detail and can
        # be True even for a value equal to 0. Compare by value.
        if total_mbufs != 0:
            print("Total Number of Buffers: {}".format(total_mbufs))

    vpp = VppPCIUtil(node)
    vpp.get_all_devices()

    linkup_devs = vpp.get_link_up_devices()
    if len(linkup_devs):
        print("\nDevices with link up (can not be used with VPP):")
        vpp.show_vpp_devices(linkup_devs, show_header=False)

    kernel_devs = vpp.get_kernel_devices()
    if len(kernel_devs):
        print("\nDevices bound to kernel drivers:")
        vpp.show_vpp_devices(kernel_devs, show_header=False)
    else:
        print("\nNo devices bound to kernel drivers")

    dpdk_devs = vpp.get_dpdk_devices()
    if len(dpdk_devs):
        print("\nDevices bound to DPDK drivers:")
        vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
                             show_header=False)
    else:
        print("\nNo devices bound to DPDK drivers")

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return

    print("\nDevices in use by VPP:")

    # Only 'local0' present means no real devices are in use.
    if len(interfaces.items()) < 2:
        print("None")
        return

    print("{:30} {:6} {:4} {:7} {:4} {:7}".
          format('Name', 'Socket', 'RXQs',
                 'RXDescs', 'TXQs', 'TXDescs'))
    for intf in sorted(interfaces.items()):
        name = intf[0]
        value = intf[1]
        if name == 'local0':
            continue
        socket = rx_qs = rx_ds = tx_qs = tx_ds = ''
        if 'cpu socket' in value:
            socket = int(value['cpu socket'])
        if 'rx queues' in value:
            rx_qs = int(value['rx queues'])
        if 'rx descs' in value:
            rx_ds = int(value['rx descs'])
        if 'tx queues' in value:
            tx_qs = int(value['tx queues'])
        if 'tx descs' in value:
            tx_ds = int(value['tx descs'])

        print("{:30} {:>6} {:>4} {:>7} {:>4} {:>7}".
              format(name, socket, rx_qs, rx_ds, tx_qs, tx_ds))
@staticmethod
def hugepage_info(node):
    """
    Show the huge page information.

    :param node: Node dictionary
    :type node: dict
    """

    VppHugePageUtil(node).show_huge_pages()
@staticmethod
def min_system_resources(node):
    """
    Check the system for basic minimum resources, return true if
    there is enough.

    :param node: Node dictionary
    :type node: dict
    :returns: True if the system has the minimum resources for VPP
    :rtype: bool
    """

    min_sys_res = True

    # CPUs: need at least one core for VPP plus one for the host.
    if 'layout' in node['cpu']:
        total_cpus = len(node['cpu']['layout'])
        if total_cpus < 2:
            print("\nThere is only {} CPU(s) available on this system.".format(total_cpus))
            print("This is not enough to run VPP.")
            min_sys_res = False

    # Memory: make sure the minimum huge page reservation would not
    # consume too large a fraction of free memory.
    if 'free' in node['hugepages'] and \
            'memfree' in node['hugepages'] and \
            'size' in node['hugepages']:
        free = node['hugepages']['free']
        memfree = float(node['hugepages']['memfree'].split(' ')[0])
        hugesize = float(node['hugepages']['size'].split(' ')[0])

        memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
        percentmemhugepages = (memhugepages / memfree) * 100
        # Bug fix: was "free is '0'" — string identity comparison relies
        # on interning and can silently be False for an equal string.
        # Compare by value.
        if free == '0' and \
                percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
            print("\nThe System has only {} of free memory.".format(int(memfree)))
            print("You will not be able to allocate enough Huge Pages for VPP.")
            min_sys_res = False

    return min_sys_res
def sys_info(self):
    """
    Print the system information

    """

    for name, node in self._nodes.items():
        print("\n==============================")
        print("NODE: {}\n".format(name))

        # CPU
        print("CPU:")
        self.cpu_info(node)

        # Grub
        print("\nGrub Command Line:")
        if 'grub' in node:
            print(
                "  Current: {}".format(
                    node['grub']['current_cmdline']))
            print(
                "  Configured: {}".format(
                    node['grub']['default_cmdline']))

        # Huge Pages
        print("\nHuge Pages:")
        self.hugepage_info(node)

        # Devices
        print("\nDevices:")
        self.device_info(node)

        # Status
        print("\nVPP Service Status:")
        state, errors = VPPUtil.status(node)
        print("  {}".format(state))
        for e in errors:
            print("  {}".format(e))

        # Minimum system resources
        self.min_system_resources(node)

        print("\n==============================")
def _ipv4_interface_setup_questions(self, node):
    """
    Ask the user some questions and get a list of interfaces
    and IPv4 addresses associated with those interfaces

    :param node: Node dictionary.
    :type node: dict
    :returns: A list or interfaces with ip addresses
    :rtype: list
    """

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return

    interfaces_with_ip = []
    for name, _ in sorted(interfaces.items()):
        # 'local0' is VPP's internal loopback; never configurable here.
        if name == 'local0':
            continue

        question = "Would you like add address to interface {} [Y/n]? ".format(name)
        if self._ask_user_yn(question, 'y') == 'y':
            interfaces_with_ip.append({
                'name': name,
                'addr': self._ask_user_ipv4(),
            })

    return interfaces_with_ip
def ipv4_interface_setup(self):
    """
    After asking the user some questions, get a list of interfaces
    and IPv4 addresses associated with those interfaces

    """

    for i in self._nodes.items():
        node = i[1]

        # Show the current interfaces with IP addresses
        current_ints = VPPUtil.get_int_ip(node)
        # Bug fix: was "current_ints is not {}", which is ALWAYS True
        # because a fresh {} literal is a distinct object, making the
        # "no interfaces" branch unreachable. Compare by value.
        if current_ints != {}:
            print("\nThese are the current interfaces with IP addresses:")
            for items in sorted(current_ints.items()):
                name = items[0]
                value = items[1]
                if 'address' not in value:
                    address = 'Not Set'
                else:
                    address = value['address']
                print("{:30} {:20} {:10}".format(name, address, value['state']))
            question = "\nWould you like to keep this configuration [Y/n]? "
            answer = self._ask_user_yn(question, 'y')
            if answer == 'y':
                continue
        else:
            print("\nThere are currently no interfaces with IP addresses.")

        # Create a script that add the ip addresses to the interfaces
        # and brings the interfaces up
        ints_with_addrs = self._ipv4_interface_setup_questions(node)
        content = ''
        for ints in ints_with_addrs:
            name = ints['name']
            addr = ints['addr']
            setipstr = 'set int ip address {} {}\n'.format(name, addr)
            setintupstr = 'set int state {} up\n'.format(name)
            content += setipstr + setintupstr

        # Write the content to the script
        rootdir = node['rootdir']
        filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
        with open(filename, 'w+') as sfile:
            sfile.write(content)

        # Execute the script
        cmd = 'vppctl exec {}'.format(filename)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)

        print("\nA script as been created at {}".format(filename))
        print("This script can be run using the following:")
        print("vppctl exec {}\n".format(filename))
def _create_vints_questions(self, node):
    """
    Ask the user some questions and get a list of interfaces
    and IPv4 addresses associated with those interfaces

    :param node: Node dictionary.
    :type node: dict
    :returns: A list or interfaces with ip addresses
    :rtype: list
    """

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return []

    # First delete all the Virtual interfaces
    for name, _ in sorted(interfaces.items()):
        if name[:7] == 'Virtual':
            cmd = 'vppctl delete vhost-user {}'.format(name)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                # Best effort: log and keep going.
                logging.debug('{} failed on node {} {}'.format(
                    cmd, node['host'], stderr))

    # Create a virtual interface, for each interface the user wants to use
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return []

    interfaces_with_virtual_interfaces = []
    inum = 1
    for name, _ in sorted(interfaces.items()):
        if name == 'local0':
            continue

        question = "Would you like connect this interface {} to the VM [Y/n]? ".format(name)
        if self._ask_user_yn(question, 'y') != 'y':
            continue

        sockfilename = '/var/run/vpp/{}.sock'.format(name.replace('/', '_'))
        # Remove a stale socket so VPP can bind a fresh one.
        if os.path.exists(sockfilename):
            os.remove(sockfilename)
        cmd = 'vppctl create vhost-user socket {} server'.format(sockfilename)
        (ret, stdout, stderr) = vpputl.exec_command(cmd)
        if ret != 0:
            raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
        # VPP prints the new interface name on stdout.
        vintname = stdout.rstrip('\r\n')

        cmd = 'chmod 777 {}'.format(sockfilename)
        (ret, stdout, stderr) = vpputl.exec_command(cmd)
        if ret != 0:
            raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))

        interfaces_with_virtual_interfaces.append(
            {'name': name,
             'virtualinterface': '{}'.format(vintname),
             'bridge': '{}'.format(inum)})
        inum += 1

    return interfaces_with_virtual_interfaces
def create_and_bridge_virtual_interfaces(self):
    """
    After asking the user some questions, create a VM and connect
    the interfaces to VPP interfaces

    """

    for _, node in self._nodes.items():
        # Show the current bridge and interface configuration
        print("\nThis the current bridge configuration:")
        VPPUtil.show_bridge(node)
        question = "\nWould you like to keep this configuration [Y/n]? "
        answer = self._ask_user_yn(question, 'y')
        if answer == 'y':
            continue

        # Create a script that builds a bridge configuration with
        # physical interfaces and virtual interfaces
        ints_with_vints = self._create_vints_questions(node)
        script = ''
        for intf in ints_with_vints:
            lines = 'comment { The following command creates the socket }\n'
            lines += 'comment { and returns a virtual interface }\n'
            lines += ('comment {{ create vhost-user socket '
                      '/var/run/vpp/sock{}.sock server }}\n'
                      .format(intf['bridge']))
            # Take the physical interface down, bridge both sides,
            # then bring everything up.
            lines += 'set interface state {} down\n'.format(intf['name'])
            lines += 'set interface l2 bridge {} {}\n'.format(
                intf['name'], intf['bridge'])
            lines += 'set interface l2 bridge {} {}\n'.format(
                intf['virtualinterface'], intf['bridge'])
            lines += 'set interface state {} up\n'.format(
                intf['virtualinterface'])
            lines += 'set interface state {} up\n'.format(intf['name'])
            script += lines

        # Write the content to the script
        rootdir = node['rootdir']
        filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
        with open(filename, 'w+') as sfile:
            sfile.write(script)

        # Execute the script
        cmd = 'vppctl exec {}'.format(filename)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)

        print("\nA script as been created at {}".format(filename))
        print("This script can be run using the following:")
        print("vppctl exec {}\n".format(filename))
def _iperf_vm_questions(self, node):
    """
    Ask the user some questions and get a list of interfaces
    and IPv4 addresses associated with those interfaces

    :param node: Node dictionary.
    :type node: dict
    :returns: A list or interfaces with ip addresses
    :rtype: list
    """

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return []

    # First delete all the Virtual interfaces
    for name, _ in sorted(interfaces.items()):
        if name[:7] == 'Virtual':
            cmd = 'vppctl delete vhost-user {}'.format(name)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                logging.debug('{} failed on node {} {}'.format(
                    cmd, node['host'], stderr))

    # Create a virtual interface, for each interface the user wants to use
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return []

    interfaces_with_virtual_interfaces = []
    inum = 1

    print('\nPlease pick one interface to connect to the iperf VM.')
    for name, _ in sorted(interfaces.items()):
        if name == 'local0':
            continue

        question = "Would you like connect this interface {} to the VM [y/N]? ".format(name)
        if self._ask_user_yn(question, 'n') != 'y':
            continue

        self._sockfilename = '/var/run/vpp/{}.sock'.format(name.replace('/', '_'))
        # Remove a stale socket so VPP can bind a fresh one.
        if os.path.exists(self._sockfilename):
            os.remove(self._sockfilename)
        cmd = 'vppctl create vhost-user socket {} server'.format(self._sockfilename)
        (ret, stdout, stderr) = vpputl.exec_command(cmd)
        if ret != 0:
            raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
        # VPP prints the new interface name on stdout.
        vintname = stdout.rstrip('\r\n')

        cmd = 'chmod 777 {}'.format(self._sockfilename)
        (ret, stdout, stderr) = vpputl.exec_command(cmd)
        if ret != 0:
            raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))

        interface = {'name': name,
                     'virtualinterface': '{}'.format(vintname),
                     'bridge': '{}'.format(inum)}
        inum += 1
        interfaces_with_virtual_interfaces.append(interface)
        # Only one interface is connected to the iperf VM.
        return interfaces_with_virtual_interfaces
def create_and_bridge_iperf_virtual_interface(self):
    """
    After asking the user some questions, and create and bridge a
    virtual interface to be used with iperf VM

    """

    for _, node in self._nodes.items():
        # Show the current bridge and interface configuration
        print("\nThis the current bridge configuration:")
        ifaces = VPPUtil.show_bridge(node)
        question = "\nWould you like to keep this configuration [Y/n]? "
        answer = self._ask_user_yn(question, 'y')
        if answer == 'y':
            # Keep the existing bridge; remember its socket for the VM.
            self._sockfilename = '/var/run/vpp/{}.sock'.format(
                ifaces[0]['name'].replace('/', '_'))
            if os.path.exists(self._sockfilename):
                continue

        # Create a script that builds a bridge configuration with
        # physical interfaces and virtual interfaces
        ints_with_vints = self._iperf_vm_questions(node)
        script = ''
        for intf in ints_with_vints:
            lines = 'comment { The following command creates the socket }\n'
            lines += 'comment { and returns a virtual interface }\n'
            lines += ('comment {{ create vhost-user socket '
                      '/var/run/vpp/sock{}.sock server }}\n'
                      .format(intf['bridge']))
            # Take the physical interface down, bridge both sides,
            # then bring everything up.
            lines += 'set interface state {} down\n'.format(intf['name'])
            lines += 'set interface l2 bridge {} {}\n'.format(
                intf['name'], intf['bridge'])
            lines += 'set interface l2 bridge {} {}\n'.format(
                intf['virtualinterface'], intf['bridge'])
            lines += 'set interface state {} up\n'.format(
                intf['virtualinterface'])
            lines += 'set interface state {} up\n'.format(intf['name'])
            script += lines

        # Write the content to the script
        rootdir = node['rootdir']
        filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
        with open(filename, 'w+') as sfile:
            sfile.write(script)

        # Execute the script
        cmd = 'vppctl exec {}'.format(filename)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)

        print("\nA script as been created at {}".format(filename))
        print("This script can be run using the following:")
        print("vppctl exec {}\n".format(filename))
@staticmethod
def destroy_iperf_vm(name):
    """
    Destroy the named VM if "virsh list" shows it as running.

    :param name: The name of the VM to be be destroyed
    :type name: str
    :raises RuntimeError: If a virsh command fails
    """

    cmd = 'virsh list'
    (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
    if ret != 0:
        logging.debug(stderr)
        raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))

    # NOTE(review): the VM name is used as a regular expression here;
    # a name containing regex metacharacters could match unexpectedly.
    if re.findall(name, stdout):
        cmd = 'virsh destroy {}'.format(name)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)
            raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))
def create_iperf_vm(self, vmname):
    """
    Render the iperf VM libvirt XML from the distro template and
    start the VM with "virsh create".

    :param vmname: The name to give the new VM
    :type vmname: str
    :raises RuntimeError: If the virsh create command fails
    """

    # Read the iperf VM template file
    distro = VPPUtil.get_linux_distro()
    if distro[0] == 'Ubuntu':
        tfilename = '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(self._rootdir)
    else:
        tfilename = '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(self._rootdir)

    with open(tfilename, 'r') as tfile:
        tcontents = tfile.read()

    # Fill in the template variables
    imagename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_IMAGE)
    isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
    tcontents = tcontents.format(vmname=vmname, imagename=imagename,
                                 isoname=isoname,
                                 vhostsocketname=self._sockfilename)

    # Write the rendered xml
    ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
    with open(ifilename, 'w+') as ifile:
        ifile.write(tcontents)

    # Boot the VM
    cmd = 'virsh create {}'.format(ifilename)
    (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
    if ret != 0:
        logging.debug(stderr)
        raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))