1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
import logging
import os
import re

import yaml

from netaddr import IPAddress

from vpplib.VPPUtil import VPPUtil
from vpplib.VppPCIUtil import VppPCIUtil
from vpplib.VppHugePageUtil import VppHugePageUtil
from vpplib.CpuUtils import CpuUtils
from vpplib.VppGrubUtil import VppGrubUtil
from vpplib.QemuUtils import QemuUtils
29 __all__ = ["AutoConfig"]
33 MIN_TOTAL_HUGE_PAGES = 1024
34 MAX_PERCENT_FOR_HUGE_PAGES = 70
36 IPERFVM_XML = 'configs/iperf-vm.xml'
37 IPERFVM_IMAGE = 'images/xenial-mod.img'
38 IPERFVM_ISO = 'configs/cloud-config.iso'
41 class AutoConfig(object):
42 """Auto Configuration Tools"""
44 def __init__(self, rootdir, filename, clean=False):
46 The Auto Configure class.
48 :param rootdir: The root directory for all the auto configuration files
49 :param filename: The autoconfiguration file
50 :param clean: When set initialize the nodes from the auto-config file
55 self._autoconfig_filename = rootdir + filename
56 self._rootdir = rootdir
59 self._vpp_devices_node = {}
60 self._hugepage_config = ""
63 self._sockfilename = ""
67 Returns the nodes dictionary.
76 def _autoconfig_backup_file(filename):
80 :param filename: The file to backup
84 # Does a copy of the file exist, if not create one
85 ofile = filename + '.orig'
86 (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
89 if stdout.strip('\n') != ofile:
90 cmd = 'sudo cp {} {}'.format(filename, ofile)
91 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
95 # noinspection PyBroadException
99 Asks the user for a number within a range.
100 default is returned if return is entered.
102 :returns: IP address with cidr
107 answer = raw_input("Please enter the IPv4 Address [n.n.n.n/n]: ")
109 ipinput = answer.split('/')
110 ipaddr = IPAddress(ipinput[0])
112 plen = answer.split('/')[1]
114 answer = raw_input("Please enter the netmask [n.n.n.n]: ")
115 plen = IPAddress(answer).netmask_bits()
116 return '{}/{}'.format(ipaddr, plen)
118 print "Please enter a valid IPv4 address."
121 def _ask_user_range(question, first, last, default):
123 Asks the user for a number within a range.
124 default is returned if return is entered.
126 :param question: Text of a question.
127 :param first: First number in the range
128 :param last: Last number in the range
129 :param default: The value returned when return is entered
130 :type question: string
134 :returns: The answer to the question
139 answer = raw_input(question)
143 if re.findall(r'[0-9+]', answer):
144 if int(answer) in range(first, last + 1):
147 print "Please a value between {} and {} or Return.". \
150 print "Please a number between {} and {} or Return.". \
156 def _ask_user_yn(question, default):
158 Asks the user for a yes or no question.
160 :param question: Text of a question.
161 :param default: The value returned when return is entered
162 :type question: string
163 :type default: string
164 :returns: The answer to the question
169 default = default.lower()
171 while not input_valid:
172 answer = raw_input(question)
175 if re.findall(r'[YyNn]', answer):
177 answer = answer[0].lower()
179 print "Please answer Y, N or Return."
183 def _loadconfig(self):
185 Load the testbed configuration, given the auto configuration file.
189 # Get the Topology, from the topology layout file
191 with open(self._autoconfig_filename, 'r') as stream:
193 topo = yaml.load(stream)
194 if 'metadata' in topo:
195 self._metadata = topo['metadata']
196 except yaml.YAMLError as exc:
197 raise RuntimeError("Couldn't read the Auto config file {}.".format(self._autoconfig_filename, exc))
199 systemfile = self._rootdir + self._metadata['system_config_file']
200 if self._clean is False and os.path.isfile(systemfile):
201 with open(systemfile, 'r') as sysstream:
203 systopo = yaml.load(sysstream)
204 if 'nodes' in systopo:
205 self._nodes = systopo['nodes']
206 except yaml.YAMLError as sysexc:
207 raise RuntimeError("Couldn't read the System config file {}.".format(systemfile, sysexc))
209 # Get the nodes from Auto Config
211 self._nodes = topo['nodes']
213 # Set the root directory in all the nodes
214 for i in self._nodes.items():
216 node['rootdir'] = self._rootdir
218 def updateconfig(self):
220 Update the testbed configuration, given the auto configuration file.
221 We will write the system configuration file with the current node
226 # Initialize the yaml data
227 ydata = {'metadata': self._metadata, 'nodes': self._nodes}
229 # Write the system config file
230 filename = self._rootdir + self._metadata['system_config_file']
231 with open(filename, 'w') as yamlfile:
232 yaml.dump(ydata, yamlfile)
234 def _update_auto_config(self):
236 Write the auto configuration file with the new configuration data,
241 # Initialize the yaml data
243 with open(self._autoconfig_filename, 'r') as stream:
245 ydata = yaml.load(stream)
247 nodes = ydata['nodes']
248 except yaml.YAMLError as exc:
252 for i in nodes.items():
257 node['interfaces'] = {}
258 for item in self._nodes[key]['interfaces'].items():
262 node['interfaces'][port] = {}
263 addr = '{}'.format(interface['pci_address'])
264 node['interfaces'][port]['pci_address'] = addr
265 if 'mac_address' in interface:
266 node['interfaces'][port]['mac_address'] = \
267 interface['mac_address']
269 if 'total_other_cpus' in self._nodes[key]['cpu']:
270 node['cpu']['total_other_cpus'] = \
271 self._nodes[key]['cpu']['total_other_cpus']
272 if 'total_vpp_cpus' in self._nodes[key]['cpu']:
273 node['cpu']['total_vpp_cpus'] = \
274 self._nodes[key]['cpu']['total_vpp_cpus']
275 if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
276 node['cpu']['reserve_vpp_main_core'] = \
277 self._nodes[key]['cpu']['reserve_vpp_main_core']
280 if 'active_open_sessions' in self._nodes[key]['tcp']:
281 node['tcp']['active_open_sessions'] = \
282 self._nodes[key]['tcp']['active_open_sessions']
283 if 'passive_open_sessions' in self._nodes[key]['tcp']:
284 node['tcp']['passive_open_sessions'] = \
285 self._nodes[key]['tcp']['passive_open_sessions']
288 node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
290 # Write the auto config config file
291 with open(self._autoconfig_filename, 'w') as yamlfile:
292 yaml.dump(ydata, yamlfile)
294 def apply_huge_pages(self):
296 Apply the huge page config
300 for i in self._nodes.items():
303 hpg = VppHugePageUtil(node)
304 hpg.hugepages_dryrun_apply()
307 def _apply_vpp_unix(node):
309 Apply the VPP Unix config
311 :param node: Node dictionary with cpuinfo.
316 if 'unix' not in node['vpp']:
319 unixv = node['vpp']['unix']
320 if 'interactive' in unixv:
321 interactive = unixv['interactive']
322 if interactive is True:
323 unix = ' interactive\n'
325 return unix.rstrip('\n')
328 def _apply_vpp_cpu(node):
330 Apply the VPP cpu config
332 :param node: Node dictionary with cpuinfo.
338 if 'vpp_main_core' in node['cpu']:
339 vpp_main_core = node['cpu']['vpp_main_core']
342 if vpp_main_core is not 0:
343 cpu += ' main-core {}\n'.format(vpp_main_core)
346 vpp_workers = node['cpu']['vpp_workers']
347 vpp_worker_len = len(vpp_workers)
348 if vpp_worker_len > 0:
350 for i, worker in enumerate(vpp_workers):
352 vpp_worker_str += ','
353 if worker[0] == worker[1]:
354 vpp_worker_str += "{}".format(worker[0])
356 vpp_worker_str += "{}-{}".format(worker[0], worker[1])
358 cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
363 def _apply_vpp_devices(node):
365 Apply VPP PCI Device configuration to vpp startup.
367 :param node: Node dictionary with cpuinfo.
372 ports_per_numa = node['cpu']['ports_per_numa']
373 total_mbufs = node['cpu']['total_mbufs']
375 for item in ports_per_numa.items():
377 interfaces = value['interfaces']
379 # if 0 was specified for the number of vpp workers, use 1 queue
382 if 'rx_queues' in value:
383 num_rx_queues = value['rx_queues']
384 if 'tx_queues' in value:
385 num_tx_queues = value['tx_queues']
390 # Create the devices string
391 for interface in interfaces:
392 pci_address = interface['pci_address']
393 pci_address = pci_address.lstrip("'").rstrip("'")
395 devices += ' dev {} {{ \n'.format(pci_address)
397 devices += ' num-rx-queues {}\n'.format(num_rx_queues)
399 devices += ' num-rx-queues {}\n'.format(1)
401 devices += ' num-tx-queues {}\n'.format(num_tx_queues)
403 devices += ' num-rx-desc {}\n'.format(num_rx_desc)
405 devices += ' num-tx-desc {}\n'.format(num_tx_desc)
408 # If the total mbufs is not 0 or less than the default, set num-bufs
409 logging.debug("Total mbufs: {}".format(total_mbufs))
410 if total_mbufs is not 0 and total_mbufs > 16384:
411 devices += '\n num-mbufs {}'.format(total_mbufs)
416 def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end, total_vpp_workers,
417 reserve_vpp_main_core):
419 Calculate the VPP worker information
421 :param node: Node dictionary
422 :param vpp_workers: List of VPP workers
423 :param numa_node: Numa node
424 :param other_cpus_end: The end of the cpus allocated for cores
426 :param total_vpp_workers: The number of vpp workers needed
427 :param reserve_vpp_main_core: Is there a core needed for
431 :type other_cpus_end: int
432 :type total_vpp_workers: int
433 :type reserve_vpp_main_core: bool
434 :returns: Is a core still needed for the vpp main core
438 # Can we fit the workers in one of these slices
439 cpus = node['cpu']['cpus_per_node'][numa_node]
443 if start <= other_cpus_end:
444 start = other_cpus_end + 1
446 if reserve_vpp_main_core:
449 workers_end = start + total_vpp_workers - 1
451 if workers_end <= end:
452 if reserve_vpp_main_core:
453 node['cpu']['vpp_main_core'] = start - 1
454 reserve_vpp_main_core = False
455 if total_vpp_workers:
456 vpp_workers.append((start, workers_end))
459 # We still need to reserve the main core
460 if reserve_vpp_main_core:
461 node['cpu']['vpp_main_core'] = other_cpus_end + 1
463 return reserve_vpp_main_core
466 def _calc_desc_and_queues(total_numa_nodes,
467 total_ports_per_numa,
469 ports_per_numa_value):
471 Calculate the number of descriptors and queues
473 :param total_numa_nodes: The total number of numa nodes
474 :param total_ports_per_numa: The total number of ports for this
476 :param total_rx_queues: The total number of rx queues / port
477 :param ports_per_numa_value: The value from the ports_per_numa
479 :type total_numa_nodes: int
480 :type total_ports_per_numa: int
481 :type total_rx_queues: int
482 :type ports_per_numa_value: dict
483 :returns The total number of message buffers
487 # Get the number of rx queues
488 rx_queues = max(1, total_rx_queues)
489 tx_queues = rx_queues * total_numa_nodes + 1
491 # Get the descriptor entries
493 ports_per_numa_value['rx_queues'] = rx_queues
494 total_mbufs = (((rx_queues * desc_entries) +
495 (tx_queues * desc_entries)) *
496 total_ports_per_numa)
497 total_mbufs = total_mbufs
502 def _create_ports_per_numa(node, interfaces):
504 Create a dictionary or ports per numa node
505 :param node: Node dictionary
506 :param interfaces: All the interfaces to be used by vpp
508 :type interfaces: dict
509 :returns: The ports per numa dictionary
513 # Make a list of ports by numa node
515 for item in interfaces.items():
517 if i['numa_node'] not in ports_per_numa:
518 ports_per_numa[i['numa_node']] = {'interfaces': []}
519 ports_per_numa[i['numa_node']]['interfaces'].append(i)
521 ports_per_numa[i['numa_node']]['interfaces'].append(i)
522 node['cpu']['ports_per_numa'] = ports_per_numa
524 return ports_per_numa
526 def calculate_cpu_parameters(self):
528 Calculate the cpu configuration.
532 # Calculate the cpu parameters, needed for the
533 # vpp_startup and grub configuration
534 for i in self._nodes.items():
537 # get total number of nic ports
538 interfaces = node['interfaces']
540 # Make a list of ports by numa node
541 ports_per_numa = self._create_ports_per_numa(node, interfaces)
543 # Get the number of cpus to skip, we never use the first cpu
545 other_cpus_end = other_cpus_start + node['cpu']['total_other_cpus'] - 1
547 if other_cpus_end is not 0:
548 other_workers = (other_cpus_start, other_cpus_end)
549 node['cpu']['other_workers'] = other_workers
551 # Allocate the VPP main core and workers
553 reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
554 total_vpp_cpus = node['cpu']['total_vpp_cpus']
555 total_rx_queues = node['cpu']['total_rx_queues']
557 # If total_vpp_cpus is 0 or is less than the numa nodes with ports
558 # then we shouldn't get workers
559 total_workers_node = 0
560 if len(ports_per_numa):
561 total_workers_node = total_vpp_cpus / len(ports_per_numa)
563 if reserve_vpp_main_core:
566 if total_main + total_workers_node is not 0:
567 for item in ports_per_numa.items():
571 # Get the number of descriptors and queues
572 mbufs = self._calc_desc_and_queues(len(ports_per_numa),
573 len(value['interfaces']), total_rx_queues, value)
576 # Get the VPP workers
577 reserve_vpp_main_core = self._calc_vpp_workers(node, vpp_workers, numa_node,
578 other_cpus_end, total_workers_node,
579 reserve_vpp_main_core)
582 total_mbufs = int(total_mbufs)
587 node['cpu']['vpp_workers'] = vpp_workers
588 node['cpu']['total_mbufs'] = total_mbufs
594 def _apply_vpp_tcp(node):
596 Apply the VPP Unix config
598 :param node: Node dictionary with cpuinfo.
602 active_open_sessions = node['tcp']['active_open_sessions']
603 aos = int(active_open_sessions)
605 passive_open_sessions = node['tcp']['passive_open_sessions']
606 pos = int(passive_open_sessions)
608 # Generate the api-segment gid vpp sheit in any case
610 tcp = "api-segment {\n"
611 tcp = tcp + " gid vpp\n"
613 return tcp.rstrip('\n')
615 tcp = "# TCP stack-related configuration parameters\n"
616 tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
617 tcp = tcp + "heapsize 4g\n\n"
618 tcp = tcp + "api-segment {\n"
619 tcp = tcp + " global-size 2000M\n"
620 tcp = tcp + " api-size 1G\n"
623 tcp = tcp + "session {\n"
624 tcp = tcp + " event-queue-length " + "{:d}".format(aos + pos) + "\n"
625 tcp = tcp + " preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
626 tcp = tcp + " v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
627 tcp = tcp + " v4-session-table-memory 3g\n"
629 tcp = tcp + " v4-halfopen-table-buckets " + \
630 "{:d}".format((aos + pos) / 4) + "\n"
631 tcp = tcp + " v4-halfopen-table-memory 3g\n"
632 tcp = tcp + " local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
633 tcp = tcp + " local-endpoints-table-memory 3g\n"
636 tcp = tcp + "tcp {\n"
637 tcp = tcp + " preallocated-connections " + "{:d}".format(aos + pos) + "\n"
639 tcp = tcp + " preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
642 return tcp.rstrip('\n')
644 def apply_vpp_startup(self):
646 Apply the vpp startup configration
650 # Apply the VPP startup configruation
651 for i in self._nodes.items():
654 # Get the startup file
655 rootdir = node['rootdir']
656 sfile = rootdir + node['vpp']['startup_config_file']
659 devices = self._apply_vpp_devices(node)
662 cpu = self._apply_vpp_cpu(node)
664 # Get the unix config
665 unix = self._apply_vpp_unix(node)
667 # Get the TCP configuration, if any
668 tcp = self._apply_vpp_tcp(node)
670 # Make a backup if needed
671 self._autoconfig_backup_file(sfile)
674 tfile = sfile + '.template'
675 (ret, stdout, stderr) = \
676 VPPUtil.exec_command('cat {}'.format(tfile))
678 raise RuntimeError('Executing cat command failed to node {}'.
679 format(node['host']))
680 startup = stdout.format(unix=unix,
685 (ret, stdout, stderr) = \
686 VPPUtil.exec_command('rm {}'.format(sfile))
688 logging.debug(stderr)
690 cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
691 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
693 raise RuntimeError('Writing config failed node {}'.
694 format(node['host']))
696 def apply_grub_cmdline(self):
698 Apply the grub cmdline
702 for i in self._nodes.items():
705 # Get the isolated CPUs
706 other_workers = node['cpu']['other_workers']
707 vpp_workers = node['cpu']['vpp_workers']
708 if 'vpp_main_core' in node['cpu']:
709 vpp_main_core = node['cpu']['vpp_main_core']
713 if other_workers is not None:
714 all_workers = [other_workers]
715 if vpp_main_core is not 0:
716 all_workers += [(vpp_main_core, vpp_main_core)]
717 all_workers += vpp_workers
719 for idx, worker in enumerate(all_workers):
724 if worker[0] == worker[1]:
725 isolated_cpus += "{}".format(worker[0])
727 isolated_cpus += "{}-{}".format(worker[0], worker[1])
729 vppgrb = VppGrubUtil(node)
730 current_cmdline = vppgrb.get_current_cmdline()
731 if 'grub' not in node:
733 node['grub']['current_cmdline'] = current_cmdline
734 node['grub']['default_cmdline'] = \
735 vppgrb.apply_cmdline(node, isolated_cpus)
739 def get_hugepages(self):
741 Get the hugepage configuration
745 for i in self._nodes.items():
748 hpg = VppHugePageUtil(node)
749 max_map_count, shmmax = hpg.get_huge_page_config()
750 node['hugepages']['max_map_count'] = max_map_count
751 node['hugepages']['shmax'] = shmmax
752 total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
753 node['hugepages']['actual_total'] = total
754 node['hugepages']['free'] = free
755 node['hugepages']['size'] = size
756 node['hugepages']['memtotal'] = memtotal
757 node['hugepages']['memfree'] = memfree
763 Get the grub configuration
767 for i in self._nodes.items():
770 vppgrb = VppGrubUtil(node)
771 current_cmdline = vppgrb.get_current_cmdline()
772 default_cmdline = vppgrb.get_default_cmdline()
774 # Get the total number of isolated CPUs
776 iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
777 iso_cpurl = len(iso_cpur)
779 iso_cpu_str = iso_cpur[0]
780 iso_cpu_str = iso_cpu_str.split('=')[1]
781 iso_cpul = iso_cpu_str.split(',')
782 for iso_cpu in iso_cpul:
783 isocpuspl = iso_cpu.split('-')
784 if len(isocpuspl) is 1:
785 current_iso_cpus += 1
787 first = int(isocpuspl[0])
788 second = int(isocpuspl[1])
790 current_iso_cpus += 1
792 current_iso_cpus += second - first
794 if 'grub' not in node:
796 node['grub']['current_cmdline'] = current_cmdline
797 node['grub']['default_cmdline'] = default_cmdline
798 node['grub']['current_iso_cpus'] = current_iso_cpus
803 def _get_device(node):
805 Get the device configuration for a single node
807 :param node: Node dictionary with cpuinfo.
812 vpp = VppPCIUtil(node)
813 vpp.get_all_devices()
815 # Save the device information
817 node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
818 node['devices']['kernel_devices'] = vpp.get_kernel_devices()
819 node['devices']['other_devices'] = vpp.get_other_devices()
820 node['devices']['linkup_devices'] = vpp.get_link_up_devices()
822 def get_devices_per_node(self):
824 Get the device configuration for all the nodes
828 for i in self._nodes.items():
830 # Update the interface data
832 self._get_device(node)
837 def get_cpu_layout(node):
841 using lscpu -p get the cpu layout.
842 Returns a list with each item representing a single cpu.
844 :param node: Node dictionary.
846 :returns: The cpu layout
851 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
853 raise RuntimeError('{} failed on node {} {}'.
854 format(cmd, node['host'], stderr))
857 lines = stdout.split('\n')
859 if line == '' or line[0] == '#':
861 linesplit = line.split(',')
862 layout = {'cpu': linesplit[0], 'core': linesplit[1],
863 'socket': linesplit[2], 'node': linesplit[3]}
865 # cpu, core, socket, node
872 Get the cpu configuration
877 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
879 for i in self._nodes.items():
883 layout = self.get_cpu_layout(node)
884 node['cpu']['layout'] = layout
886 cpuinfo = node['cpuinfo']
887 smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
888 node['cpu']['smt_enabled'] = smt_enabled
890 # We don't want to write the cpuinfo
898 Get the current system configuration.
902 # Get the Huge Page configuration
905 # Get the device configuration
906 self.get_devices_per_node()
908 # Get the CPU configuration
911 # Get the current grub cmdline
914 def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
916 Ask the user questions related to the cpu configuration.
918 :param node: Node dictionary
919 :param total_cpus: The total number of cpus in the system
920 :param numa_nodes: The list of numa nodes in the system
922 :type total_cpus: int
923 :type numa_nodes: list
926 print "\nYour system has {} core(s) and {} Numa Nodes.". \
927 format(total_cpus, len(numa_nodes))
928 print "To begin, we suggest not reserving any cores for VPP or other processes."
929 print "Then to improve performance start reserving cores and adding queues as needed. "
934 question = "\nHow many core(s) shall we reserve for VPP [0-{}][0]? ".format(max_vpp_cpus)
935 total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
936 node['cpu']['total_vpp_cpus'] = total_vpp_cpus
938 max_other_cores = (total_cpus - total_vpp_cpus) / 2
939 question = 'How many core(s) do you want to reserve for processes other than VPP? [0-{}][0]? '. \
940 format(str(max_other_cores))
941 total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
942 node['cpu']['total_other_cpus'] = total_other_cpus
944 max_main_cpus = max_vpp_cpus + 1 - total_vpp_cpus
945 reserve_vpp_main_core = False
946 if max_main_cpus > 0:
947 question = "Should we reserve 1 core for the VPP Main thread? "
948 question += "[y/N]? "
949 answer = self._ask_user_yn(question, 'n')
951 reserve_vpp_main_core = True
952 node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
953 node['cpu']['vpp_main_core'] = 0
955 question = "How many RX queues per port shall we use for VPP [1-4][1]? ". \
957 total_rx_queues = self._ask_user_range(question, 1, 4, 1)
958 node['cpu']['total_rx_queues'] = total_rx_queues
960 def modify_cpu(self, ask_questions=True):
962 Modify the cpu configuration, asking for the user for the values.
964 :param ask_questions: When true ask the user for config parameters
969 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
971 for i in self._nodes.items():
974 total_cpus_per_slice = 0
978 cpu_layout = self.get_cpu_layout(node)
980 # Assume the number of cpus per slice is always the same as the
983 for cpu in cpu_layout:
984 if cpu['node'] != first_node:
986 total_cpus_per_slice += 1
988 # Get the total number of cpus, cores, and numa nodes from the
990 for cpul in cpu_layout:
991 numa_node = cpul['node']
996 if numa_node not in cpus_per_node:
997 cpus_per_node[numa_node] = []
998 cpuperslice = int(cpu) % total_cpus_per_slice
1000 cpus_per_node[numa_node].append((int(cpu), int(cpu) +
1001 total_cpus_per_slice - 1))
1002 if numa_node not in numa_nodes:
1003 numa_nodes.append(numa_node)
1004 if core not in cores:
1006 node['cpu']['cpus_per_node'] = cpus_per_node
1008 # Ask the user some questions
1009 if ask_questions and total_cpus >= 8:
1010 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1012 # Populate the interfaces with the numa node
1013 if 'interfaces' in node:
1014 ikeys = node['interfaces'].keys()
1015 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1017 # We don't want to write the cpuinfo
1018 node['cpuinfo'] = ""
1021 self._update_auto_config()
1024 def _modify_other_devices(self, node,
1025 other_devices, kernel_devices, dpdk_devices):
1027 Modify the devices configuration, asking for the user for the values.
1031 odevices_len = len(other_devices)
1032 if odevices_len > 0:
1033 print "\nThese device(s) are currently NOT being used",
1034 print "by VPP or the OS.\n"
1035 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1036 question = "\nWould you like to give any of these devices"
1037 question += " back to the OS [Y/n]? "
1038 answer = self._ask_user_yn(question, 'Y')
1041 for dit in other_devices.items():
1044 question = "Would you like to use device {} for". \
1046 question += " the OS [y/N]? "
1047 answer = self._ask_user_yn(question, 'n')
1049 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1050 driver = device['unused'][0]
1051 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1053 logging.debug('Could not bind device {}'.format(dvid))
1056 for dit in vppd.items():
1059 kernel_devices[dvid] = device
1060 del other_devices[dvid]
1062 odevices_len = len(other_devices)
1063 if odevices_len > 0:
1064 print "\nThese device(s) are still NOT being used ",
1065 print "by VPP or the OS.\n"
1066 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1067 question = "\nWould you like use any of these for VPP [y/N]? "
1068 answer = self._ask_user_yn(question, 'N')
1071 for dit in other_devices.items():
1074 question = "Would you like to use device {} ".format(dvid)
1075 question += "for VPP [y/N]? "
1076 answer = self._ask_user_yn(question, 'n')
1079 for dit in vppd.items():
1082 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1083 driver = device['unused'][0]
1084 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1085 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1087 logging.debug('Could not bind device {}'.format(dvid))
1089 dpdk_devices[dvid] = device
1090 del other_devices[dvid]
1092 def update_interfaces_config(self):
1094 Modify the interfaces directly from the config file.
1098 for i in self._nodes.items():
1100 devices = node['devices']
1101 all_devices = devices['other_devices']
1102 all_devices.update(devices['dpdk_devices'])
1103 all_devices.update(devices['kernel_devices'])
1107 if 'interfaces' in node:
1108 current_ifcs = node['interfaces']
1110 for ifc in current_ifcs.values():
1111 dvid = ifc['pci_address']
1112 if dvid in all_devices:
1113 VppPCIUtil.vpp_create_interface(interfaces, dvid,
1115 node['interfaces'] = interfaces
1119 def modify_devices(self):
1121 Modify the devices configuration, asking for the user for the values.
1125 for i in self._nodes.items():
1127 devices = node['devices']
1128 other_devices = devices['other_devices']
1129 kernel_devices = devices['kernel_devices']
1130 dpdk_devices = devices['dpdk_devices']
1133 self._modify_other_devices(node, other_devices,
1134 kernel_devices, dpdk_devices)
1136 # Get the devices again for this node
1137 self._get_device(node)
1138 devices = node['devices']
1139 kernel_devices = devices['kernel_devices']
1140 dpdk_devices = devices['dpdk_devices']
1142 klen = len(kernel_devices)
1144 print "\nThese devices have kernel interfaces, but",
1145 print "appear to be safe to use with VPP.\n"
1146 VppPCIUtil.show_vpp_devices(kernel_devices)
1147 question = "\nWould you like to use any of these "
1148 question += "device(s) for VPP [y/N]? "
1149 answer = self._ask_user_yn(question, 'n')
1152 for dit in kernel_devices.items():
1155 question = "Would you like to use device {} ". \
1157 question += "for VPP [y/N]? "
1158 answer = self._ask_user_yn(question, 'n')
1161 for dit in vppd.items():
1164 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1165 driver = device['unused'][0]
1166 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1167 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1169 logging.debug('Could not bind device {}'.format(dvid))
1171 dpdk_devices[dvid] = device
1172 del kernel_devices[dvid]
1174 dlen = len(dpdk_devices)
1176 print "\nThese device(s) will be used by VPP.\n"
1177 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1178 question = "\nWould you like to remove any of "
1179 question += "these device(s) [y/N]? "
1180 answer = self._ask_user_yn(question, 'n')
1183 for dit in dpdk_devices.items():
1186 question = "Would you like to remove {} [y/N]? ". \
1188 answer = self._ask_user_yn(question, 'n')
1191 for dit in vppd.items():
1194 if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1195 driver = device['unused'][0]
1196 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1197 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1199 logging.debug('Could not bind device {}'.format(dvid))
1201 kernel_devices[dvid] = device
1202 del dpdk_devices[dvid]
1205 for dit in dpdk_devices.items():
1208 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1209 node['interfaces'] = interfaces
1211 print "\nThese device(s) will be used by VPP, please",
1212 print "rerun this option if this is incorrect.\n"
1213 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1215 self._update_auto_config()
1218 def modify_huge_pages(self):
1220 Modify the huge page configuration, asking for the user for the values.
1224 for i in self._nodes.items():
1227 total = node['hugepages']['actual_total']
1228 free = node['hugepages']['free']
1229 size = node['hugepages']['size']
1230 memfree = node['hugepages']['memfree'].split(' ')[0]
1231 hugesize = int(size.split(' ')[0])
1232 # The max number of huge pages should be no more than
1233 # 70% of total free memory
1234 maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
1235 print "\nThere currently {} {} huge pages free.". \
1237 question = "Do you want to reconfigure the number of "
1238 question += "huge pages [y/N]? "
1239 answer = self._ask_user_yn(question, 'n')
1241 node['hugepages']['total'] = total
1244 print "\nThere currently a total of {} huge pages.". \
1246 question = "How many huge pages do you want [{} - {}][{}]? ". \
1247 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1248 answer = self._ask_user_range(question, 1024, maxpages, 1024)
1249 node['hugepages']['total'] = str(answer)
1251 # Update auto-config.yaml
1252 self._update_auto_config()
1254 # Rediscover just the hugepages
1255 self.get_hugepages()
1257 def get_tcp_params(self):
1259 Get the tcp configuration
1262 # maybe nothing to do here?
1265 def acquire_tcp_params(self):
1267 Ask the user for TCP stack configuration parameters
1271 for i in self._nodes.items():
1274 question = "\nHow many active-open / tcp client sessions are expected "
1275 question = question + "[0-10000000][0]? "
1276 answer = self._ask_user_range(question, 0, 10000000, 0)
1277 # Less than 10K is equivalent to 0
1278 if int(answer) < 10000:
1280 node['tcp']['active_open_sessions'] = answer
1282 question = "How many passive-open / tcp server sessions are expected "
1283 question = question + "[0-10000000][0]? "
1284 answer = self._ask_user_range(question, 0, 10000000, 0)
1285 # Less than 10K is equivalent to 0
1286 if int(answer) < 10000:
1288 node['tcp']['passive_open_sessions'] = answer
1290 # Update auto-config.yaml
1291 self._update_auto_config()
1293 # Rediscover tcp parameters
1294 self.get_tcp_params()
1297 def patch_qemu(node):
1299 Patch qemu with the correct patches.
1301 :param node: Node dictionary
1305 print '\nWe are patching the node "{}":\n'.format(node['host'])
1306 QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
def cpu_info(self, node):
    """
    print the CPU information

    :param node: Node dictionary
    :type node: dict
    """

    cpu = CpuUtils.get_cpu_info_per_node(node)

    # One-line lscpu-style fields, printed only when reported
    for field in ('Model name', 'CPU(s)', 'Thread(s) per core',
                  'Core(s) per socket', 'Socket(s)'):
        if field in cpu:
            print("{:>20}: {}".format(field, cpu[field]))

    # Per-NUMA-node CPU lists
    numa_nodes = 0
    if 'NUMA node(s)' in cpu:
        numa_nodes = int(cpu['NUMA node(s)'])
    for numa in range(numa_nodes):
        field = "NUMA node{} CPU(s)".format(numa)
        print("{:>20}: {}".format(field, cpu[field]))

    for field in ('CPU max MHz', 'CPU min MHz'):
        if field in cpu:
            print("{:>20}: {}".format(field, cpu[field]))

    smt = 'Enabled' if node['cpu']['smt_enabled'] else 'Disabled'
    print("{:>20}: {}".format('SMT', smt))

    # VPP thread placement (name -> cpu number)
    print("\nVPP Threads: (Name: Cpu Number)")
    for proc in cpu['vpp_processes'].items():
        print("  {:10}: {:4}".format(proc[0], proc[1]))
def device_info(node):
    """
    Show the device information.

    :param node: Node dictionary
    :type node: dict
    """

    if 'cpu' in node and 'total_mbufs' in node['cpu']:
        total_mbufs = node['cpu']['total_mbufs']
        # BUG FIX: 'is not 0' compared object identity, which only works
        # by accident via CPython small-int interning; compare values.
        if total_mbufs != 0:
            print("Total Number of Buffers: {}".format(total_mbufs))

    vpp = VppPCIUtil(node)
    vpp.get_all_devices()

    # Devices with a link up cannot be taken from the kernel
    linkup_devs = vpp.get_link_up_devices()
    if len(linkup_devs):
        print("\nDevices with link up (can not be used with VPP):")
        vpp.show_vpp_devices(linkup_devs, show_header=False)

    kernel_devs = vpp.get_kernel_devices()
    if len(kernel_devs):
        print("\nDevices bound to kernel drivers:")
        vpp.show_vpp_devices(kernel_devs, show_header=False)
    else:
        print("\nNo devices bound to kernel drivers")

    dpdk_devs = vpp.get_dpdk_devices()
    if len(dpdk_devs):
        print("\nDevices bound to DPDK drivers:")
        vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
                             show_header=False)
    else:
        print("\nNo devices bound to DPDK drivers")

    other_devs = vpp.get_other_devices()
    if len(other_devs):
        print("\nDevices not bound to Kernel or DPDK drivers:")
        vpp.show_vpp_devices(other_devs, show_interfaces=True,
                             show_header=False)
    else:
        print("\nNo devices not bound to Kernel or DPDK drivers")

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return

    print("\nDevices in use by VPP:")

    # 'local0' is always present, so fewer than 2 entries means no
    # physical devices are in use by VPP
    if len(interfaces.items()) < 2:
        print("None")
        return

    print("{:30} {:4} {:4} {:7} {:4} {:7}".
          format('Name', 'Numa', 'RXQs', 'RXDescs', 'TXQs', 'TXDescs'))
    for intf in sorted(interfaces.items()):
        name = intf[0]
        value = intf[1]
        if name == 'local0':
            continue
        # Fields default to '' when the hardware info does not report them
        numa = rx_qs = rx_ds = tx_qs = tx_ds = ''
        if 'numa' in value:
            numa = int(value['numa'])
        if 'rx queues' in value:
            rx_qs = int(value['rx queues'])
        if 'rx descs' in value:
            rx_ds = int(value['rx descs'])
        if 'tx queues' in value:
            tx_qs = int(value['tx queues'])
        if 'tx descs' in value:
            tx_ds = int(value['tx descs'])

        print("{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".
              format(name, numa, rx_qs, rx_ds, tx_qs, tx_ds))
def hugepage_info(node):
    """
    Show the huge page information.

    :param node: Node dictionary
    :type node: dict
    """

    VppHugePageUtil(node).show_huge_pages()
def min_system_resources(node):
    """
    Check the system for basic minimum resources, return true if
    there is enough.

    :param node: Node dictionary
    :type node: dict
    :returns: True if the node has enough CPUs and memory to run VPP
    :rtype: bool
    """

    min_sys_res = True

    # CPUs: at least 2 are needed (main thread + one worker)
    if 'layout' in node['cpu']:
        total_cpus = len(node['cpu']['layout'])
        if total_cpus < 2:
            print("\nThere is only {} CPU(s) available on this system.".format(total_cpus))
            print("This is not enough to run VPP.")
            min_sys_res = False

    # System memory: the minimum hugepage pool must fit within a
    # reasonable fraction of free memory
    if 'free' in node['hugepages'] and \
            'memfree' in node['hugepages'] and \
            'size' in node['hugepages']:
        free = node['hugepages']['free']
        memfree = float(node['hugepages']['memfree'].split(' ')[0])
        hugesize = float(node['hugepages']['size'].split(' ')[0])

        memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
        percentmemhugepages = (memhugepages / memfree) * 100
        # BUG FIX: "free is '0'" compared string identity, which depends
        # on CPython string interning; compare by value instead.
        if free == '0' and \
                percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
            print("\nThe System has only {} of free memory.".format(int(memfree)))
            print("You will not be able to allocate enough Huge Pages for VPP.")
            min_sys_res = False

    return min_sys_res
def sys_info(self):
    """
    Print the system information

    """

    for name, node in self._nodes.items():
        print("\n==============================")
        print("NODE: {}\n".format(name))

        # CPU
        print("\nCPU:")
        self.cpu_info(node)

        # Grub
        print("\nGrub Command Line:")
        if 'grub' in node:
            print("  Current: {}".format(node['grub']['current_cmdline']))
            print("  Configured: {}".format(node['grub']['default_cmdline']))

        # Huge Pages
        print("\nHuge Pages:")
        self.hugepage_info(node)

        # Devices
        print("\nDevices:")
        self.device_info(node)

        # Status
        print("\nVPP Service Status:")
        state, errors = VPPUtil.status(node)
        print("  {}".format(state))
        for err in errors:
            print("  {}".format(err))

        # Minimum system resources
        self.min_system_resources(node)

        print("\n==============================")
def _ipv4_interface_setup_questions(self, node):
    """
    Ask the user some questions and get a list of interfaces
    and IPv4 addresses associated with those interfaces

    :param node: Node dictionary.
    :type node: dict
    :returns: A list or interfaces with ip addresses
    :rtype: list
    """

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return

    interfaces_with_ip = []
    for name, _ in sorted(interfaces.items()):
        # 'local0' is VPP's internal loopback; never configure it
        if name == 'local0':
            continue

        question = "Would you like add address to interface {} [Y/n]? ".format(name)
        if self._ask_user_yn(question, 'y') == 'y':
            interfaces_with_ip.append({
                'name': name,
                'addr': self._ask_user_ipv4(),
            })

    return interfaces_with_ip
def ipv4_interface_setup(self):
    """
    After asking the user some questions, get a list of interfaces
    and IPv4 addresses associated with those interfaces

    """

    for i in self._nodes.items():
        node = i[1]

        # Show the current interfaces with IP addresses
        current_ints = VPPUtil.get_int_ip(node)
        # BUG FIX: 'is not {}' compared identity against a fresh dict and
        # was always True, so the "no interfaces" branch was unreachable;
        # compare by value instead.
        if current_ints != {}:
            print("\nThese are the current interfaces with IP addresses:")
            for items in sorted(current_ints.items()):
                name = items[0]
                value = items[1]
                if 'address' not in value:
                    address = 'Not Set'
                else:
                    address = value['address']
                print("{:30} {:20} {:10}".format(name, address, value['state']))
            question = "\nWould you like to keep this configuration [Y/n]? "
            answer = self._ask_user_yn(question, 'y')
            if answer == 'y':
                continue
        else:
            print("\nThere are currently no interfaces with IP addresses.")

        # Create a script that add the ip addresses to the interfaces
        # and brings the interfaces up
        ints_with_addrs = self._ipv4_interface_setup_questions(node)
        content = ''
        for ints in ints_with_addrs:
            name = ints['name']
            addr = ints['addr']
            setipstr = 'set int ip address {} {}\n'.format(name, addr)
            setintupstr = 'set int state {} up\n'.format(name)
            content += setipstr + setintupstr

        # Write the content to the script
        rootdir = node['rootdir']
        filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
        with open(filename, 'w+') as sfile:
            sfile.write(content)

        # Execute the script; failure is logged but not fatal since the
        # script remains on disk for the user to run manually
        cmd = 'vppctl exec {}'.format(filename)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)

        # BUG FIX: message typo 'as been' -> 'has been'
        print("\nA script has been created at {}".format(filename))
        print("This script can be run using the following:")
        print("vppctl exec {}\n".format(filename))
def _create_vints_questions(self, node):
    """
    Ask the user some questions and get a list of interfaces
    and IPv4 addresses associated with those interfaces

    :param node: Node dictionary.
    :type node: dict
    :returns: A list or interfaces with ip addresses
    :rtype: list
    """

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return

    # First delete all the Virtual interfaces
    for name, _ in sorted(interfaces.items()):
        if name[:7] == 'Virtual':
            cmd = 'vppctl delete vhost-user {}'.format(name)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                logging.debug('{} failed on node {} {}'.format(
                    cmd, node['host'], stderr))

    # Create a virtual interface, for each interface the user wants to use
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return
    interfaces_with_virtual_interfaces = []
    inum = 1
    for name, _ in sorted(interfaces.items()):
        if name == 'local0':
            continue

        question = "Would you like connect this interface {} to the VM [Y/n]? ".format(name)
        answer = self._ask_user_yn(question, 'y')
        if answer != 'y':
            continue

        # Create the vhost-user socket for this interface
        sockfilename = '/var/run/vpp/{}.sock'.format(name.replace('/', '_'))
        if os.path.exists(sockfilename):
            os.remove(sockfilename)
        cmd = 'vppctl create vhost-user socket {} server'.format(sockfilename)
        (ret, stdout, stderr) = vpputl.exec_command(cmd)
        if ret != 0:
            raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
        vintname = stdout.rstrip('\r\n')

        # The VM must be able to open the socket
        cmd = 'chmod 777 {}'.format(sockfilename)
        (ret, stdout, stderr) = vpputl.exec_command(cmd)
        if ret != 0:
            raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))

        interfaces_with_virtual_interfaces.append({
            'name': name,
            'virtualinterface': '{}'.format(vintname),
            'bridge': '{}'.format(inum),
        })
        inum += 1

    return interfaces_with_virtual_interfaces
def create_and_bridge_virtual_interfaces(self):
    """
    After asking the user some questions, create a VM and connect the interfaces
    to VPP interfaces

    """

    for entry in self._nodes.items():
        node = entry[1]

        # Show the current bridge and interface configuration
        print("\nThis the current bridge configuration:")
        VPPUtil.show_bridge(node)
        question = "\nWould you like to keep this configuration [Y/n]? "
        if self._ask_user_yn(question, 'y') == 'y':
            continue

        # Create a script that builds a bridge configuration with physical interfaces
        # and virtual interfaces
        ints_with_vints = self._create_vints_questions(node)
        content = ''
        for intf in ints_with_vints:
            vhoststr = 'comment { The following command creates the socket }\n'
            vhoststr += 'comment { and returns a virtual interface }\n'
            vhoststr += 'comment {{ create vhost-user socket /var/run/vpp/sock{}.sock server }}\n'. \
                format(intf['bridge'])

            # Take the physical interface down while it is re-bridged
            setintdnstr = 'set interface state {} down\n'.format(intf['name'])

            # Put both the physical and virtual interfaces in the bridge
            setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
            setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])

            # Bring the virtual interface up
            setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])

            # Bring the physical interface back up
            setintupstr = 'set interface state {} up\n'.format(intf['name'])

            content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr

        # Write the content to the script
        rootdir = node['rootdir']
        filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
        with open(filename, 'w+') as sfile:
            sfile.write(content)

        # Execute the script
        cmd = 'vppctl exec {}'.format(filename)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)

        print("\nA script as been created at {}".format(filename))
        print("This script can be run using the following:")
        print("vppctl exec {}\n".format(filename))
def _iperf_vm_questions(self, node):
    """
    Ask the user some questions and get a list of interfaces
    and IPv4 addresses associated with those interfaces

    :param node: Node dictionary.
    :type node: dict
    :returns: A list or interfaces with ip addresses
    :rtype: list
    """

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return

    # First delete all the Virtual interfaces
    for name, _ in sorted(interfaces.items()):
        if name[:7] == 'Virtual':
            cmd = 'vppctl delete vhost-user {}'.format(name)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                logging.debug('{} failed on node {} {}'.format(
                    cmd, node['host'], stderr))

    # Create a virtual interface, for each interface the user wants to use
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return
    interfaces_with_virtual_interfaces = []
    inum = 1

    # Keep asking until the user picks exactly one interface
    while True:
        print('\nPlease pick one interface to connect to the iperf VM.')
        for name, _ in sorted(interfaces.items()):
            if name == 'local0':
                continue

            question = "Would you like connect this interface {} to the VM [y/N]? ".format(name)
            answer = self._ask_user_yn(question, 'n')
            if answer != 'y':
                continue

            # Create the vhost-user socket and remember it for the VM xml
            self._sockfilename = '/var/run/vpp/{}.sock'.format(name.replace('/', '_'))
            if os.path.exists(self._sockfilename):
                os.remove(self._sockfilename)
            cmd = 'vppctl create vhost-user socket {} server'.format(self._sockfilename)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
            vintname = stdout.rstrip('\r\n')

            # The VM must be able to open the socket
            cmd = 'chmod 777 {}'.format(self._sockfilename)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))

            interfaces_with_virtual_interfaces.append({
                'name': name,
                'virtualinterface': '{}'.format(vintname),
                'bridge': '{}'.format(inum),
            })
            inum += 1
            return interfaces_with_virtual_interfaces
def create_and_bridge_iperf_virtual_interface(self):
    """
    After asking the user some questions, and create and bridge a virtual interface
    to be used with iperf VM

    """

    for entry in self._nodes.items():
        node = entry[1]

        # Show the current bridge and interface configuration
        print("\nThis the current bridge configuration:")
        ifaces = VPPUtil.show_bridge(node)
        question = "\nWould you like to keep this configuration [Y/n]? "
        if self._ask_user_yn(question, 'y') == 'y':
            # Reuse the existing socket when it is still present
            self._sockfilename = '/var/run/vpp/{}.sock'.format(ifaces[0]['name'].replace('/', '_'))
            if os.path.exists(self._sockfilename):
                continue

        # Create a script that builds a bridge configuration with physical interfaces
        # and virtual interfaces
        ints_with_vints = self._iperf_vm_questions(node)
        content = ''
        for intf in ints_with_vints:
            vhoststr = 'comment { The following command creates the socket }\n'
            vhoststr += 'comment { and returns a virtual interface }\n'
            vhoststr += 'comment {{ create vhost-user socket /var/run/vpp/sock{}.sock server }}\n'. \
                format(intf['bridge'])

            # Take the physical interface down while it is re-bridged
            setintdnstr = 'set interface state {} down\n'.format(intf['name'])

            # Put both the physical and virtual interfaces in the bridge
            setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
            setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])

            # Bring the virtual interface up
            setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])

            # Bring the physical interface back up
            setintupstr = 'set interface state {} up\n'.format(intf['name'])

            content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr

        # Write the content to the script
        rootdir = node['rootdir']
        filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
        with open(filename, 'w+') as sfile:
            sfile.write(content)

        # Execute the script
        cmd = 'vppctl exec {}'.format(filename)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)

        print("\nA script as been created at {}".format(filename))
        print("This script can be run using the following:")
        print("vppctl exec {}\n".format(filename))
def destroy_iperf_vm(name):
    """
    Destroy the named VM via virsh, if it is currently listed as running.

    :param name: The name of the VM to be destroyed
    :type name: str
    :raises RuntimeError: If a virsh command fails
    """

    def _run(command):
        # Run a shell command, logging and raising on failure
        (ret, stdout, stderr) = VPPUtil.exec_command(command)
        if ret != 0:
            logging.debug(stderr)
            raise RuntimeError("Couldn't execute the command {} : {}".format(command, stderr))
        return stdout

    listing = _run('virsh list')
    # NOTE(review): 'name' is used as a regex pattern here, so names with
    # metacharacters (or partial matches) may over-match — confirm acceptable.
    if re.findall(name, listing):
        _run('virsh destroy {}'.format(name))
def create_iperf_vm(self, vmname):
    """
    Create the iperf VM from the distro-specific XML template and attach
    it to the previously created vhost-user socket.

    :param vmname: The name of the VM to create
    :type vmname: str
    :raises RuntimeError: If the virsh create command fails
    """

    # Read the iperf VM template file
    distro = VPPUtil.get_linux_distro()
    if distro[0] == 'Ubuntu':
        tfilename = '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(self._rootdir)
    else:
        tfilename = '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(self._rootdir)

    with open(tfilename, 'r') as tfile:
        tcontents = tfile.read()

    # Fill in the template variables
    imagename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_IMAGE)
    isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
    tcontents = tcontents.format(vmname=vmname, imagename=imagename, isoname=isoname,
                                 vhostsocketname=self._sockfilename)

    # Write the rendered libvirt domain XML
    ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
    with open(ifilename, 'w+') as ifile:
        ifile.write(tcontents)

    # Boot the VM
    cmd = 'virsh create {}'.format(ifilename)
    (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
    if ret != 0:
        logging.debug(stderr)
        raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))