1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library that supports Auto Configuration."""
15 from __future__ import absolute_import, division, print_function
20 from ipaddress import ip_address
24 from vpplib.VPPUtil import VPPUtil
25 from vpplib.VppPCIUtil import VppPCIUtil
26 from vpplib.VppHugePageUtil import VppHugePageUtil
27 from vpplib.CpuUtils import CpuUtils
28 from vpplib.VppGrubUtil import VppGrubUtil
29 from vpplib.QemuUtils import QemuUtils
# Python2/3 compatible input handling.
# On Python 3 the name `raw_input` does not exist, so a bare
# `input = raw_input` raises NameError at import time; guard the alias so
# Python 3 keeps its builtin `input` (which already behaves like py2
# raw_input).
try:
    input = raw_input  # noqa
except NameError:
    pass
# Public API of this module.
37 __all__ = ["AutoConfig"]
# Hugepage sizing bounds used by modify_huge_pages(): never configure fewer
# than 1024 pages, and never consume more than 70% of free memory.
41 MIN_TOTAL_HUGE_PAGES = 1024
42 MAX_PERCENT_FOR_HUGE_PAGES = 70
# Asset paths for the iperf test VM; presumably joined with rootdir by the
# (not visible here) VM-creation code — confirm against callers.
44 IPERFVM_XML = "configs/iperf-vm.xml"
45 IPERFVM_IMAGE = "images/xenial-mod.img"
46 IPERFVM_ISO = "configs/cloud-config.iso"
# NOTE(review): the embedded original line numbers skip values, so lines are
# missing from this extraction; the code below is preserved verbatim.
49 class AutoConfig(object):
50 """Auto Configuration Tools"""
52 def __init__(self, rootdir, filename, clean=False):
54 The Auto Configure class.
56 :param rootdir: The root directory for all the auto configuration files
57 :param filename: The autoconfiguration file
58 :param clean: When set initialize the nodes from the auto-config file
# The auto-config YAML path is rootdir + filename; both parts are also kept
# separately for later path building.
63 self._autoconfig_filename = rootdir + filename
64 self._rootdir = rootdir
# Per-instance scratch state. Other attributes read elsewhere in the class
# (_nodes, _metadata, _clean) are presumably initialized on the missing
# lines — see _loadconfig(), which assigns _nodes/_metadata.
67 self._vpp_devices_node = {}
68 self._hugepage_config = ""
71 self._sockfilename = ""
75 Returns the nodes dictionary.
# Make a one-time ".orig" backup of a config file before it is overwritten.
# (Decorator and docstring delimiters fall on lines missing from this
# extraction.)
84 def _autoconfig_backup_file(filename):
88 :param filename: The file to backup
92 # Does a copy of the file exist, if not create one
93 ofile = filename + ".orig"
# Probe for an existing backup by running "ls <ofile>" through the
# project's command helper.
94 (ret, stdout, stderr) = VPPUtil.exec_command("ls {}".format(ofile))
97 if stdout.strip("\n") != ofile:
98 cmd = "sudo cp {} {}".format(filename, ofile)
99 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
# Best-effort: a failed copy is presumably only logged (the guarding ret
# check sits on a missing line), not raised.
101 logging.debug(stderr)
103 # noinspection PyBroadException
# Prompt the user for an IPv4 address with prefix length; returns
# "addr/plen". Retry/except scaffolding falls on lines missing here.
105 def _ask_user_ipv4():
# NOTE(review): this summary was copy-pasted from _ask_user_range(); the
# function asks for an IPv4 address, not a number in a range.
107 Asks the user for a number within a range.
108 default is returned if return is entered.
110 :returns: IP address with cidr
115 answer = input("Please enter the IPv4 Address [n.n.n.n/n]: ")
117 ipinput = answer.split("/")
118 ipaddr = ip_address(ipinput[0])
# If a "/plen" suffix was supplied use it, otherwise ask for a dotted
# netmask and convert it.
120 plen = answer.split("/")[1]
122 answer = input("Please enter the netmask [n.n.n.n]: ")
# NOTE(review): stdlib ipaddress objects expose no netmask_bits() method;
# presumably the broad except (hinted by the noinspection tag) catches the
# resulting AttributeError and re-prompts — confirm against the full file.
123 plen = ip_address(answer).netmask_bits()
124 return "{}/{}".format(ipaddr, plen)
126 print("Please enter a valid IPv4 address.")
# Prompt for an integer in [first, last]; pressing Return yields `default`.
129 def _ask_user_range(question, first, last, default):
131 Asks the user for a number within a range.
132 default is returned if return is entered.
134 :param question: Text of a question.
135 :param first: First number in the range
136 :param last: Last number in the range
137 :param default: The value returned when return is entered
138 :type question: string
142 :returns: The answer to the question
147 answer = input(question)
# NOTE(review): r"[0-9+]" is a character class matching a digit OR a
# literal '+'; r"[0-9]+" was likely intended. As written, an input like
# "+" passes this check and then int(answer) raises ValueError.
151 if re.findall(r"[0-9+]", answer):
152 if int(answer) in range(first, last + 1):
156 "Please a value between {} and {} or Return.".format(
162 "Please a number between {} and {} or Return.".format(first, last)
# Prompt for a yes/no answer; pressing Return yields `default`. Presumably
# returns the normalized single letter (return lines are missing here).
168 def _ask_user_yn(question, default):
170 Asks the user for a yes or no question.
172 :param question: Text of a question.
173 :param default: The value returned when return is entered
174 :type question: string
175 :type default: string
176 :returns: The answer to the question
181 default = default.lower()
183 while not input_valid:
184 answer = input(question)
187 if re.findall(r"[YyNn]", answer):
# Only the first character matters, so "yes"/"no" also work.
189 answer = answer[0].lower()
191 print("Please answer Y, N or Return.")
# Load topology metadata and nodes from the auto-config YAML; on a
# non-clean run prefer node state from the previously written system
# config file.
195 def _loadconfig(self):
197 Load the testbed configuration, given the auto configuration file.
201 # Get the Topology, from the topology layout file
203 with open(self._autoconfig_filename, "r") as stream:
# NOTE(review): yaml.load() without an explicit Loader executes arbitrary
# tags and is deprecated in PyYAML >= 5.1; yaml.safe_load is preferred.
205 topo = yaml.load(stream)
206 if "metadata" in topo:
207 self._metadata = topo["metadata"]
208 except yaml.YAMLError as exc:
210 "Couldn't read the Auto config file {}.".format(
# NOTE(review): `exc` is passed to format() but the string has only one
# placeholder, so the YAML error detail is silently dropped.
211 self._autoconfig_filename, exc
215 systemfile = self._rootdir + self._metadata["system_config_file"]
216 if self._clean is False and os.path.isfile(systemfile):
217 with open(systemfile, "r") as sysstream:
219 systopo = yaml.load(sysstream)
220 if "nodes" in systopo:
221 self._nodes = systopo["nodes"]
222 except yaml.YAMLError as sysexc:
224 "Couldn't read the System config file {}.".format(
229 # Get the nodes from Auto Config
231 self._nodes = topo["nodes"]
233 # Set the root directory in all the nodes
234 for i in self._nodes.items():
236 node["rootdir"] = self._rootdir
# Persist the current metadata + node state to the system config YAML.
238 def updateconfig(self):
240 Update the testbed configuration, given the auto configuration file.
241 We will write the system configuration file with the current node
246 # Initialize the yaml data
247 ydata = {"metadata": self._metadata, "nodes": self._nodes}
249 # Write the system config file
# The destination path comes from the metadata loaded by _loadconfig().
250 filename = self._rootdir + self._metadata["system_config_file"]
251 with open(filename, "w") as yamlfile:
252 yaml.dump(ydata, yamlfile)
# Copy the interactively gathered interface/cpu/tcp/hugepage settings from
# self._nodes back into the auto-config YAML and rewrite that file.
254 def _update_auto_config(self):
256 Write the auto configuration file with the new configuration data,
261 # Initialize the yaml data
263 with open(self._autoconfig_filename, "r") as stream:
# NOTE(review): unsafe yaml.load (no Loader) — see _loadconfig().
265 ydata = yaml.load(stream)
267 nodes = ydata["nodes"]
268 except yaml.YAMLError as exc:
272 for i in nodes.items():
# Rebuild the interfaces section from the live node data.
277 node["interfaces"] = {}
278 for item in self._nodes[key]["interfaces"].items():
282 node["interfaces"][port] = {}
283 addr = "{}".format(interface["pci_address"])
284 node["interfaces"][port]["pci_address"] = addr
285 if "mac_address" in interface:
286 node["interfaces"][port]["mac_address"] = interface["mac_address"]
# CPU settings are copied only when present on the live node.
288 if "total_other_cpus" in self._nodes[key]["cpu"]:
289 node["cpu"]["total_other_cpus"] = self._nodes[key]["cpu"][
292 if "total_vpp_cpus" in self._nodes[key]["cpu"]:
293 node["cpu"]["total_vpp_cpus"] = self._nodes[key]["cpu"][
296 if "reserve_vpp_main_core" in self._nodes[key]["cpu"]:
297 node["cpu"]["reserve_vpp_main_core"] = self._nodes[key]["cpu"][
298 "reserve_vpp_main_core"
# TCP session counts, when configured via acquire_tcp_params().
302 if "active_open_sessions" in self._nodes[key]["tcp"]:
303 node["tcp"]["active_open_sessions"] = self._nodes[key]["tcp"][
304 "active_open_sessions"
306 if "passive_open_sessions" in self._nodes[key]["tcp"]:
307 node["tcp"]["passive_open_sessions"] = self._nodes[key]["tcp"][
308 "passive_open_sessions"
312 node["hugepages"]["total"] = self._nodes[key]["hugepages"]["total"]
314 # Write the auto config config file
315 with open(self._autoconfig_filename, "w") as yamlfile:
316 yaml.dump(ydata, yamlfile)
# Apply hugepage settings on every node via the project hugepage helper.
318 def apply_huge_pages(self):
320 Apply the huge page config
324 for i in self._nodes.items():
327 hpg = VppHugePageUtil(node)
# NOTE(review): despite the method name, this calls the *dryrun* apply —
# confirm whether the real apply happens elsewhere.
328 hpg.hugepages_dryrun_apply()
331 def _apply_vpp_cpu(node):
333 Apply the VPP cpu config
335 :param node: Node dictionary with cpuinfo.
341 if "vpp_main_core" in node["cpu"]:
342 vpp_main_core = node["cpu"]["vpp_main_core"]
345 if vpp_main_core != 0:
346 cpu += " main-core {}\n".format(vpp_main_core)
349 vpp_workers = node["cpu"]["vpp_workers"]
350 vpp_worker_len = len(vpp_workers)
351 if vpp_worker_len > 0:
353 for i, worker in enumerate(vpp_workers):
355 vpp_worker_str += ","
356 if worker[0] == worker[1]:
357 vpp_worker_str += "{}".format(worker[0])
359 vpp_worker_str += "{}-{}".format(worker[0], worker[1])
361 cpu += " corelist-workers {}\n".format(vpp_worker_str)
# Build the dpdk "dev {...}" stanzas of startup.conf from the
# ports_per_numa mapping computed by _create_ports_per_numa().
366 def _apply_vpp_devices(node):
368 Apply VPP PCI Device configuration to vpp startup.
370 :param node: Node dictionary with cpuinfo.
375 ports_per_numa = node["cpu"]["ports_per_numa"]
377 for item in ports_per_numa.items():
379 interfaces = value["interfaces"]
381 # if 0 was specified for the number of vpp workers, use 1 queue
# Queue counts are optional; the missing lines here presumably initialize
# the defaults and the descriptor counts (num_rx_desc / num_tx_desc).
384 if "rx_queues" in value:
385 num_rx_queues = value["rx_queues"]
386 if "tx_queues" in value:
387 num_tx_queues = value["tx_queues"]
392 # Create the devices string
393 for interface in interfaces:
394 pci_address = interface["pci_address"]
# PCI addresses may arrive single-quoted from YAML; strip the quotes.
395 pci_address = pci_address.lstrip("'").rstrip("'")
397 devices += " dev {} {{ \n".format(pci_address)
399 devices += " num-rx-queues {}\n".format(num_rx_queues)
# Fallback: a single rx queue when none was configured.
401 devices += " num-rx-queues {}\n".format(1)
403 devices += " num-tx-queues {}\n".format(num_tx_queues)
405 devices += " num-rx-desc {}\n".format(num_rx_desc)
407 devices += " num-tx-desc {}\n".format(num_tx_desc)
413 def _apply_buffers(node):
415 Apply VPP PCI Device configuration to vpp startup.
417 :param node: Node dictionary with cpuinfo.
421 total_mbufs = node["cpu"]["total_mbufs"]
423 # If the total mbufs is not 0 or less than the default, set num-bufs
424 logging.debug("Total mbufs: {}".format(total_mbufs))
425 if total_mbufs != 0 and total_mbufs > 16384:
426 buffers += " buffers-per-numa {}".format(total_mbufs)
431 def _calc_vpp_workers(
437 reserve_vpp_main_core,
440 Calculate the VPP worker information
442 :param node: Node dictionary
443 :param vpp_workers: List of VPP workers
444 :param numa_node: Numa node
445 :param other_cpus_end: The end of the cpus allocated for cores
447 :param total_vpp_workers: The number of vpp workers needed
448 :param reserve_vpp_main_core: Is there a core needed for
452 :type other_cpus_end: int
453 :type total_vpp_workers: int
454 :type reserve_vpp_main_core: bool
455 :returns: Is a core still needed for the vpp main core
459 # Can we fit the workers in one of these slices
460 cpus = node["cpu"]["cpus_per_node"][numa_node]
464 if start <= other_cpus_end:
465 start = other_cpus_end + 1
467 if reserve_vpp_main_core:
470 workers_end = start + total_vpp_workers - 1
472 if workers_end <= end:
473 if reserve_vpp_main_core:
474 node["cpu"]["vpp_main_core"] = start - 1
475 reserve_vpp_main_core = False
476 if total_vpp_workers:
477 vpp_workers.append((start, workers_end))
480 # We still need to reserve the main core
481 if reserve_vpp_main_core:
482 node["cpu"]["vpp_main_core"] = other_cpus_end + 1
484 return reserve_vpp_main_core
487 def _calc_desc_and_queues(
488 total_numa_nodes, total_ports_per_numa, total_rx_queues, ports_per_numa_value
491 Calculate the number of descriptors and queues
493 :param total_numa_nodes: The total number of numa nodes
494 :param total_ports_per_numa: The total number of ports for this
496 :param total_rx_queues: The total number of rx queues / port
497 :param ports_per_numa_value: The value from the ports_per_numa
499 :type total_numa_nodes: int
500 :type total_ports_per_numa: int
501 :type total_rx_queues: int
502 :type ports_per_numa_value: dict
503 :returns The total number of message buffers
507 # Get the number of rx queues
508 rx_queues = max(1, total_rx_queues)
509 tx_queues = rx_queues * total_numa_nodes + 1
511 # Get the descriptor entries
513 ports_per_numa_value["rx_queues"] = rx_queues
515 (rx_queues * desc_entries) + (tx_queues * desc_entries)
516 ) * total_ports_per_numa
521 def _create_ports_per_numa(node, interfaces):
523 Create a dictionary or ports per numa node
524 :param node: Node dictionary
525 :param interfaces: All the interfaces to be used by vpp
527 :type interfaces: dict
528 :returns: The ports per numa dictionary
532 # Make a list of ports by numa node
534 for item in interfaces.items():
536 if i["numa_node"] not in ports_per_numa:
537 ports_per_numa[i["numa_node"]] = {"interfaces": []}
538 ports_per_numa[i["numa_node"]]["interfaces"].append(i)
540 ports_per_numa[i["numa_node"]]["interfaces"].append(i)
541 node["cpu"]["ports_per_numa"] = ports_per_numa
543 return ports_per_numa
# Derive the per-node cpu plan for VPP: the "other" (non-VPP) core range,
# the VPP main core, worker core ranges per numa node, and mbuf totals.
545 def calculate_cpu_parameters(self):
547 Calculate the cpu configuration.
551 # Calculate the cpu parameters, needed for the
552 # vpp_startup and grub configuration
553 for i in self._nodes.items():
556 # get total number of nic ports
557 interfaces = node["interfaces"]
559 # Make a list of ports by numa node
560 ports_per_numa = self._create_ports_per_numa(node, interfaces)
562 # Get the number of cpus to skip, we never use the first cpu
564 other_cpus_end = other_cpus_start + node["cpu"]["total_other_cpus"] - 1
566 if other_cpus_end != 0:
567 other_workers = (other_cpus_start, other_cpus_end)
568 node["cpu"]["other_workers"] = other_workers
570 # Allocate the VPP main core and workers
572 reserve_vpp_main_core = node["cpu"]["reserve_vpp_main_core"]
573 total_vpp_cpus = node["cpu"]["total_vpp_cpus"]
574 total_rx_queues = node["cpu"]["total_rx_queues"]
576 # If total_vpp_cpus is 0 or is less than the numa nodes with ports
577 # then we shouldn't get workers
578 total_workers_node = 0
579 if len(ports_per_numa):
# Integer division: workers are split evenly across numa nodes with ports.
580 total_workers_node = total_vpp_cpus // len(ports_per_numa)
582 if reserve_vpp_main_core:
585 if total_main + total_workers_node != 0:
586 for item in ports_per_numa.items():
590 # Get the number of descriptors and queues
591 mbufs = self._calc_desc_and_queues(
593 len(value["interfaces"]),
599 # Get the VPP workers
# The returned flag carries "main core still needed" across numa nodes.
600 reserve_vpp_main_core = self._calc_vpp_workers(
606 reserve_vpp_main_core,
610 total_mbufs = int(total_mbufs)
615 node["cpu"]["vpp_workers"] = vpp_workers
616 node["cpu"]["total_mbufs"] = total_mbufs
# Build the api-segment / session / tcp stanzas of startup.conf, sized
# from the configured active/passive open session counts.
622 def _apply_vpp_tcp(node):
626 :param node: Node dictionary with cpuinfo.
630 active_open_sessions = node["tcp"]["active_open_sessions"]
631 aos = int(active_open_sessions)
633 passive_open_sessions = node["tcp"]["passive_open_sessions"]
634 pos = int(passive_open_sessions)
636 # Generate the api-segment gid vpp sheit in any case
# With no sessions configured, only the api-segment block is emitted.
638 tcp = "\n".join(["api-segment {", " gid vpp", "}"])
639 return tcp.rstrip("\n")
643 "# TCP stack-related configuration parameters",
644 "# expecting {:d} client sessions, {:d} server sessions\n".format(
649 " global-size 2000M",
# Sizing heuristic used throughout: buckets = (aos + pos) / 4.
653 " event-queue-length {:d}".format(aos + pos),
654 " preallocated-sessions {:d}".format(aos + pos),
655 " v4-session-table-buckets {:d}".format((aos + pos) // 4),
656 " v4-session-table-memory 3g\n",
661 tcp + " v4-halfopen-table-buckets {:d}".format((aos + pos) // 4) + "\n"
663 tcp = tcp + " v4-halfopen-table-memory 3g\n"
666 + " local-endpoints-table-buckets {:d}".format((aos + pos) // 4)
669 tcp = tcp + " local-endpoints-table-memory 3g\n"
672 tcp = tcp + "tcp {\n"
673 tcp = tcp + " preallocated-connections {:d}".format(aos + pos) + "\n"
# Half-open connections are driven by active (client) sessions only.
675 tcp = tcp + " preallocated-half-open-connections {:d}".format(aos) + "\n"
678 return tcp.rstrip("\n")
# Render each node's startup config from its ".template" file, substituting
# the generated cpu/devices/buffers/tcp fragments, then write it via sudo.
680 def apply_vpp_startup(self):
682 Apply the vpp startup configration
686 # Apply the VPP startup configruation
687 for i in self._nodes.items():
690 # Get the startup file
691 rootdir = node["rootdir"]
692 sfile = rootdir + node["vpp"]["startup_config_file"]
695 devices = self._apply_vpp_devices(node)
698 cpu = self._apply_vpp_cpu(node)
700 # Get the buffer configuration
701 buffers = self._apply_buffers(node)
702 # Get the TCP configuration, if any
703 tcp = self._apply_vpp_tcp(node)
705 # Make a backup if needed
706 self._autoconfig_backup_file(sfile)
709 tfile = sfile + ".template"
710 (ret, stdout, stderr) = VPPUtil.exec_command("cat {}".format(tfile))
713 "Executing cat command failed to node {}".format(node["host"])
# The template body is a str.format() template with {cpu}/{buffers}/
# {devices}/{tcp} placeholders.
715 startup = stdout.format(cpu=cpu, buffers=buffers, devices=devices, tcp=tcp)
717 (ret, stdout, stderr) = VPPUtil.exec_command("rm {}".format(sfile))
719 logging.debug(stderr)
# NOTE(review): the rendered config is interpolated into a shell heredoc;
# an "EOF" line inside the content would truncate the write — confirm the
# templates can never contain one.
721 cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
722 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
724 raise RuntimeError("Writing config failed node {}".format(node["host"]))
# Build the isolcpus list from the other/main/worker core ranges and apply
# it to the grub cmdline, saving the previous cmdline on the node.
726 def apply_grub_cmdline(self):
728 Apply the grub cmdline
732 for i in self._nodes.items():
735 # Get the isolated CPUs
736 other_workers = node["cpu"]["other_workers"]
737 vpp_workers = node["cpu"]["vpp_workers"]
738 if "vpp_main_core" in node["cpu"]:
739 vpp_main_core = node["cpu"]["vpp_main_core"]
743 if other_workers is not None:
744 all_workers = [other_workers]
# Main core 0 means "unset" and is not isolated.
745 if vpp_main_core != 0:
746 all_workers += [(vpp_main_core, vpp_main_core)]
747 all_workers += vpp_workers
749 for idx, worker in enumerate(all_workers):
# Ranges collapse to "n" when start == end, else "n-m".
754 if worker[0] == worker[1]:
755 isolated_cpus += "{}".format(worker[0])
757 isolated_cpus += "{}-{}".format(worker[0], worker[1])
759 vppgrb = VppGrubUtil(node)
760 current_cmdline = vppgrb.get_current_cmdline()
761 if "grub" not in node:
763 node["grub"]["current_cmdline"] = current_cmdline
764 node["grub"]["default_cmdline"] = vppgrb.apply_cmdline(node, isolated_cpus)
# Record each node's current hugepage state (sysctl limits plus actual
# /proc counts) into node["hugepages"].
768 def get_hugepages(self):
770 Get the hugepage configuration
774 for i in self._nodes.items():
777 hpg = VppHugePageUtil(node)
778 max_map_count, shmmax = hpg.get_huge_page_config()
779 node["hugepages"]["max_map_count"] = max_map_count
# NOTE(review): key "shmax" looks like a typo for "shmmax"; confirm no
# reader depends on the misspelled key before renaming it.
780 node["hugepages"]["shmax"] = shmmax
781 total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
782 node["hugepages"]["actual_total"] = total
783 node["hugepages"]["free"] = free
784 node["hugepages"]["size"] = size
785 node["hugepages"]["memtotal"] = memtotal
786 node["hugepages"]["memfree"] = memfree
# Capture grub cmdlines and count the currently isolated CPUs per node.
# (The def line fell on a line missing from this extraction; presumably
# "def get_grub(self):" given the "grub" keys written below.)
792 Get the grub configuration
796 for i in self._nodes.items():
799 vppgrb = VppGrubUtil(node)
800 current_cmdline = vppgrb.get_current_cmdline()
801 default_cmdline = vppgrb.get_default_cmdline()
803 # Get the total number of isolated CPUs
# isolcpus= accepts a comma list of single cpus and n-m ranges.
805 iso_cpur = re.findall(r"isolcpus=[\w+\-,]+", current_cmdline)
806 iso_cpurl = len(iso_cpur)
808 iso_cpu_str = iso_cpur[0]
809 iso_cpu_str = iso_cpu_str.split("=")[1]
810 iso_cpul = iso_cpu_str.split(",")
811 for iso_cpu in iso_cpul:
812 isocpuspl = iso_cpu.split("-")
# A bare "n" token counts one cpu; range handling spans missing lines.
813 if len(isocpuspl) == 1:
814 current_iso_cpus += 1
816 first = int(isocpuspl[0])
817 second = int(isocpuspl[1])
819 current_iso_cpus += 1
821 current_iso_cpus += second - first
823 if "grub" not in node:
825 node["grub"]["current_cmdline"] = current_cmdline
826 node["grub"]["default_cmdline"] = default_cmdline
827 node["grub"]["current_iso_cpus"] = current_iso_cpus
# Discover all PCI devices on a node and cache them by category under
# node["devices"].
832 def _get_device(node):
834 Get the device configuration for a single node
836 :param node: Node dictionary with cpuinfo.
841 vpp = VppPCIUtil(node)
842 vpp.get_all_devices()
844 # Save the device information
846 node["devices"]["dpdk_devices"] = vpp.get_dpdk_devices()
847 node["devices"]["kernel_devices"] = vpp.get_kernel_devices()
848 node["devices"]["other_devices"] = vpp.get_other_devices()
849 node["devices"]["linkup_devices"] = vpp.get_link_up_devices()
# Refresh the cached device information for every node.
851 def get_devices_per_node(self):
853 Get the device configuration for all the nodes
857 for i in self._nodes.items():
859 # Update the interface data
861 self._get_device(node)
# Parse `lscpu -p` output into a list of per-cpu dicts
# (cpu/core/socket/node columns).
866 def get_cpu_layout(node):
870 using lscpu -p get the cpu layout.
871 Returns a list with each item representing a single cpu.
873 :param node: Node dictionary.
875 :returns: The cpu layout
880 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
883 "{} failed on node {} {}".format(cmd, node["host"], stderr)
887 lines = stdout.split("\n")
# lscpu -p emits comment lines starting with '#'; skip those and blanks.
889 if line == "" or line[0] == "#":
891 linesplit = line.split(",")
894 "core": linesplit[1],
895 "socket": linesplit[2],
896 "node": linesplit[3],
899 # cpu, core, socket, node
# Gather cpu layout and SMT state for every node. (The def line is missing
# from this extraction; presumably "def get_cpu(self):".)
906 Get the cpu configuration
911 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
913 for i in self._nodes.items():
917 layout = self.get_cpu_layout(node)
918 node["cpu"]["layout"] = layout
920 cpuinfo = node["cpuinfo"]
921 smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
922 node["cpu"]["smt_enabled"] = smt_enabled
924 # We don't want to write the cpuinfo
# Discover the full current system state: hugepages, devices, cpu and grub
# cmdline. (The def line is missing from this extraction.)
932 Get the current system configuration.
936 # Get the Huge Page configuration
939 # Get the device configuration
940 self.get_devices_per_node()
942 # Get the CPU configuration
945 # Get the current grub cmdline
# Interactive wizard for the cpu section of a node's configuration.
948 def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
950 Ask the user questions related to the cpu configuration.
952 :param node: Node dictionary
953 :param total_cpus: The total number of cpus in the system
954 :param numa_nodes: The list of numa nodes in the system
956 :type total_cpus: int
957 :type numa_nodes: list
961 "\nYour system has {} core(s) and {} Numa Nodes.".format(
962 total_cpus, len(numa_nodes)
966 "To begin, we suggest not reserving any cores for "
967 "VPP or other processes."
970 "Then to improve performance start reserving cores and "
971 "adding queues as needed."
974 # Leave 1 for the general system
# VPP gets at most 4 cores here regardless of system size.
976 max_vpp_cpus = min(total_cpus, 4)
980 "\nHow many core(s) shall we reserve for "
981 "VPP [0-{}][0]? ".format(max_vpp_cpus)
983 total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
984 node["cpu"]["total_vpp_cpus"] = total_vpp_cpus
987 max_other_cores = total_cpus - total_vpp_cpus
988 if max_other_cores > 0:
990 "How many core(s) do you want to reserve for "
991 "processes other than VPP? [0-{}][0]? ".format(str(max_other_cores))
993 total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
994 node["cpu"]["total_other_cpus"] = total_other_cpus
996 max_main_cpus = total_cpus - total_vpp_cpus - total_other_cpus
997 reserve_vpp_main_core = False
998 if max_main_cpus > 0:
999 question = "Should we reserve 1 core for the VPP Main thread? "
1000 question += "[y/N]? "
1001 answer = self._ask_user_yn(question, "n")
1003 reserve_vpp_main_core = True
# vpp_main_core starts at 0 ("unset"); calculate_cpu_parameters fills it.
1004 node["cpu"]["reserve_vpp_main_core"] = reserve_vpp_main_core
1005 node["cpu"]["vpp_main_core"] = 0
# NOTE(review): the .format(max_vpp_cpus) below has no placeholder to
# fill; the argument is ignored (harmless, likely a leftover).
1008 "How many RX queues per port shall we use for "
1009 "VPP [1-4][1]? ".format(max_vpp_cpus)
1011 total_rx_queues = self._ask_user_range(question, 1, 4, 1)
1012 node["cpu"]["total_rx_queues"] = total_rx_queues
# Compute each node's cpu topology (slices, numa nodes, cores), optionally
# run the interactive cpu wizard, then persist the auto-config file.
1014 def modify_cpu(self, ask_questions=True):
1016 Modify the cpu configuration, asking for the user for the values.
1018 :param ask_questions: When true ask the user for config parameters
1022 # Get the CPU layout
1023 CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
1025 for i in self._nodes.items():
1028 total_cpus_per_slice = 0
1032 cpu_layout = self.get_cpu_layout(node)
1034 # Assume the number of cpus per slice is always the same as the
# Count cpus on the first numa node to size a "slice".
1037 for cpu in cpu_layout:
1038 if cpu["node"] != first_node:
1040 total_cpus_per_slice += 1
1042 # Get the total number of cpus, cores, and numa nodes from the
1044 for cpul in cpu_layout:
1045 numa_node = cpul["node"]
1050 if numa_node not in cpus_per_node:
1051 cpus_per_node[numa_node] = []
# Record one inclusive (first, last) range per slice boundary.
1052 cpuperslice = int(cpu) % total_cpus_per_slice
1053 if cpuperslice == 0:
1054 cpus_per_node[numa_node].append(
1055 (int(cpu), int(cpu) + total_cpus_per_slice - 1)
1057 if numa_node not in numa_nodes:
1058 numa_nodes.append(numa_node)
1059 if core not in cores:
1061 node["cpu"]["cpus_per_node"] = cpus_per_node
1063 # Ask the user some questions
# Systems with fewer than 4 cpus skip the wizard entirely.
1064 if ask_questions and total_cpus >= 4:
1065 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1067 # Populate the interfaces with the numa node
1068 if "interfaces" in node:
1069 ikeys = node["interfaces"].keys()
1070 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1072 # We don't want to write the cpuinfo
1073 node["cpuinfo"] = ""
1076 self._update_auto_config()
# Interactive reassignment of "other" (unbound) PCI devices to either the
# OS kernel driver or to VPP/DPDK; the three device dicts are mutated in
# place.
1079 def _modify_other_devices(self, node, other_devices, kernel_devices, dpdk_devices):
1081 Modify the devices configuration, asking for the user for the values.
1085 odevices_len = len(other_devices)
1086 if odevices_len > 0:
1088 "\nThese device(s) are currently NOT being used " "by VPP or the OS.\n"
1090 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1091 question = "\nWould you like to give any of these devices"
1092 question += " back to the OS [Y/n]? "
1093 answer = self._ask_user_yn(question, "Y")
1096 for dit in other_devices.items():
1099 question = "Would you like to use device {} for".format(dvid)
1100 question += " the OS [y/N]? "
1101 answer = self._ask_user_yn(question, "n")
# A device can be rebound only if it advertises an unused driver.
1105 and len(device["unused"]) != 0
1106 and device["unused"][0] != ""
1108 driver = device["unused"][0]
1109 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1111 logging.debug("Could not bind device {}".format(dvid))
# Selected devices are moved from other_devices to kernel_devices.
1114 for dit in vppd.items():
1117 kernel_devices[dvid] = device
1118 del other_devices[dvid]
1120 odevices_len = len(other_devices)
1121 if odevices_len > 0:
1122 print("\nThese device(s) are still NOT being used " "by VPP or the OS.\n")
1123 VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1124 question = "\nWould you like use any of these for VPP [y/N]? "
1125 answer = self._ask_user_yn(question, "N")
1128 for dit in other_devices.items():
1131 question = "Would you like to use device {} ".format(dvid)
1132 question += "for VPP [y/N]? "
1133 answer = self._ask_user_yn(question, "n")
1136 for dit in vppd.items():
1141 and len(device["unused"]) != 0
1142 and device["unused"][0] != ""
1144 driver = device["unused"][0]
1146 "Binding device {} to driver {}".format(dvid, driver)
1148 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1150 logging.debug("Could not bind device {}".format(dvid))
# Devices chosen for VPP move from other_devices to dpdk_devices.
1152 dpdk_devices[dvid] = device
1153 del other_devices[dvid]
# Non-interactive path: rebuild each node's interfaces dict straight from
# the pci addresses already listed in the configuration.
1155 def update_interfaces_config(self):
1157 Modify the interfaces directly from the config file.
1161 for i in self._nodes.items():
1163 devices = node["devices"]
# NOTE(review): all_devices aliases devices["other_devices"]; the two
# update() calls below mutate that dict in place — confirm intended.
1164 all_devices = devices["other_devices"]
1165 all_devices.update(devices["dpdk_devices"])
1166 all_devices.update(devices["kernel_devices"])
1170 if "interfaces" in node:
1171 current_ifcs = node["interfaces"]
1173 for ifc in current_ifcs.values():
1174 dvid = ifc["pci_address"]
# Only interfaces whose pci address maps to a discovered device survive.
1175 if dvid in all_devices:
1176 VppPCIUtil.vpp_create_interface(
1177 interfaces, dvid, all_devices[dvid]
1179 node["interfaces"] = interfaces
# Full interactive device workflow per node: reassign unbound devices,
# offer kernel devices to VPP, offer to return DPDK devices to the kernel,
# then rebuild the interfaces dict and persist the auto-config.
1183 def modify_devices(self):
1185 Modify the devices configuration, asking for the user for the values.
1189 for i in self._nodes.items():
1191 devices = node["devices"]
1192 other_devices = devices["other_devices"]
1193 kernel_devices = devices["kernel_devices"]
1194 dpdk_devices = devices["dpdk_devices"]
1197 self._modify_other_devices(
1198 node, other_devices, kernel_devices, dpdk_devices
1201 # Get the devices again for this node
# Re-discover after _modify_other_devices may have rebound devices.
1202 self._get_device(node)
1203 devices = node["devices"]
1204 kernel_devices = devices["kernel_devices"]
1205 dpdk_devices = devices["dpdk_devices"]
1207 klen = len(kernel_devices)
1209 print("\nThese devices are safe to be used with VPP.\n")
1210 VppPCIUtil.show_vpp_devices(kernel_devices)
1212 "\nWould you like to use any of these " "device(s) for VPP [y/N]? "
1214 answer = self._ask_user_yn(question, "n")
1217 for dit in kernel_devices.items():
1220 question = "Would you like to use device {} ".format(dvid)
1221 question += "for VPP [y/N]? "
1222 answer = self._ask_user_yn(question, "n")
1225 for dit in vppd.items():
# Rebinding requires a usable "unused" driver entry on the device.
1230 and len(device["unused"]) != 0
1231 and device["unused"][0] != ""
1233 driver = device["unused"][0]
1234 question = "Would you like to bind the driver {} for {} [y/N]? ".format(
1237 answer = self._ask_user_yn(question, "n")
1240 "Binding device {} to driver {}".format(
1244 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1247 "Could not bind device {}".format(dvid)
1249 dpdk_devices[dvid] = device
1250 del kernel_devices[dvid]
1252 dlen = len(dpdk_devices)
1254 print("\nThese device(s) are already using DPDK.\n")
1255 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1256 question = "\nWould you like to remove any of "
1257 question += "these device(s) [y/N]? "
1258 answer = self._ask_user_yn(question, "n")
1261 for dit in dpdk_devices.items():
1264 question = "Would you like to remove {} [y/N]? ".format(dvid)
1265 answer = self._ask_user_yn(question, "n")
1267 vppdl[dvid] = device
1268 for dit in vppdl.items():
1273 and len(device["unused"]) != 0
1274 and device["unused"][0] != ""
1276 driver = device["unused"][0]
1278 "Binding device {} to driver {}".format(dvid, driver)
1280 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1282 logging.debug("Could not bind device {}".format(dvid))
# Removed DPDK devices move back to the kernel device list.
1284 kernel_devices[dvid] = device
1285 del dpdk_devices[dvid]
# Whatever remains in dpdk_devices becomes the node's VPP interfaces.
1288 for dit in dpdk_devices.items():
1291 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1292 node["interfaces"] = interfaces
1294 self._update_auto_config()
# Interactive hugepage sizing, capped at 70% of currently free memory.
1297 def modify_huge_pages(self):
1299 Modify the huge page configuration, asking for the user for the values.
1303 for i in self._nodes.items():
1306 total = node["hugepages"]["actual_total"]
1307 free = node["hugepages"]["free"]
1308 size = node["hugepages"]["size"]
# memfree/size look like "NNN kB" strings; take the numeric part.
1309 memfree = node["hugepages"]["memfree"].split(" ")[0]
1310 hugesize = int(size.split(" ")[0])
1311 # The max number of huge pages should be no more than
1312 # 70% of total free memory
1313 maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // hugesize
1314 print("\nThere currently {} {} huge pages free.".format(free, size))
1315 question = "Do you want to reconfigure the number of " "huge pages [y/N]? "
1316 answer = self._ask_user_yn(question, "n")
# Declining keeps the currently configured total.
1318 node["hugepages"]["total"] = total
1321 print("\nThere currently a total of {} huge pages.".format(total))
1322 question = "How many huge pages do you want [{} - {}][{}]? ".format(
1323 MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES
# NOTE(review): the literal 1024s below should track MIN_TOTAL_HUGE_PAGES
# so the prompt and the accepted range cannot drift apart.
1325 answer = self._ask_user_range(question, 1024, maxpages, 1024)
1326 node["hugepages"]["total"] = str(answer)
1328 # Update auto-config.yaml
1329 self._update_auto_config()
1331 # Rediscover just the hugepages
1332 self.get_hugepages()
# Placeholder: tcp parameters currently need no rediscovery; kept so the
# acquire/apply flow mirrors the other subsystems.
1334 def get_tcp_params(self):
1336 Get the tcp configuration
1339 # maybe nothing to do here?
# Ask for expected active/passive tcp session counts per node; answers
# under 10000 are treated as 0 (the reset sits on missing lines).
1342 def acquire_tcp_params(self):
1344 Ask the user for TCP stack configuration parameters
1348 for i in self._nodes.items():
1352 "\nHow many active-open / tcp client sessions are "
1353 "expected [0-10000000][0]? "
1355 answer = self._ask_user_range(question, 0, 10000000, 0)
1356 # Less than 10K is equivalent to 0
1357 if int(answer) < 10000:
1359 node["tcp"]["active_open_sessions"] = answer
1362 "How many passive-open / tcp server sessions are "
1363 "expected [0-10000000][0]? "
1365 answer = self._ask_user_range(question, 0, 10000000, 0)
1366 # Less than 10K is equivalent to 0
1367 if int(answer) < 10000:
1369 node["tcp"]["passive_open_sessions"] = answer
1371 # Update auto-config.yaml
1372 self._update_auto_config()
1374 # Rediscover tcp parameters
1375 self.get_tcp_params()
# Rebuild qemu with the project's patches applied on the given node.
1378 def patch_qemu(node):
1380 Patch qemu with the correct patches.
1382 :param node: Node dictionary
1386 print('\nWe are patching the node "{}":\n'.format(node["host"]))
1387 QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
# Pretty-print lscpu-style cpu details plus VPP thread placement for a
# node. (The def line is missing from this extraction.)
1392 print the CPU information
1396 cpu = CpuUtils.get_cpu_info_per_node(node)
# Each field is printed right-aligned as "<label>: <value>"; the guards on
# the missing lines presumably check `item in cpu` before printing.
1400 print("{:>20}: {}".format(item, cpu[item]))
1403 print("{:>20}: {}".format(item, cpu[item]))
1404 item = "Thread(s) per core"
1406 print("{:>20}: {}".format(item, cpu[item]))
1407 item = "Core(s) per socket"
1409 print("{:>20}: {}".format(item, cpu[item]))
1412 print("{:>20}: {}".format(item, cpu[item]))
1413 item = "NUMA node(s)"
1416 numa_nodes = int(cpu[item])
1417 for i in range(0, numa_nodes):
1418 item = "NUMA node{} CPU(s)".format(i)
1419 print("{:>20}: {}".format(item, cpu[item]))
1420 item = "CPU max MHz"
1422 print("{:>20}: {}".format(item, cpu[item]))
1423 item = "CPU min MHz"
1425 print("{:>20}: {}".format(item, cpu[item]))
1427 if node["cpu"]["smt_enabled"]:
1431 print("{:>20}: {}".format("SMT", smt))
1434 print("\nVPP Threads: (Name: Cpu Number)")
1435 vpp_processes = cpu["vpp_processes"]
1436 for i in vpp_processes.items():
1437 print(" {:10}: {:4}".format(i[0], i[1]))
# Print the device inventory for a node: link-up, kernel-bound, DPDK-bound
# and unbound devices, followed by the interfaces VPP itself reports.
1440 def device_info(node):
1442 Show the device information.
1446 if "cpu" in node and "total_mbufs" in node["cpu"]:
1447 total_mbufs = node["cpu"]["total_mbufs"]
1448 if total_mbufs != 0:
1449 print("Total Number of Buffers: {}".format(total_mbufs))
1451 vpp = VppPCIUtil(node)
1452 vpp.get_all_devices()
# Devices with link up must stay with the kernel — VPP cannot claim them.
1453 linkup_devs = vpp.get_link_up_devices()
1454 if len(linkup_devs):
1455 print("\nDevices with link up (can not be used with VPP):")
1456 vpp.show_vpp_devices(linkup_devs, show_header=False)
1457 # for dev in linkup_devs:
1459 kernel_devs = vpp.get_kernel_devices()
1460 if len(kernel_devs):
1461 print("\nDevices bound to kernel drivers:")
1462 vpp.show_vpp_devices(kernel_devs, show_header=False)
1464 print("\nNo devices bound to kernel drivers")
1466 dpdk_devs = vpp.get_dpdk_devices()
1468 print("\nDevices bound to DPDK drivers:")
1469 vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)
1471 print("\nNo devices bound to DPDK drivers")
1473 other_devs = vpp.get_other_devices()
1475 print("\nDevices not bound to Kernel or DPDK drivers:")
1476 vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)
1478 print("\nNo devices not bound to Kernel or DPDK drivers")
# `vpputl` is presumably a VPPUtil instance created on a missing line.
1481 interfaces = vpputl.get_hardware(node)
1482 if interfaces == {}:
1485 print("\nDevices in use by VPP:")
1487 if len(interfaces.items()) < 2:
1492 "{:30} {:4} {:4} {:7} {:4} {:7}".format(
1493 "Name", "Numa", "RXQs", "RXDescs", "TXQs", "TXDescs"
1496 for intf in sorted(interfaces.items()):
1499 if name == "local0":
# Fields default to "" and are filled only when VPP reports them.
1501 numa = rx_qs = rx_ds = tx_qs = tx_ds = ""
1503 numa = int(value["numa"])
1504 if "rx queues" in value:
1505 rx_qs = int(value["rx queues"])
1506 if "rx descs" in value:
1507 rx_ds = int(value["rx descs"])
1508 if "tx queues" in value:
1509 tx_qs = int(value["tx queues"])
1510 if "tx descs" in value:
1511 tx_ds = int(value["tx descs"])
1514 "{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".format(
1515 name, numa, rx_qs, rx_ds, tx_qs, tx_ds
def hugepage_info(node):
    """
    Display the huge page configuration of the node.

    :param node: Node dictionary
    :type node: dict
    """
    # VppHugePageUtil does all the formatting/printing.
    VppHugePageUtil(node).show_huge_pages()
def has_interfaces(node):
    """
    Check for interfaces, return true if there is at least one.

    :param node: Node dictionary
    :type node: dict
    :returns: True if the node has at least one interface, False otherwise
    :rtype: bool
    """
    # bool() collapses the truthy/falsy interface map into the literal
    # True/False the original if/else returned.
    return bool("interfaces" in node and node["interfaces"])
def min_system_resources(node):
    """
    Check the system for basic minimum resources, return true if
    there is enough.

    :param node: Node dictionary
    :type node: dict
    :returns: True if the node has enough CPUs and free memory for VPP
    :rtype: bool
    """

    min_sys_res = True

    # CPUs: VPP needs at least one core for itself plus one for the OS.
    if "layout" in node["cpu"]:
        total_cpus = len(node["cpu"]["layout"])
        if total_cpus < 2:
            print(
                "\nThere is only {} CPU(s) available on this system. "
                "This is not enough to run VPP.".format(total_cpus)
            )
            min_sys_res = False

    # System Memory: when no huge pages are allocated yet, verify the
    # minimum allocation would not consume too much of free memory.
    if (
        "free" in node["hugepages"]
        and "memfree" in node["hugepages"]
        and "size" in node["hugepages"]
    ):
        free = node["hugepages"]["free"]
        memfree = float(node["hugepages"]["memfree"].split(" ")[0])
        hugesize = float(node["hugepages"]["size"].split(" ")[0])

        memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
        percentmemhugepages = (memhugepages / memfree) * 100
        # Bug fix: the original used `free is "0"` — an identity check
        # against a string literal that is almost never true (and a
        # SyntaxWarning on modern CPython).  Compare by value instead.
        if free == "0" and percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
            print(
                "\nThe System has only {} of free memory. You will not "
                "be able to allocate enough Huge Pages for VPP.".format(
                    int(memfree)
                )
            )
            min_sys_res = False

    return min_sys_res
def sys_info(self):
    """
    Print the system information

    """

    for name, node in self._nodes.items():
        print("\n==============================")
        print("NODE: {}\n".format(name))

        # CPU
        print("CPU:")
        self.cpu_info(node)

        # Grub
        print("\nGrub Command Line:")
        print(" Current: {}".format(node["grub"]["current_cmdline"]))
        print(" Configured: {}".format(node["grub"]["default_cmdline"]))

        # Huge Pages
        print("\nHuge Pages:")
        self.hugepage_info(node)

        # Devices
        print("\nDevices:")
        self.device_info(node)

        # VPP service state plus any errors it reported
        print("\nVPP Service Status:")
        state, errors = VPPUtil.status(node)
        print(" {}".format(state))
        for e in errors:
            print(" {}".format(e))

        # Minimum system resources
        self.min_system_resources(node)

        print("\n==============================")
def _ipv4_interface_setup_questions(self, node):
    """
    Ask the user some questions and get a list of interfaces
    and IPv4 addresses associated with those interfaces

    :param node: Node dictionary.
    :type node: dict
    :returns: A list of interfaces with ip addresses
    :rtype: list
    """

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        # Robustness fix: return an empty list (not None) so the
        # caller can always iterate the result.
        return []

    interfaces_with_ip = []
    for intf in sorted(interfaces.items()):
        name = intf[0]
        if name == "local0":
            continue

        question = "Would you like add address to " "interface {} [Y/n]? ".format(
            name
        )
        answer = self._ask_user_yn(question, "y")
        if answer == "y":
            address = {}
            addr = self._ask_user_ipv4()
            address["name"] = name
            address["addr"] = addr
            interfaces_with_ip.append(address)

    return interfaces_with_ip
def ipv4_interface_setup(self):
    """
    After asking the user some questions, get a list of interfaces
    and IPv4 addresses associated with those interfaces

    """

    for i in self._nodes.items():
        node = i[1]

        # Show the current interfaces with IP addresses
        current_ints = VPPUtil.get_int_ip(node)
        if current_ints != {}:
            print("\nThese are the current interfaces with IP addresses:")
            for items in sorted(current_ints.items()):
                name = items[0]
                value = items[1]
                if "address" not in value:
                    address = "Not Set"
                else:
                    address = value["address"]
                print("{:30} {:20} {:10}".format(name, address, value["state"]))
            question = "\nWould you like to keep this configuration " "[Y/n]? "
            answer = self._ask_user_yn(question, "y")
            if answer == "y":
                continue
        else:
            print("\nThere are currently no interfaces with IP " "addresses.")

        # Create a script that add the ip addresses to the interfaces
        # and brings the interfaces up
        ints_with_addrs = self._ipv4_interface_setup_questions(node)
        content = ""
        for ints in ints_with_addrs:
            name = ints["name"]
            addr = ints["addr"]
            setipstr = "set int ip address {} {}\n".format(name, addr)
            setintupstr = "set int state {} up\n".format(name)
            content += setipstr + setintupstr

        # Write the content to the script
        rootdir = node["rootdir"]
        filename = rootdir + "/vpp/vpp-config/scripts/set_int_ipv4_and_up"
        with open(filename, "w+") as sfile:
            sfile.write(content)

        # Execute the script
        cmd = "vppctl exec {}".format(filename)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)

        # Typo fix in the user-facing message: "as been" -> "has been".
        print("\nA script has been created at {}".format(filename))
        print("This script can be run using the following:")
        print("vppctl exec {}\n".format(filename))
def _create_vints_questions(self, node):
    """
    Ask the user some questions and create a vhost-user (virtual)
    interface for each physical interface the user wants bridged
    to the VM.

    :param node: Node dictionary.
    :type node: dict
    :returns: A list of interfaces with their virtual interfaces
    :rtype: list
    """

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return []

    # First delete all the Virtual interfaces
    for intf in sorted(interfaces.items()):
        name = intf[0]
        if name[:7] == "Virtual":
            cmd = "vppctl delete vhost-user {}".format(name)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                # Best effort: log and keep going.
                logging.debug(
                    "{} failed on node {} {}".format(cmd, node["host"], stderr)
                )

    # Create a virtual interface, for each interface the user wants to use
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return []
    interfaces_with_virtual_interfaces = []
    inum = 1
    for intf in sorted(interfaces.items()):
        name = intf[0]
        if name == "local0":
            continue

        question = (
            "Would you like connect this interface {} to "
            "the VM [Y/n]? ".format(name)
        )
        answer = self._ask_user_yn(question, "y")
        if answer == "y":
            # Remove a stale socket so VPP can create a fresh one.
            sockfilename = "/var/run/vpp/{}.sock".format(name.replace("/", "_"))
            if os.path.exists(sockfilename):
                os.remove(sockfilename)
            cmd = "vppctl create vhost-user socket {} server".format(sockfilename)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                raise RuntimeError(
                    "Couldn't execute the command {}, {}.".format(cmd, stderr)
                )
            vintname = stdout.rstrip("\r\n")

            # The VM needs write access to the socket.
            cmd = "chmod 777 {}".format(sockfilename)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                raise RuntimeError(
                    "Couldn't execute the command {}, {}.".format(cmd, stderr)
                )

            interface = {
                "name": name,
                "virtualinterface": "{}".format(vintname),
                "bridge": "{}".format(inum),
            }
            inum += 1
            interfaces_with_virtual_interfaces.append(interface)

    return interfaces_with_virtual_interfaces
def create_and_bridge_virtual_interfaces(self):
    """
    After asking the user some questions, create a VM and connect
    the interfaces to VPP interfaces

    """

    for i in self._nodes.items():
        node = i[1]

        # Show the current bridge and interface configuration
        print("\nThis the current bridge configuration:")
        VPPUtil.show_bridge(node)
        question = "\nWould you like to keep this configuration [Y/n]? "
        answer = self._ask_user_yn(question, "y")
        if answer == "y":
            continue

        # Create a script that builds a bridge configuration with
        # physical interfaces and virtual interfaces
        ints_with_vints = self._create_vints_questions(node)
        content = ""
        for intf in ints_with_vints:
            vhoststr = "\n".join(
                [
                    "comment { The following command creates the socket }",
                    "comment { and returns a virtual interface }",
                    "comment {{ create vhost-user socket "
                    "/var/run/vpp/sock{}.sock server }}\n".format(intf["bridge"]),
                ]
            )

            setintdnstr = "set interface state {} down\n".format(intf["name"])

            setintbrstr = "set interface l2 bridge {} {}\n".format(
                intf["name"], intf["bridge"]
            )
            setvintbrstr = "set interface l2 bridge {} {}\n".format(
                intf["virtualinterface"], intf["bridge"]
            )

            # set interface state VirtualEthernet/0/0/0 up
            setintvststr = "set interface state {} up\n".format(
                intf["virtualinterface"]
            )

            # Bring the physical interface back up (the original
            # comment wrongly said "down" here).
            setintupstr = "set interface state {} up\n".format(intf["name"])

            content += (
                vhoststr
                + setintdnstr
                + setintbrstr
                + setvintbrstr
                + setintvststr
                + setintupstr
            )

        # Write the content to the script
        rootdir = node["rootdir"]
        filename = rootdir + "/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp"
        with open(filename, "w+") as sfile:
            sfile.write(content)

        # Execute the script
        cmd = "vppctl exec {}".format(filename)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)

        # Typo fix in the user-facing message: "as been" -> "has been".
        print("\nA script has been created at {}".format(filename))
        print("This script can be run using the following:")
        print("vppctl exec {}\n".format(filename))
def _iperf_vm_questions(self, node):
    """
    Ask the user to pick one interface to connect to the iperf VM and
    create a vhost-user (virtual) interface for it.

    :param node: Node dictionary.
    :type node: dict
    :returns: A list with the chosen interface and its virtual interface
    :rtype: list
    """

    vpputl = VPPUtil()
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return []

    # First delete all the Virtual interfaces
    for intf in sorted(interfaces.items()):
        name = intf[0]
        if name[:7] == "Virtual":
            cmd = "vppctl delete vhost-user {}".format(name)
            (ret, stdout, stderr) = vpputl.exec_command(cmd)
            if ret != 0:
                logging.debug(
                    "{} failed on node {} {}".format(cmd, node["host"], stderr)
                )

    # Create a virtual interface, for each interface the user wants to use
    interfaces = vpputl.get_hardware(node)
    if interfaces == {}:
        return []
    interfaces_with_virtual_interfaces = []
    inum = 1

    # Keep asking until exactly one interface has been picked.
    while True:
        print("\nPlease pick one interface to connect to the iperf VM.")
        for intf in sorted(interfaces.items()):
            name = intf[0]
            if name == "local0":
                continue

            question = (
                "Would you like connect this interface {} to "
                "the VM [y/N]? ".format(name)
            )
            answer = self._ask_user_yn(question, "n")
            if answer == "y":
                self._sockfilename = "/var/run/vpp/{}.sock".format(
                    name.replace("/", "_")
                )
                # Remove a stale socket so VPP can create a fresh one.
                if os.path.exists(self._sockfilename):
                    os.remove(self._sockfilename)
                cmd = "vppctl create vhost-user socket {} server".format(
                    self._sockfilename
                )
                (ret, stdout, stderr) = vpputl.exec_command(cmd)
                if ret != 0:
                    raise RuntimeError(
                        "Couldn't execute the command {}, {}.".format(cmd, stderr)
                    )
                vintname = stdout.rstrip("\r\n")

                # The VM needs write access to the socket.
                cmd = "chmod 777 {}".format(self._sockfilename)
                (ret, stdout, stderr) = vpputl.exec_command(cmd)
                if ret != 0:
                    raise RuntimeError(
                        "Couldn't execute the command {}, {}.".format(cmd, stderr)
                    )

                interface = {
                    "name": name,
                    "virtualinterface": "{}".format(vintname),
                    "bridge": "{}".format(inum),
                }
                inum += 1
                interfaces_with_virtual_interfaces.append(interface)
                return interfaces_with_virtual_interfaces
def create_and_bridge_iperf_virtual_interface(self):
    """
    After asking the user some questions, create and bridge a
    virtual interface to be used with the iperf VM.

    """

    for i in self._nodes.items():
        node = i[1]

        # Show the current bridge and interface configuration
        print("\nThis the current bridge configuration:")
        ifaces = VPPUtil.show_bridge(node)
        question = "\nWould you like to keep this configuration [Y/n]? "
        answer = self._ask_user_yn(question, "y")
        if answer == "y":
            # Reuse the existing socket when it is still present.
            self._sockfilename = "/var/run/vpp/{}.sock".format(
                ifaces[0]["name"].replace("/", "_")
            )
            if os.path.exists(self._sockfilename):
                continue

        # Create a script that builds a bridge configuration with
        # physical interfaces and virtual interfaces
        ints_with_vints = self._iperf_vm_questions(node)
        content = ""
        for intf in ints_with_vints:
            vhoststr = "\n".join(
                [
                    "comment { The following command creates the socket }",
                    "comment { and returns a virtual interface }",
                    "comment {{ create vhost-user socket "
                    "/var/run/vpp/sock{}.sock server }}\n".format(intf["bridge"]),
                ]
            )

            setintdnstr = "set interface state {} down\n".format(intf["name"])

            setintbrstr = "set interface l2 bridge {} {}\n".format(
                intf["name"], intf["bridge"]
            )
            setvintbrstr = "set interface l2 bridge {} {}\n".format(
                intf["virtualinterface"], intf["bridge"]
            )

            # set interface state VirtualEthernet/0/0/0 up
            setintvststr = "set interface state {} up\n".format(
                intf["virtualinterface"]
            )

            # Bring the physical interface back up as well.
            setintupstr = "set interface state {} up\n".format(intf["name"])

            content += (
                vhoststr
                + setintdnstr
                + setintbrstr
                + setvintbrstr
                + setintvststr
                + setintupstr
            )

        # Write the content to the script
        rootdir = node["rootdir"]
        filename = rootdir + "/vpp/vpp-config/scripts/create_iperf_vm"
        with open(filename, "w+") as sfile:
            sfile.write(content)

        # Execute the script
        cmd = "vppctl exec {}".format(filename)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)

        # Typo fix in the user-facing message: "as been" -> "has been".
        print("\nA script has been created at {}".format(filename))
        print("This script can be run using the following:")
        print("vppctl exec {}\n".format(filename))
def destroy_iperf_vm(name):
    """
    Destroy the named iperf VM if it is currently running.

    (The original docstring was copy-pasted from the VM-creation
    method and described the opposite behavior.)

    :param name: The name of the VM to be destroyed
    :type name: str
    :raises RuntimeError: If a virsh command fails
    """

    # List the running domains so we only destroy an existing VM.
    cmd = "virsh list"
    (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
    if ret != 0:
        logging.debug(stderr)
        raise RuntimeError(
            "Couldn't execute the command {} : {}".format(cmd, stderr)
        )

    if re.findall(name, stdout):
        cmd = "virsh destroy {}".format(name)
        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
        if ret != 0:
            logging.debug(stderr)
            raise RuntimeError(
                "Couldn't execute the command {} : {}".format(cmd, stderr)
            )
2048 def create_iperf_vm(self, vmname):
2050 After asking the user some questions, create a VM and connect
2051 the interfaces to VPP interfaces
2055 # Read the iperf VM template file
2056 distro = VPPUtil.get_linux_distro()
2057 if distro[0] == "Ubuntu":
2058 tfilename = "{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template".format(
2062 tfilename = "{}/vpp/vpp-config/configs/iperf-centos.xml.template".format(
2066 with open(tfilename, "r") as tfile:
2067 tcontents = tfile.read()
2071 imagename = "{}/vpp/vpp-config/{}".format(self._rootdir, IPERFVM_IMAGE)
2072 isoname = "{}/vpp/vpp-config/{}".format(self._rootdir, IPERFVM_ISO)
2073 tcontents = tcontents.format(
2075 imagename=imagename,
2077 vhostsocketname=self._sockfilename,
2081 ifilename = "{}/vpp/vpp-config/{}".format(self._rootdir, IPERFVM_XML)
2082 with open(ifilename, "w+") as ifile:
2083 ifile.write(tcontents)
2086 cmd = "virsh create {}".format(ifilename)
2087 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
2089 logging.debug(stderr)
2091 "Couldn't execute the command {} : {}".format(cmd, stderr)