def vpp_get_interface_data(node, interface=None):
    """Fetch interface data from a VPP node via PAPI.

    Without the optional argument, data for every interface is returned.
    With a name (str) or sw_if_index (int) given, only the matching
    interface's data is returned.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: List of dictionaries containing data for each interface, or a
        single dictionary for the specified interface.
    :rtype: list or dict
    :raises TypeError: If the data type of interface is neither str nor int.
    """
    def _normalize(dump):
        """Convert PAPI enum/MAC values in one dump entry to plain types.

        :param dump: Single interface dump entry.
        :type dump: dict
        :returns: The same entry, normalized in place.
        :rtype: dict
        """
        dump[u"l2_address"] = str(dump[u"l2_address"])
        dump[u"b_dmac"] = str(dump[u"b_dmac"])
        dump[u"b_smac"] = str(dump[u"b_smac"])
        dump[u"flags"] = dump[u"flags"].value
        dump[u"type"] = dump[u"type"].value
        dump[u"link_duplex"] = dump[u"link_duplex"].value
        sub_flags = dump[u"sub_if_flags"]
        dump[u"sub_if_flags"] = sub_flags.value \
            if hasattr(sub_flags, u"value") else int(sub_flags)
        return dump

    if interface is None:
        param = u""
    elif isinstance(interface, str):
        param = u"interface_name"
    elif isinstance(interface, int):
        param = u"sw_if_index"
    else:
        raise TypeError(f"Wrong interface format {interface}")

    cmd = u"sw_interface_dump"
    args = dict(
        name_filter_valid=False,
        name_filter=u""
    )
    err_msg = f"Failed to get interface dump on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(cmd, **args).get_details(err_msg)
    logger.debug(f"Received data:\n{details!r}")

    if interface is None:
        data = [_normalize(dump) for dump in details]
    else:
        # Interface names from PAPI may be NUL-padded; strip before compare.
        data = dict()
        wanted = str(interface)
        for dump in details:
            if str(dump.get(param)).rstrip(u"\x00") == wanted:
                data = _normalize(dump)
                break

    logger.debug(f"Interface data:\n{data}")
    return data
+
@staticmethod
def vpp_get_interface_name(node, sw_if_index):
    """Return the interface name for a SW interface index, read from an
    actual interface dump.

    When the index belongs to a sub-interface, the name of its parent
    (sup) interface is returned instead.

    :param node: VPP node to get interface data from.
    :param sw_if_index: SW interface index of the specific interface.
    :type node: dict
    :type sw_if_index: int
    :returns: Name of the given interface.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
    sup_index = if_data[u"sup_sw_if_index"]
    if sup_index != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(node, sup_index)

    return if_data.get(u"interface_name")
+
@staticmethod
def vpp_get_interface_sw_index(node, interface_name):
    """Get the SW interface index for the given interface name from an
    actual interface dump.

    :param node: VPP node to get interface data from.
    :param interface_name: Interface name.
    :type node: dict
    :type interface_name: str
    :returns: SW interface index of the given interface, or None when the
        interface is not present in the dump.
    :rtype: int
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

    return if_data.get(u"sw_if_index")
+
@staticmethod
def vpp_get_interface_mac(node, interface):
    """Return the MAC address of an interface, read from an actual
    interface dump.

    When the interface is a sub-interface, the MAC of its parent (sup)
    interface is returned.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :returns: MAC address.
    :rtype: str
    """
    if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
    sup_index = if_data[u"sup_sw_if_index"]
    if sup_index != if_data[u"sw_if_index"]:
        if_data = InterfaceUtil.vpp_get_interface_data(node, sup_index)

    return if_data.get(u"l2_address")
+
@staticmethod
def tg_set_interface_driver(node, pci_addr, driver):
    """Bind a PCI device on the TG node to the given kernel driver.

    This is a no-op when the device is already bound to the requested
    driver.

    :param node: Node to set interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :param driver: Driver name.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If unbinding from the current driver fails.
    :raises RuntimeError: If binding to the new driver fails.
    """
    current = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
    if current == driver:
        return

    ssh = SSH()
    ssh.connect(node)

    # Release the device from whatever driver currently owns it.
    if current is not None:
        cmd = f"sh -c \"echo {pci_addr} > " \
            f"/sys/bus/pci/drivers/{current}/unbind\""
        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Hand the device over to the requested driver.
    cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
+
@staticmethod
def tg_get_interface_driver(node, pci_addr):
    """Look up which kernel driver a PCI device on the TG node is bound to.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    driver = DUTSetup.get_pci_dev_driver(node, pci_addr)
    return driver
+
@staticmethod
def tg_set_interfaces_udev_rules(node):
    """Set udev rules for interfaces.

    Create a udev rules file in /etc/udev/rules.d with one rule per
    interface used by the TG node, matching by MAC address, so that an
    interface keeps its name after an unbind/bind cycle. This must be
    called after TG has set a name for each port in the topology
    dictionary.
    udev rule example
    SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
    NAME="eth1"

    :param node: Node to set udev rules on (must be TG node).
    :type node: dict
    :raises RuntimeError: If setting of udev rules fails.
    """
    ssh = SSH()
    ssh.connect(node)

    # Start from a clean rules file.
    cmd = f"rm -f {InterfaceUtil.__UDEV_IF_RULES_FILE}"
    ret_code, _, _ = ssh.exec_command_sudo(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    for interface in node[u"interfaces"].values():
        rule = u'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
            u'==\\"' + interface[u"mac_address"] + u'\\", NAME=\\"' + \
            interface[u"name"] + u'\\"'
        # The redirection must stay inside the sh -c quotes so the append
        # runs with sudo privileges. The previous form closed the quote
        # before ">>" and left a stray trailing quote, producing an
        # unterminated shell command.
        cmd = f"sh -c \"echo '{rule}' >> " \
            f"{InterfaceUtil.__UDEV_IF_RULES_FILE}\""

        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    # Best-effort restart so the new rules take effect; failure here is
    # deliberately not fatal (matches original behavior).
    cmd = u"/etc/init.d/udev restart"
    ssh.exec_command_sudo(cmd)
+
@staticmethod
def tg_set_interfaces_default_driver(node):
    """Rebind every TG interface to its default driver from the topology
    yaml file.

    :param node: Node to setup interfaces driver on (must be TG node).
    :type node: dict
    """
    for iface in node[u"interfaces"].values():
        InterfaceUtil.tg_set_interface_driver(
            node, iface[u"pci_address"], iface[u"driver"]
        )
+
@staticmethod
def update_vpp_interface_data_on_node(node):
    """Update VPP-generated interface data for a given node in DICT__nodes.

    Updates interface names, SW interface indices and other details that
    only exist once VPP is running. The interface list is dumped over the
    Python API and matched to the topology entries by MAC address.

    :param node: Node selected from DICT__nodes.
    :type node: dict
    """
    dumped = InterfaceUtil.vpp_get_interface_data(node)
    # Index the dump by MAC so topology entries can be paired up.
    by_mac = {ifc[u"l2_address"]: ifc for ifc in dumped}

    for if_name, if_data in node[u"interfaces"].items():
        ifc_dict = by_mac.get(if_data[u"mac_address"])
        if ifc_dict is None:
            logger.trace(
                f"Interface {if_name} not found by MAC "
                f"{if_data[u'mac_address']}"
            )
            if_data[u"vpp_sw_index"] = None
            continue
        if_data[u"name"] = ifc_dict[u"interface_name"]
        if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
        if_data[u"mtu"] = ifc_dict[u"mtu"][0]
        logger.trace(
            f"Interface {if_name} found by MAC "
            f"{if_data[u'mac_address']}"
        )
+
+ @staticmethod
+ def update_nic_interface_names(node):
+ """Update interface names based on nic type and PCI address.
+
+ This method updates interface names in the same format as VPP does.
+
+ :param node: Node dictionary.
+ :type node: dict
+ """
+ for ifc in node[u"interfaces"].values():
+ if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
+ loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
+ f"{int(if_pci[3], 16):x}"
+ if ifc[u"model"] == u"Intel-XL710":
+ ifc[u"name"] = f"FortyGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Intel-X710":
+ ifc[u"name"] = f"TenGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Intel-X520-DA2":
+ ifc[u"name"] = f"TenGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Cisco-VIC-1385":
+ ifc[u"name"] = f"FortyGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Cisco-VIC-1227":
+ ifc[u"name"] = f"TenGigabitEthernet{loc}"
+ else:
+ ifc[u"name"] = f"UnknownEthernet{loc}"
+
@staticmethod
def update_nic_interface_names_on_all_duts(nodes):
    """Run update_nic_interface_names on every DUT in the topology.

    This method updates interface names in the same format as VPP does.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] != NodeType.DUT:
            continue
        InterfaceUtil.update_nic_interface_names(node)
+
@staticmethod
def update_tg_interface_data_on_node(node, skip_tg_udev=False):
    """Refresh interface names for a TG/linux node in DICT__nodes.

    .. note::
        # for dev in `ls /sys/class/net/`;
        > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
        "52:54:00:9f:82:63": "eth0"
        "52:54:00:77:ae:a9": "eth1"
        "52:54:00:e1:8a:0f": "eth2"
        "00:00:00:00:00:00": "lo"

    :param node: Node selected from DICT__nodes.
    :param skip_tg_udev: Skip udev rename on TG node.
    :type node: dict
    :type skip_tg_udev: bool
    :raises RuntimeError: If getting of interface name and MAC fails.
    """
    # Bind interfaces to the drivers specified in the yaml file first.
    InterfaceUtil.tg_set_interfaces_default_driver(node)

    ssh = SSH()
    ssh.connect(node)

    # Emit one '"mac": "devname"' line per device, then parse the whole
    # output as a JSON object keyed by MAC address.
    cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
        u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'
    ret_code, stdout, _ = ssh.exec_command(cmd)
    if int(ret_code) != 0:
        raise RuntimeError(u"Get interface name and MAC failed")

    mac_to_name = JsonParser().parse_data(
        u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
    )
    for iface in node[u"interfaces"].values():
        new_name = mac_to_name.get(iface[u"mac_address"])
        if new_name is None:
            continue
        iface[u"name"] = new_name

    # Pin the discovered names across future driver rebinds.
    if not skip_tg_udev:
        InterfaceUtil.tg_set_interfaces_udev_rules(node)
+
@staticmethod
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
    information from the node.

    :param node: Node from topology.
    :type node: dict
    :returns: Nothing.
    :raises ValueError: If numa node is less than 0 on a multi-numa host.
    :raises RuntimeError: If update of numa node failed.
    """
    def check_cpu_node_count(node_n, val):
        # Sysfs reports -1 when the device has no NUMA affinity; on a
        # single-numa host that can safely be mapped to node 0, otherwise
        # the value is genuinely unknown and we raise.
        val = int(val)
        if val < 0:
            if CpuUtils.cpu_node_count(node_n) == 1:
                val = 0
            else:
                raise ValueError
        return val
    ssh = SSH()
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        ssh.connect(node)
        cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
        # Retry the read up to three times; the for..else raises only
        # when no attempt succeeded (loop finished without break).
        for _ in range(3):
            ret, out, _ = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    numa_node = check_cpu_node_count(node, out)
                except ValueError:
                    logger.trace(
                        f"Reading numa location failed for: {if_pci}"
                    )
                else:
                    # Successful parse: store and stop retrying.
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node
                    )
                    break
        else:
            raise RuntimeError(f"Update numa node failed for: {if_pci}")
+
@staticmethod
def update_all_numa_nodes(nodes, skip_tg=False):
    """Update numa node information for every interface of every node in
    the topology, based on data read from the nodes themselves.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node
    :type nodes: dict
    :type skip_tg: bool
    :returns: Nothing.
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.TG and skip_tg:
            continue
        if node[u"type"] in (NodeType.DUT, NodeType.TG):
            InterfaceUtil.iface_update_numa_node(node)
+
@staticmethod
def update_all_interface_data_on_all_nodes(
        nodes, skip_tg=False, skip_tg_udev=False, numa_node=False):
    """Update interface names on all nodes in DICT__nodes.

    Queries the interface lists of all nodes mentioned in the topology
    dictionary and updates the topology accordingly.

    :param nodes: Nodes in the topology.
    :param skip_tg: Skip TG node.
    :param skip_tg_udev: Skip udev rename on TG node.
    :param numa_node: Retrieve numa_node location.
    :type nodes: dict
    :type skip_tg: bool
    :type skip_tg_udev: bool
    :type numa_node: bool
    """
    for node_data in nodes.values():
        is_dut = node_data[u"type"] == NodeType.DUT
        is_tg = node_data[u"type"] == NodeType.TG and not skip_tg
        if is_dut:
            InterfaceUtil.update_vpp_interface_data_on_node(node_data)
        elif is_tg:
            InterfaceUtil.update_tg_interface_data_on_node(
                node_data, skip_tg_udev)

        if numa_node and (is_dut or is_tg):
            InterfaceUtil.iface_update_numa_node(node_data)
+
@staticmethod
def create_vlan_subinterface(node, interface, vlan):
    """Create a VLAN sub-interface on a node and register it in topology.

    :param node: Node to add VLAN subinterface on.
    :param interface: Interface name or index on which to create the VLAN
        subinterface.
    :param vlan: VLAN ID of the subinterface to be created.
    :type node: dict
    :type interface: str or int
    :type vlan: int
    :returns: Name and index of created subinterface.
    :rtype: tuple
    :raises RuntimeError: If the VLAN subinterface cannot be created on
        the node or the interface cannot be converted.
    """
    parent_index = InterfaceUtil.get_interface_index(node, interface)
    err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"

    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(
            u"create_vlan_subif", sw_if_index=parent_index,
            vlan_id=int(vlan)
        ).get_sw_if_index(err_msg)

    # Register the new port in the topology under a generated key.
    if_key = Topology.add_new_port(node, u"vlan_subif")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{vlan}", sw_if_index
+
@staticmethod
def create_vxlan_interface(node, vni, source_ip, destination_ip):
    """Create a VXLAN tunnel interface and return its sw_if_index.

    :param node: Node where to create VXLAN interface.
    :param vni: VXLAN Network Identifier.
    :param source_ip: Source IP of a VXLAN Tunnel End Point.
    :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
    :type node: dict
    :type vni: int
    :type source_ip: str
    :type destination_ip: str
    :returns: SW IF INDEX of created interface.
    :rtype: int
    :raises RuntimeError: If the VxLAN interface cannot be created on the
        node.
    """
    src = ip_address(source_ip)
    dst = ip_address(destination_ip)

    args = dict(
        is_add=1,
        is_ipv6=1 if src.version == 6 else 0,
        instance=Constants.BITWISE_NON_ZERO,
        src_address=src.packed,
        dst_address=dst.packed,
        mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
        encap_vrf_id=0,
        decap_next_index=Constants.BITWISE_NON_ZERO,
        vni=int(vni)
    )
    err_msg = f"Failed to create VXLAN tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(
            u"vxlan_add_del_tunnel", **args
        ).get_sw_if_index(err_msg)

    # Register the new tunnel port in the topology.
    if_key = Topology.add_new_port(node, u"vxlan_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return sw_if_index
+
@staticmethod
def set_vxlan_bypass(node, interface=None):
    """Enable the 'ip4-vxlan-bypass' graph node on a given interface.

    With vxlan-bypass attached, incoming VXLAN packets are validated and
    steered past the ip4-lookup, ip4-local and ip4-udp-lookup nodes,
    speeding up VXLAN forwarding at a small, bounded cost for non-VXLAN
    traffic.

    :param node: Node where to set VXLAN bypass.
    :param interface: Numeric index or name string of a specific interface.
    :type node: dict
    :type interface: int or str
    :raises RuntimeError: If setting VXLAN bypass on the interface fails.
    """
    err_msg = f"Failed to set VXLAN bypass on interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(
            u"sw_interface_set_vxlan_bypass",
            is_ipv6=0,
            sw_if_index=InterfaceUtil.get_interface_index(node, interface),
            enable=1
        ).get_replies(err_msg)
+
@staticmethod
def vxlan_dump(node, interface=None):
    """Return VXLAN tunnel data for one interface or for all of them.

    :param node: VPP node to get interface data from.
    :param interface: Numeric index or name string of a specific interface.
        If None, information about all VxLAN interfaces is returned.
    :type node: dict
    :type interface: int or str
    :returns: Dictionary containing data for the given VxLAN interface or,
        if interface=None, a list of dictionaries with all VxLAN
        interfaces.
    :rtype: dict or list
    :raises TypeError: If the data type of interface is neither str nor
        int.
    """
    def _convert(entry):
        """Convert raw address bytes of one dump entry to address objects.

        :param entry: Vxlan interface dump entry.
        :type entry: dict
        :returns: Processed vxlan interface dump entry.
        :rtype: dict
        """
        if entry[u"is_ipv6"]:
            entry[u"src_address"] = ip_address(entry[u"src_address"])
            entry[u"dst_address"] = ip_address(entry[u"dst_address"])
        else:
            # IPv4 addresses occupy only the first four bytes.
            entry[u"src_address"] = ip_address(entry[u"src_address"][0:4])
            entry[u"dst_address"] = ip_address(entry[u"dst_address"][0:4])
        return entry

    sw_if_index = int(Constants.BITWISE_NON_ZERO) if interface is None \
        else InterfaceUtil.get_interface_index(node, interface)

    err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(
            u"vxlan_tunnel_dump", sw_if_index=sw_if_index
        ).get_details(err_msg)

    if interface is None:
        data = [_convert(entry) for entry in details]
    else:
        data = dict()
        for entry in details:
            if entry[u"sw_if_index"] == sw_if_index:
                data = _convert(entry)
                break

    logger.debug(f"VXLAN data:\n{data}")
    return data
+
@staticmethod
def create_subinterface(
        node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
        type_subif=None):
    """Create sub-interface on node. It is possible to set required
    sub-interface type and VLAN tag(s).

    :param node: Node to add sub-interface.
    :param interface: Interface name on which create sub-interface.
    :param sub_id: ID of the sub-interface to be created.
    :param outer_vlan_id: Optional outer VLAN ID.
    :param inner_vlan_id: Optional inner VLAN ID.
    :param type_subif: Optional type of sub-interface. Values supported by
        VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
        [default_sub]
    :type node: dict
    :type interface: str or int
    :type sub_id: int
    :type outer_vlan_id: int
    :type inner_vlan_id: int
    :type type_subif: str
    :returns: Name and index of created sub-interface.
    :rtype: tuple
    :raises RuntimeError: If it is not possible to create sub-interface.
    """
    # type_subif is documented as optional: guard against None before
    # splitting (previously this raised AttributeError when omitted).
    subif_types = type_subif.split() if type_subif else list()

    flags = 0
    if u"no_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
    if u"one_tag" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
    if u"two_tags" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
    if u"dot1ad" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
    if u"exact_match" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
    if u"default_sub" in subif_types:
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
    if type_subif == u"default_sub":
        # A pure default sub-interface matches any inner/outer VLAN IDs.
        flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
            | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY

    cmd = u"create_subif"
    args = dict(
        sw_if_index=InterfaceUtil.get_interface_index(node, interface),
        sub_id=int(sub_id),
        # flags is a plain int when no flag was set, an enum otherwise.
        sub_if_flags=flags.value if hasattr(flags, u"value")
        else int(flags),
        outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
        inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
    )
    err_msg = f"Failed to create sub-interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)

    # Register the new sub-interface in the topology.
    if_key = Topology.add_new_port(node, u"subinterface")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return f"{interface}.{sub_id}", sw_if_index
+
@staticmethod
def create_gre_tunnel_interface(node, source_ip, destination_ip):
    """Create a GRE tunnel interface on a node and register it in topology.

    :param node: VPP node to add tunnel interface.
    :param source_ip: Source of the GRE tunnel.
    :param destination_ip: Destination of the GRE tunnel.
    :type node: dict
    :type source_ip: str
    :type destination_ip: str
    :returns: Name and index of created GRE tunnel interface.
    :rtype: tuple
    :raises RuntimeError: If unable to create GRE tunnel interface.
    """
    tunnel = dict(
        type=0,
        instance=Constants.BITWISE_NON_ZERO,
        src=str(source_ip),
        dst=str(destination_ip),
        outer_fib_id=0,
        session_id=0
    )
    err_msg = f"Failed to create GRE tunnel interface " \
        f"on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(
            u"gre_tunnel_add_del", is_add=1, tunnel=tunnel
        ).get_sw_if_index(err_msg)

    # Register the new tunnel port in the topology.
    if_key = Topology.add_new_port(node, u"gre_tunnel")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)

    return ifc_name, sw_if_index
+
@staticmethod
def vpp_create_loopback(node, mac=None):
    """Create a loopback interface on a VPP node and register it in
    topology.

    :param node: Node to create loopback interface on.
    :param mac: Optional MAC address for loopback interface.
    :type node: dict
    :type mac: str
    :returns: SW interface index.
    :rtype: int
    :raises RuntimeError: If it is not possible to create loopback on the
        node.
    """
    err_msg = f"Failed to create loopback interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(
            u"create_loopback",
            mac_address=L2Util.mac_to_bin(mac) if mac else 0
        ).get_sw_if_index(err_msg)

    if_key = Topology.add_new_port(node, u"loopback")
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    if mac:
        # Read back the MAC actually programmed by VPP and store it.
        mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
        Topology.update_interface_mac_address(node, if_key, mac)

    return sw_if_index
+
@staticmethod
def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
    """Create a bond interface on a VPP node and register it in topology.

    :param node: DUT node from topology.
    :param mode: Link bonding mode.
    :param load_balance: Load balance (optional, valid for xor and lacp
        modes, otherwise ignored).
    :param mac: MAC address to assign to the bond interface (optional).
    :type node: dict
    :type mode: str
    :type load_balance: str
    :type mac: str
    :returns: Interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create bond interface
        on the node.
    """
    # Translate the textual mode/algorithm names to PAPI enum values.
    mode_id = getattr(
        LinkBondMode,
        f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
    ).value
    lb_id = 0 if load_balance is None else getattr(
        LinkBondLoadBalanceAlgo,
        f"BOND_API_LB_ALGO_{load_balance.upper()}"
    ).value

    err_msg = f"Failed to create bond interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(
            u"bond_create",
            id=int(Constants.BITWISE_NON_ZERO),
            use_custom_mac=bool(mac is not None),
            mac_address=L2Util.mac_to_bin(mac) if mac else None,
            mode=mode_id,
            lb=lb_id,
            numa_only=False
        ).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
    )
    return Topology.get_interface_by_sw_index(node, sw_if_index)
+
@staticmethod
def add_eth_interface(
        node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
        host_if_key=None):
    """Add ethernet interface to current topology.

    Exactly one of ifc_name / sw_if_index is expected; the missing one is
    resolved from a live VPP dump. When host_if_key is given, numa node
    and PCI address are inherited from that host interface.

    :param node: DUT node from topology.
    :param ifc_name: Name of the interface.
    :param sw_if_index: SW interface index.
    :param ifc_pfx: Interface key prefix.
    :param host_if_key: Host interface key from topology file.
    :type node: dict
    :type ifc_name: str
    :type sw_if_index: int
    :type ifc_pfx: str
    :type host_if_key: str
    """
    if_key = Topology.add_new_port(node, ifc_pfx)

    # Use explicit None checks: sw_if_index 0 is a valid VPP index and
    # must not be mistaken for "not provided" by a truthiness test.
    if ifc_name is not None and sw_if_index is None:
        sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
            node, ifc_name)
    Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
    if sw_if_index is not None and ifc_name is None:
        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
    Topology.update_interface_name(node, if_key, ifc_name)
    ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
    Topology.update_interface_mac_address(node, if_key, ifc_mac)
    if host_if_key is not None:
        # Inherit placement info from the underlying host interface.
        Topology.set_interface_numa_node(
            node, if_key, Topology.get_interface_numa_node(
                node, host_if_key
            )
        )
        Topology.update_interface_pci_address(
            node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
        )
+
@staticmethod
def vpp_create_avf_interface(node, if_key, num_rx_queues=None):
    """Create an AVF interface on a VPP node and register it in topology.

    :param node: DUT node from topology.
    :param if_key: Interface key from topology file of interface
        to be bound to i40evf driver.
    :param num_rx_queues: Number of RX queues.
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :returns: AVF interface key (name) in topology.
    :rtype: str
    :raises RuntimeError: If it is not possible to create AVF interface
        on the node.
    """
    # Turn on AVF debug logging so failures are diagnosable.
    PapiSocketExecutor.run_cli_cmd(
        node, u"set logging class avf level debug"
    )

    vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
    err_msg = f"Failed to create AVF interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(
            u"avf_create",
            pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
            enable_elog=0,
            rxq_num=int(num_rx_queues) if num_rx_queues else 0,
            rxq_size=0,
            txq_size=0
        ).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
        host_if_key=if_key
    )

    return Topology.get_interface_by_sw_index(node, sw_if_index)
+
@staticmethod
def vpp_create_rdma_interface(
        node, if_key, num_rx_queues=None, mode=u"auto"):
    """Create an RDMA interface on a VPP node and register it in topology.

    :param node: DUT node from topology.
    :param if_key: Physical interface key from topology file of interface
        to be bound to rdma-core driver.
    :param num_rx_queues: Number of RX queues.
    :param mode: RDMA interface mode - auto/ibv/dv.
    :type node: dict
    :type if_key: str
    :type num_rx_queues: int
    :type mode: str
    :returns: Interface key (name) in topology file.
    :rtype: str
    :raises RuntimeError: If it is not possible to create RDMA interface
        on the node.
    """
    pci_addr = Topology.get_interface_pci_addr(node, if_key)
    err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        sw_if_index = papi_exec.add(
            u"rdma_create",
            name=InterfaceUtil.pci_to_eth(node, pci_addr),
            host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
            rxq_num=int(num_rx_queues) if num_rx_queues else 0,
            rxq_size=1024,
            txq_size=1024,
            mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
        ).get_sw_if_index(err_msg)

    InterfaceUtil.add_eth_interface(
        node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
        host_if_key=if_key
    )

    return Topology.get_interface_by_sw_index(node, sw_if_index)
+
@staticmethod
def vpp_enslave_physical_interface(node, interface, bond_if):
    """Enslave a physical interface to a bond interface on a VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :param bond_if: Bond interface key from topology file.
    :type node: dict
    :type interface: str
    :type bond_if: str
    :raises RuntimeError: If it is not possible to enslave the physical
        interface to the bond interface on the node.
    """
    err_msg = f"Failed to enslave physical interface {interface} to bond " \
        f"interface {bond_if} on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        papi_exec.add(
            u"bond_enslave",
            sw_if_index=Topology.get_interface_sw_index(node, interface),
            bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
            is_passive=False,
            is_long_timeout=False
        ).get_reply(err_msg)
+
@staticmethod
def vpp_show_bond_data_on_node(node, verbose=False):
    """Log (detailed) bond information from a VPP node.

    :param node: DUT node from topology.
    :param verbose: Whether per-slave details should be included.
    :type node: dict
    :type verbose: bool
    """
    err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(
            u"sw_interface_bond_dump"
        ).get_details(err_msg)

    data = f"Bond data on node {node[u'host']}:\n"
    for bond in details:
        mode = bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
        lb = bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
        data += f"{bond[u'interface_name']}\n"
        data += f" mode: {mode}\n"
        data += f" load balance: {lb}\n"
        data += f" number of active slaves: {bond[u'active_slaves']}\n"
        if verbose:
            slave_data = InterfaceUtil.vpp_bond_slave_dump(
                node, Topology.get_interface_by_sw_index(
                    node, bond[u"sw_if_index"]
                )
            )
            # Active (non-passive) slaves are listed first.
            for slave in slave_data:
                if not slave[u"is_passive"]:
                    data += f" {slave[u'interface_name']}\n"
        data += f" number of slaves: {bond[u'slaves']}\n"
        if verbose:
            for slave in slave_data:
                data += f" {slave[u'interface_name']}\n"
        data += f" interface id: {bond[u'id']}\n"
        data += f" sw_if_index: {bond[u'sw_if_index']}\n"
    logger.info(data)
+
@staticmethod
def vpp_bond_slave_dump(node, interface):
    """Return slave interface data of a bond interface on a VPP node.

    :param node: DUT node from topology.
    :param interface: Physical interface key from topology file.
    :type node: dict
    :type interface: str
    :returns: Bond slave interface data.
    :rtype: dict
    """
    err_msg = f"Failed to get slave dump on host {node[u'host']}"
    with PapiSocketExecutor(node) as papi_exec:
        details = papi_exec.add(
            u"sw_interface_slave_dump",
            sw_if_index=Topology.get_interface_sw_index(node, interface)
        ).get_details(err_msg)

    logger.debug(f"Slave data:\n{details}")
    return details
+
+ @staticmethod
+ def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
+ """Show (detailed) bond information on all VPP nodes in DICT__nodes.