+ def process_vxlan_gpe_dump(vxlan_dump):
+ """Process vxlan_gpe dump.
+
+ :param vxlan_dump: Vxlan_gpe nterface dump.
+ :type vxlan_dump: dict
+ :returns: Processed vxlan_gpe interface dump.
+ :rtype: dict
+ """
+ if vxlan_dump['is_ipv6']:
+ vxlan_dump['local'] = \
+ ip_address(unicode(vxlan_dump['local']))
+ vxlan_dump['remote'] = \
+ ip_address(unicode(vxlan_dump['remote']))
+ else:
+ vxlan_dump['local'] = \
+ ip_address(unicode(vxlan_dump['local'][0:4]))
+ vxlan_dump['remote'] = \
+ ip_address(unicode(vxlan_dump['remote'][0:4]))
+ return vxlan_dump
+
+ if interface_name is not None:
+ sw_if_index = InterfaceUtil.get_interface_index(
+ node, interface_name)
+ else:
+ sw_if_index = int(Constants.BITWISE_NON_ZERO)
+
+ cmd = 'vxlan_gpe_tunnel_dump'
+ args = dict(sw_if_index=sw_if_index)
+ err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
+ host=node['host'])
+ with PapiSocketExecutor(node) as papi_exec:
+ details = papi_exec.add(cmd, **args).get_details(err_msg)
+
+ data = list() if interface_name is None else dict()
+ for dump in details:
+ if interface_name is None:
+ data.append(process_vxlan_gpe_dump(dump))
+ elif dump['sw_if_index'] == sw_if_index:
+ data = process_vxlan_gpe_dump(dump)
+ break
+
+ logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format(
+ vxlan_gpe_data=data))
+ return data
+
+ @staticmethod
+ def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
+ """Assign VPP interface to specific VRF/FIB table.
+
+ :param node: VPP node where the FIB and interface are located.
+ :param interface: Interface to be assigned to FIB.
+ :param table_id: VRF table ID.
+ :param ipv6: Assign to IPv6 table. Default False.
+ :type node: dict
+ :type interface: str or int
+ :type table_id: int
+ :type ipv6: bool
+ """
+ cmd = 'sw_interface_set_table'
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ is_ipv6=1 if ipv6 else 0,
+ vrf_id=int(table_id))
+ err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
+ ifc=interface)
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def set_linux_interface_mac(node, interface, mac, namespace=None,
+ vf_id=None):
+ """Set MAC address for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param mac: MAC to be assigned to interface.
+ :param namespace: Execute command in namespace. Optional
+ :param vf_id: Virtual Function id. Optional
+ :type node: dict
+ :type interface: str
+ :type mac: str
+ :type namespace: str
+ :type vf_id: int
+ """
+ mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \
+ if vf_id is not None else 'address {mac}'.format(mac=mac)
+ ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
+
+ cmd = ('{ns} ip link set {interface} {mac}'.
+ format(ns=ns_str, interface=interface, mac=mac_str))
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
+ def set_linux_interface_trust_on(node, interface, namespace=None,
+ vf_id=None):
+ """Set trust on (promisc) for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Execute command in namespace. Optional
+ :param vf_id: Virtual Function id. Optional
+ :type node: dict
+ :type interface: str
+ :type namespace: str
+ :type vf_id: int
+ """
+ trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \
+ if vf_id is not None else 'trust on'
+ ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
+
+ cmd = ('{ns} ip link set dev {interface} {trust}'.
+ format(ns=ns_str, interface=interface, trust=trust_str))
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
+ def set_linux_interface_spoof_off(node, interface, namespace=None,
+ vf_id=None):
+ """Set spoof off for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Execute command in namespace. Optional
+ :param vf_id: Virtual Function id. Optional
+ :type node: dict
+ :type interface: str
+ :type namespace: str
+ :type vf_id: int
+ """
+ spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \
+ if vf_id is not None else 'spoof off'
+ ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
+
+ cmd = ('{ns} ip link set dev {interface} {spoof}'.
+ format(ns=ns_str, interface=interface, spoof=spoof_str))
+ exec_cmd_no_error(node, cmd, sudo=True)
+
    @staticmethod
    def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
        """Init PCI device by creating VFs and bind them to vfio-pci for AVF
        driver testing on DUT.

        The sequence is order-sensitive: VPP is stopped first, the PF is
        re-bound to its kernel driver if needed, VFs are created, configured
        and bound to the uio driver, and each VF is registered in the
        topology.

        :param node: DUT node.
        :param ifc_key: Interface key from topology file.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :param osi_layer: OSI Layer type to initialize TG with.
            Default value "L2" sets linux interface spoof off.
        :type node: dict
        :type ifc_key: str
        :type numvfs: int
        :type osi_layer: str
        :returns: Virtual Function topology interface keys.
        :rtype: list
        :raises RuntimeError: If a reason preventing initialization is found.
        """
        # Read PCI address and driver.
        pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
        pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
        uio_driver = Topology.get_uio_driver(node)
        kernel_driver = Topology.get_interface_driver(node, ifc_key)
        # AVF only works on top of an Intel i40e-family PF driver.
        if kernel_driver not in ("i40e", "i40evf"):
            raise RuntimeError(
                "AVF needs i40e-compatible driver, not {driver} at node {host}"
                " ifc {ifc}".format(
                    driver=kernel_driver, host=node["host"], ifc=ifc_key))
        # Colon is escaped so the address survives the remote shell.
        current_driver = DUTSetup.get_pci_dev_driver(
            node, pf_pci_addr.replace(':', r'\:'))

        VPPUtil.stop_vpp_service(node)
        if current_driver != kernel_driver:
            # PCI device must be re-bound to kernel driver before creating VFs.
            DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
            # Stop VPP to prevent deadlock.
            # Unbind from current driver.
            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
            # Bind to kernel driver.
            DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)

        # Initialize PCI VFs.
        DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)

        vf_ifc_keys = []
        # Set MAC address and bind each virtual function to uio driver.
        for vf_id in range(numvfs):
            # NOTE(review): pf_mac_addr[1] (second octet) is skipped and the
            # last octet is replaced by the VF id — presumably to derive a
            # unique per-VF MAC from the PF MAC; confirm this scheme is
            # intended.
            vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2],
                                    pf_mac_addr[3], pf_mac_addr[4],
                                    pf_mac_addr[5], "{:02x}".format(vf_id)])

            # Backticks are expanded by the remote shell into the PF's linux
            # netdev name under its sysfs PCI path.
            pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\
                format(pci=pf_pci_addr)
            InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
                                                       vf_id=vf_id)
            if osi_layer == 'L2':
                InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
                                                            vf_id=vf_id)
            InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
                                                  vf_id=vf_id)

            # Move the VF from its default driver to the uio driver VPP uses.
            DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
            DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)

            # Add newly created ports into topology file
            vf_ifc_name = '{pf_if_key}_vf'.format(pf_if_key=ifc_key)
            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
            vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
            Topology.update_interface_name(node, vf_ifc_key,
                                           vf_ifc_name+str(vf_id+1))
            Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
            Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
            vf_ifc_keys.append(vf_ifc_key)

        return vf_ifc_keys
+
+ @staticmethod
+ def vpp_sw_interface_rx_placement_dump(node):
+ """Dump VPP interface RX placement on node.
+
+ :param node: Node to run command on.
+ :type node: dict
+ :returns: Thread mapping information as a list of dictionaries.
+ :rtype: list
+ """
+ cmd = 'sw_interface_rx_placement_dump'
+ err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
+ cmd=cmd, host=node['host'])
+ with PapiSocketExecutor(node) as papi_exec:
+ for ifc in node['interfaces'].values():
+ if ifc['vpp_sw_index'] is not None:
+ papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
+ details = papi_exec.get_details(err_msg)
+ return sorted(details, key=lambda k: k['sw_if_index'])
+
+ @staticmethod
+ def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
+ worker_id):
+ """Set interface RX placement to worker on node.
+
+ :param node: Node to run command on.
+ :param sw_if_index: VPP SW interface index.
+ :param queue_id: VPP interface queue ID.
+ :param worker_id: VPP worker ID (indexing from 0).
+ :type node: dict
+ :type sw_if_index: int
+ :type queue_id: int
+ :type worker_id: int
+ :raises RuntimeError: If failed to run command on host or if no API
+ reply received.
+ """
+ cmd = 'sw_interface_set_rx_placement'
+ err_msg = "Failed to set interface RX placement to worker on host " \
+ "{host}!".format(host=node['host'])
+ args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
+ worker_id=worker_id)
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def vpp_round_robin_rx_placement(node, prefix):
+ """Set Round Robin interface RX placement on all worker threads
+ on node.