-# Copyright (c) 2016 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Interface util library"""
+"""Interface util library."""
-from time import time, sleep
+from time import sleep
+from enum import IntEnum
+from ipaddress import ip_address
from robot.api import logger
-from resources.libraries.python.ssh import SSH
-from resources.libraries.python.IPUtil import convert_ipv4_netmask_prefix
-from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import NodeType, Topology
-from resources.libraries.python.VatExecutor import VatExecutor, VatTerminal
-from resources.libraries.python.VatJsonUtil import VatJsonUtil
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.IPAddress import IPAddress
+from resources.libraries.python.L2Util import L2Util
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.parsers.JsonParser import JsonParser
+from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
+from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.VPPUtil import VPPUtil
+
+
class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    NOTE(review): values presumably mirror VPP's binary-API enum
    ``if_status_flags`` — confirm against VPP's interface_types.api.
    """
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    IF_STATUS_API_FLAG_LINK_UP = 2
+
+
class MtuProto(IntEnum):
    """MTU protocol."""
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
    # Count of protocol entries (sentinel), not a protocol itself.
    MTU_PROTO_API_N = 4
+
+
class LinkDuplex(IntEnum):
    """Link duplex."""
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2
+
+
class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Bit flags (powers of two); values may be OR-ed together.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256
+
+
class RxMode(IntEnum):
    """RX mode."""
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4
+
class IfType(IntEnum):
    """Interface type."""
    # A hw interface
    IF_API_TYPE_HARDWARE = 0
    # A sub-interface
    IF_API_TYPE_SUB = 1
    # A point-to-point interface (per name; confirm against VPP API)
    IF_API_TYPE_P2P = 2
    # A pipe interface (per name; confirm against VPP API)
    IF_API_TYPE_PIPE = 3
-class InterfaceUtil(object):
+
class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm."""
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    # RR/BC/AB correspond to LinkBondMode's
    # round-robin/broadcast/active-backup naming.
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5
+
+
class LinkBondMode(IntEnum):
    """Link bonding mode.

    NOTE: values start at 1, unlike most enums in this module.
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5
+
+
class RdmaMode(IntEnum):
    """RDMA interface mode.

    NOTE(review): IBV/DV presumably stand for ibverbs / direct-verbs —
    confirm against the VPP rdma plugin documentation.
    """
    RDMA_API_MODE_AUTO = 0
    RDMA_API_MODE_IBV = 1
    RDMA_API_MODE_DV = 2
+
+
class AfXdpMode(IntEnum):
    """AF_XDP interface mode.

    NOTE(review): AUTO presumably lets the driver pick between copy and
    zero-copy — confirm against the VPP af_xdp plugin documentation.
    """
    AF_XDP_API_MODE_AUTO = 0
    AF_XDP_API_MODE_COPY = 1
    AF_XDP_API_MODE_ZERO_COPY = 2
+
+
+class InterfaceUtil:
"""General utilities for managing interfaces"""
- __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules'
+ @staticmethod
+ def pci_to_int(pci_str):
+ """Convert PCI address from string format (0000:18:0a.0) to
+ integer representation (169345024).
+
+ :param pci_str: PCI address in string representation.
+ :type pci_str: str
+ :returns: Integer representation of PCI address.
+ :rtype: int
+ """
+ pci = list(pci_str.split(u":")[0:2])
+ pci.extend(pci_str.split(u":")[2].split(u"."))
+
+ return (int(pci[0], 16) | int(pci[1], 16) << 16 |
+ int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
+
+ @staticmethod
+ def pci_to_eth(node, pci_str):
+ """Convert PCI address on DUT to Linux ethernet name.
+
+ :param node: DUT node
+ :param pci_str: PCI address.
+ :type node: dict
+ :type pci_str: str
+ :returns: Ethernet name.
+ :rtype: str
+ """
+ cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
+ try:
+ stdout, _ = exec_cmd_no_error(node, cmd)
+ except RuntimeError:
+ raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")
+
+ return stdout.strip()
+
+ @staticmethod
+ def get_interface_index(node, interface):
+ """Get interface sw_if_index from topology file.
+
+ :param node: Node where the interface is.
+ :param interface: Numeric index or name string of a specific interface.
+ :type node: dict
+ :type interface: str or int
+ :returns: SW interface index.
+ :rtype: int
+ """
+ try:
+ sw_if_index = int(interface)
+ except ValueError:
+ sw_if_index = Topology.get_interface_sw_index(node, interface)
+ if sw_if_index is None:
+ sw_if_index = \
+ Topology.get_interface_sw_index_by_name(node, interface)
+ except TypeError as err:
+ raise TypeError(f"Wrong interface format {interface}") from err
+
+ return sw_if_index
@staticmethod
- def set_interface_state(node, interface, state, if_type="key"):
+ def set_interface_state(node, interface, state, if_type=u"key"):
"""Set interface state on a node.
Function can be used for DUTs as well as for TGs.
:raises ValueError: If the state of interface is unexpected.
:raises ValueError: If the node has an unknown node type.
"""
-
- if if_type == "key":
- if isinstance(interface, basestring):
+ if if_type == u"key":
+ if isinstance(interface, str):
sw_if_index = Topology.get_interface_sw_index(node, interface)
iface_name = Topology.get_interface_name(node, interface)
else:
sw_if_index = interface
- elif if_type == "name":
+ elif if_type == u"name":
iface_key = Topology.get_interface_by_name(node, interface)
if iface_key is not None:
sw_if_index = Topology.get_interface_sw_index(node, iface_key)
iface_name = interface
else:
- raise ValueError("if_type unknown: {}".format(if_type))
+ raise ValueError(f"Unknown if_type: {if_type}")
- if node['type'] == NodeType.DUT:
- if state == 'up':
- state = 'admin-up'
- elif state == 'down':
- state = 'admin-down'
+ if node[u"type"] == NodeType.DUT:
+ if state == u"up":
+ flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ elif state == u"down":
+ flags = 0
else:
- raise ValueError('Unexpected interface state: {}'.format(state))
- VatExecutor.cmd_from_template(node, 'set_if_state.vat',
- sw_if_index=sw_if_index, state=state)
- elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
- cmd = 'ip link set {} {}'.format(iface_name, state)
+ raise ValueError(f"Unexpected interface state: {state}")
+ cmd = u"sw_interface_set_flags"
+ err_msg = f"Failed to set interface state on host {node[u'host']}"
+ args = dict(
+ sw_if_index=int(sw_if_index),
+ flags=flags
+ )
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+ elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
+ cmd = f"ip link set {iface_name} {state}"
exec_cmd_no_error(node, cmd, sudo=True)
else:
- raise ValueError('Node {} has unknown NodeType: "{}"'
- .format(node['host'], node['type']))
+ raise ValueError(
+ f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
+ )
+
+ @staticmethod
+ def set_interface_state_pci(
+ node, pf_pcis, namespace=None, state=u"up"):
+ """Set operational state for interface specified by PCI address.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param namespace: Exec command in namespace. (Optional, Default: none)
+ :param state: Up/Down. (Optional, default: up)
+ :type nodes: dict
+ :type pf_pcis: list
+ :type namespace: str
+ :type state: str
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ InterfaceUtil.set_linux_interface_state(
+ node, pf_eth, namespace=namespace, state=state
+ )
@staticmethod
- def set_interface_ethernet_mtu(node, iface_key, mtu):
- """Set Ethernet MTU for specified interface.
+ def set_interface_mtu(node, pf_pcis, mtu=9200):
+ """Set Ethernet MTU for specified interfaces.
- Function can be used only for TGs.
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param mtu: MTU to set. Default: 9200.
+ :type nodes: dict
+ :type pf_pcis: list
+ :type mtu: int
+ :raises RuntimeError: If failed to set MTU on interface.
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ cmd = f"ip link set {pf_eth} mtu {mtu}"
+ exec_cmd_no_error(node, cmd, sudo=True)
- :param node: Node where the interface is.
- :param iface_key: Interface key from topology file.
- :param mtu: MTU to set.
+ @staticmethod
+ def set_interface_channels(
+ node, pf_pcis, num_queues=1, channel=u"combined"):
+ """Set interface channels for specified interfaces.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param num_queues: Number of channels. (Optional, Default: 1)
+ :param channel: Channel type. (Optional, Default: combined)
+ :type nodes: dict
+ :type pf_pcis: list
+ :type num_queues: int
+ :type channel: str
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
+ def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
+ """Set Ethernet flow control for specified interfaces.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param rxf: RX flow. (Optional, Default: off).
+ :param txf: TX flow. (Optional, Default: off).
+ :type nodes: dict
+ :type pf_pcis: list
+ :type rxf: str
+ :type txf: str
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
+ ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
+ if int(ret_code) not in (0, 78):
+ raise RuntimeError("Failed to set flow control on {pf_eth}!")
+
+ @staticmethod
+ def set_pci_parameter(node, pf_pcis, key, value):
+ """Set PCI parameter for specified interfaces.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param key: Key to set.
+ :param value: Value to set.
+ :type nodes: dict
+ :type pf_pcis: list
+ :type key: str
+ :type value: str
+ """
+ for pf_pci in pf_pcis:
+ cmd = f"setpci -s {pf_pci} {key}={value}"
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
+ def vpp_set_interface_mtu(node, interface, mtu=9200):
+ """Set Ethernet MTU on interface.
+
+ :param node: VPP node.
+ :param interface: Interface to setup MTU. Default: 9200.
+ :param mtu: Ethernet MTU size in Bytes.
:type node: dict
- :type iface_key: str
+ :type interface: str or int
:type mtu: int
- :returns: Nothing.
- :raises ValueError: If the node type is "DUT".
- :raises ValueError: If the node has an unknown node type.
"""
- if node['type'] == NodeType.DUT:
- raise ValueError('Node {}: Setting Ethernet MTU for interface '
- 'on DUT nodes not supported', node['host'])
- elif node['type'] == NodeType.TG:
- iface_name = Topology.get_interface_name(node, iface_key)
- cmd = 'ip link set {} mtu {}'.format(iface_name, mtu)
- exec_cmd_no_error(node, cmd, sudo=True)
+ if isinstance(interface, str):
+ sw_if_index = Topology.get_interface_sw_index(node, interface)
else:
- raise ValueError('Node {} has unknown NodeType: "{}"'
- .format(node['host'], node['type']))
+ sw_if_index = interface
- @staticmethod
- def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
- """Set default Ethernet MTU on all interfaces on node.
+ cmd = u"hw_interface_set_mtu"
+ err_msg = f"Failed to set interface MTU on host {node[u'host']}"
+ args = dict(
+ sw_if_index=sw_if_index,
+ mtu=int(mtu)
+ )
+ try:
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+ except AssertionError as err:
+ logger.debug(f"Setting MTU failed.\n{err}")
- Function can be used only for TGs.
+ @staticmethod
+ def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
+ """Set Ethernet MTU on all interfaces.
- :param node: Node where to set default MTU.
+ :param node: VPP node.
+ :param mtu: Ethernet MTU size in Bytes. Default: 9200.
:type node: dict
- :returns: Nothing.
+ :type mtu: int
+ """
+ for interface in node[u"interfaces"]:
+ InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
+
+ @staticmethod
+ def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
+ """Set Ethernet MTU on all interfaces on all DUTs.
+
+ :param nodes: VPP nodes.
+ :param mtu: Ethernet MTU size in Bytes. Default: 9200.
+ :type nodes: dict
+ :type mtu: int
"""
- for ifc in node['interfaces']:
- InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
@staticmethod
- def vpp_node_interfaces_ready_wait(node, timeout=10):
+ def vpp_node_interfaces_ready_wait(node, retries=15):
"""Wait until all interfaces with admin-up are in link-up state.
:param node: Node to wait on.
- :param timeout: Waiting timeout in seconds (optional, default 10s).
+ :param retries: Number of retries to check interface status (optional,
+ default 15).
:type node: dict
- :type timeout: int
+ :type retries: int
:returns: Nothing.
- :raises: RuntimeError if the timeout period value has elapsed.
+ :raises RuntimeError: If any interface is not in link-up state after
+ defined number of retries.
"""
- if_ready = False
- not_ready = []
- start = time()
- while not if_ready:
+ for _ in range(0, retries):
+ not_ready = list()
out = InterfaceUtil.vpp_get_interface_data(node)
- if time() - start > timeout:
- for interface in out:
- if interface.get('admin_up_down') == 1:
- if interface.get('link_up_down') != 1:
- logger.debug('{0} link-down'.format(
- interface.get('interface_name')))
- raise RuntimeError('timeout, not up {0}'.format(not_ready))
- not_ready = []
for interface in out:
- if interface.get('admin_up_down') == 1:
- if interface.get('link_up_down') != 1:
- not_ready.append(interface.get('interface_name'))
- if not not_ready:
- if_ready = True
- else:
- logger.debug('Interfaces still in link-down state: {0}, '
- 'waiting...'.format(not_ready))
+ if interface.get(u"flags") == 1:
+ not_ready.append(interface.get(u"interface_name"))
+ if not_ready:
+ logger.debug(
+ f"Interfaces still not in link-up state:\n{not_ready}"
+ )
sleep(1)
+ else:
+ break
+ else:
+ err = f"Timeout, interfaces not up:\n{not_ready}" \
+ if u"not_ready" in locals() else u"No check executed!"
+ raise RuntimeError(err)
@staticmethod
- def vpp_nodes_interfaces_ready_wait(nodes, timeout=10):
- """Wait until all interfaces with admin-up are in link-up state for
- listed nodes.
-
- :param nodes: List of nodes to wait on.
- :param timeout: Seconds to wait per node for all interfaces to come up.
- :type nodes: list
- :type timeout: int
- :returns: Nothing.
- """
- for node in nodes:
- InterfaceUtil.vpp_node_interfaces_ready_wait(node, timeout)
-
- @staticmethod
- def all_vpp_interfaces_ready_wait(nodes, timeout=10):
+ def all_vpp_interfaces_ready_wait(nodes, retries=15):
"""Wait until all interfaces with admin-up are in link-up state for all
nodes in the topology.
:param nodes: Nodes in the topology.
- :param timeout: Seconds to wait per node for all interfaces to come up.
+ :param retries: Number of retries to check interface status (optional,
+ default 15).
:type nodes: dict
- :type timeout: int
+ :type retries: int
:returns: Nothing.
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- InterfaceUtil.vpp_node_interfaces_ready_wait(node, timeout)
+ if node[u"type"] == NodeType.DUT:
+ InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
@staticmethod
def vpp_get_interface_data(node, interface=None):
"""Get all interface data from a VPP node. If a name or
sw_interface_index is provided, return only data for the matching
- interface.
+ interface(s).
:param node: VPP node to get interface data from.
:param interface: Numeric index or name string of a specific interface.
:type node: dict
:type interface: int or str
:returns: List of dictionaries containing data for each interface, or a
- single dictionary for the specified interface.
+ single dictionary for the specified interface.
:rtype: list or dict
:raises TypeError: if the data type of interface is neither basestring
- nor int.
+ nor int.
"""
- with VatTerminal(node) as vat:
- response = vat.vat_terminal_exec_cmd_from_template(
- "interface_dump.vat")
-
- data = response[0]
+ def process_if_dump(if_dump):
+ """Process interface dump.
+
+ :param if_dump: Interface dump.
+ :type if_dump: dict
+ :returns: Processed interface dump.
+ :rtype: dict
+ """
+ if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
+ if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
+ if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
+ if_dump[u"flags"] = if_dump[u"flags"].value
+ if_dump[u"type"] = if_dump[u"type"].value
+ if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
+ if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
+ if hasattr(if_dump[u"sub_if_flags"], u"value") \
+ else int(if_dump[u"sub_if_flags"])
+
+ return if_dump
if interface is not None:
- if isinstance(interface, basestring):
- param = "interface_name"
+ if isinstance(interface, str):
+ param = u"interface_name"
elif isinstance(interface, int):
- param = "sw_if_index"
+ param = u"sw_if_index"
else:
- raise TypeError
- for data_if in data:
- if data_if[param] == interface:
- return data_if
- return dict()
+ raise TypeError(f"Wrong interface format {interface}")
+ else:
+ param = u""
+
+ cmd = u"sw_interface_dump"
+ args = dict(
+ name_filter_valid=False,
+ name_filter=u""
+ )
+ err_msg = f"Failed to get interface dump on host {node[u'host']}"
+
+ with PapiSocketExecutor(node) as papi_exec:
+ details = papi_exec.add(cmd, **args).get_details(err_msg)
+ logger.debug(f"Received data:\n{details!r}")
+
+ data = list() if interface is None else dict()
+ for dump in details:
+ if interface is None:
+ data.append(process_if_dump(dump))
+ elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
+ data = process_if_dump(dump)
+ break
+
+ logger.debug(f"Interface data:\n{data}")
return data
    @staticmethod
    def vpp_get_interface_name(node, sw_if_index):
        """Get interface name for the given SW interface index from actual
        interface dump.

        NOTE(review): for a sub-interface this returns the name of its
        super (parent) interface — confirm that is the intended contract.

        :param node: VPP node to get interface data from.
        :param sw_if_index: SW interface index of the specific interface.
        :type node: dict
        :type sw_if_index: int
        :returns: Name of the given interface.
        :rtype: str
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
        # When sup_sw_if_index differs, this is not a top-level interface;
        # re-dump using the super interface index and use its name.
        if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
            if_data = InterfaceUtil.vpp_get_interface_data(
                node, if_data[u"sup_sw_if_index"]
            )

        return if_data.get(u"interface_name")
+
    @staticmethod
    def vpp_get_interface_sw_index(node, interface_name):
        """Get SW interface index for the given interface name from actual
        interface dump.

        :param node: VPP node to get interface data from.
        :param interface_name: Interface name.
        :type node: dict
        :type interface_name: str
        :returns: SW interface index of the given interface, or None when
            the interface is not found in the dump.
        :rtype: int
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

        return if_data.get(u"sw_if_index")
+
+ @staticmethod
+ def vpp_get_interface_mac(node, interface):
"""Get MAC address for the given interface from actual interface dump.
:param node: VPP node to get interface data from.
:returns: MAC address.
:rtype: str
"""
-
if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
- if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
+ if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
if_data = InterfaceUtil.vpp_get_interface_data(
- node, if_data['sup_sw_if_index'])
- mac_data = [str(hex(item))[2:] for item in if_data['l2_address'][:6]]
- mac_data_nice = []
- for item in mac_data:
- if len(item) == 1:
- item = '0' + item
- mac_data_nice.append(item)
- mac = ":".join(mac_data_nice)
- return mac
-
- @staticmethod
- def vpp_get_interface_ip_addresses(node, interface, ip_version):
- """Get list of IP addresses from an interface on a VPP node.
-
- :param node: VPP node to get data from.
- :param interface: Name of an interface on the VPP node.
- :param ip_version: IP protocol version (ipv4 or ipv6).
- :type node: dict
- :type interface: str
- :type ip_version: str
- :returns: List of dictionaries, each containing IP address, subnet
- prefix length and also the subnet mask for ipv4 addresses.
- Note: A single interface may have multiple IP addresses assigned.
- :rtype: list
- """
- sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
-
- with VatTerminal(node) as vat:
- response = vat.vat_terminal_exec_cmd_from_template(
- "ip_address_dump.vat", ip_version=ip_version,
- sw_if_index=sw_if_index)
-
- data = response[0]
-
- if ip_version == "ipv4":
- for item in data:
- item["netmask"] = convert_ipv4_netmask_prefix(
- item["prefix_length"])
- return data
+ node, if_data[u"sup_sw_if_index"])
+
+ return if_data.get(u"l2_address")
+
+ @staticmethod
+ def vpp_set_interface_mac(node, interface, mac):
+ """Set MAC address for the given interface.
+
+ :param node: VPP node to set interface MAC.
+ :param interface: Numeric index or name string of a specific interface.
+ :param mac: Required MAC address.
+ :type node: dict
+ :type interface: int or str
+ :type mac: str
+ """
+ cmd = u"sw_interface_set_mac_address"
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ mac_address=L2Util.mac_to_bin(mac)
+ )
+ err_msg = f"Failed to set MAC address of interface {interface}" \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def tg_set_interface_driver(node, pci_addr, driver):
:type node: dict
:type pci_addr: str
:type driver: str
- :returns: None.
:raises RuntimeError: If unbinding from the current driver fails.
:raises RuntimeError: If binding to the new driver fails.
"""
# Unbind from current driver
if old_driver is not None:
- cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\
- .format(pci_addr, old_driver)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ cmd = f"sh -c \"echo {pci_addr} > " \
+ f"/sys/bus/pci/drivers/{old_driver}/unbind\""
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
+ raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
# Bind to the new driver
- cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\
- .format(pci_addr, driver)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\""
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
+ raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
@staticmethod
def tg_get_interface_driver(node, pci_addr):
:type pci_addr: str
:returns: Interface driver or None if not found.
:rtype: str
- :raises RuntimeError: If it is not possible to get the interface driver
- information from the node.
-
- .. note::
- # lspci -vmmks 0000:00:05.0
- Slot: 00:05.0
- Class: Ethernet controller
- Vendor: Red Hat, Inc
- Device: Virtio network device
- SVendor: Red Hat, Inc
- SDevice: Device 0001
- PhySlot: 5
- Driver: virtio-pci
- """
- ssh = SSH()
- ssh.connect(node)
-
- cmd = 'lspci -vmmks {0}'.format(pci_addr)
-
- (ret_code, stdout, _) = ssh.exec_command(cmd)
- if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
-
- for line in stdout.splitlines():
- if len(line) == 0:
- continue
- (name, value) = line.split("\t", 1)
- if name == 'Driver:':
- return value
-
- return None
-
- @staticmethod
- def tg_set_interfaces_udev_rules(node):
- """Set udev rules for interfaces.
-
- Create udev rules file in /etc/udev/rules.d where are rules for each
- interface used by TG node, based on MAC interface has specific name.
- So after unbind and bind again to kernel driver interface has same
- name as before. This must be called after TG has set name for each
- port in topology dictionary.
- udev rule example
- SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
- NAME="eth1"
-
- :param node: Node to set udev rules on (must be TG node).
- :type node: dict
- :raises RuntimeError: If setting of udev rules fails.
+ :raises RuntimeError: If PCI rescan or lspci command execution failed.
"""
- ssh = SSH()
- ssh.connect(node)
-
- cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
- if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
-
- for interface in node['interfaces'].values():
- rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
- '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
- interface['name'] + '\\"'
- cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
- rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
- if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
-
- cmd = '/etc/init.d/udev restart'
- ssh.exec_command_sudo(cmd)
+ return DUTSetup.get_pci_dev_driver(node, pci_addr)
@staticmethod
def tg_set_interfaces_default_driver(node):
:param node: Node to setup interfaces driver on (must be TG node).
:type node: dict
"""
- for interface in node['interfaces'].values():
- InterfaceUtil.tg_set_interface_driver(node,
- interface['pci_address'],
- interface['driver'])
+ for interface in node[u"interfaces"].values():
+ InterfaceUtil.tg_set_interface_driver(
+ node, interface[u"pci_address"], interface[u"driver"]
+ )
@staticmethod
def update_vpp_interface_data_on_node(node):
Updates interface names, software if index numbers and any other details
generated specifically by vpp that are unknown before testcase run.
- It does this by dumping interface list to JSON output from all
- devices using vpp_api_test, and pairing known information from topology
- (mac address/pci address of interface) to state from VPP.
+ It does this by dumping interface list from all devices using python
+ api, and pairing known information from topology (mac address) to state
+ from VPP.
:param node: Node selected from DICT__nodes.
:type node: dict
"""
- vat_executor = VatExecutor()
- vat_executor.execute_script_json_out("dump_interfaces.vat", node)
- interface_dump_json = vat_executor.get_script_stdout()
- VatJsonUtil.update_vpp_interface_data_from_json(node,
- interface_dump_json)
+ interface_list = InterfaceUtil.vpp_get_interface_data(node)
+ interface_dict = dict()
+ for ifc in interface_list:
+ interface_dict[ifc[u"l2_address"]] = ifc
+
+ for if_name, if_data in node[u"interfaces"].items():
+ ifc_dict = interface_dict.get(if_data[u"mac_address"])
+ if ifc_dict is not None:
+ if_data[u"name"] = ifc_dict[u"interface_name"]
+ if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
+ if_data[u"mtu"] = ifc_dict[u"mtu"][0]
+ logger.trace(
+ f"Interface {if_name} found by MAC "
+ f"{if_data[u'mac_address']}"
+ )
+ else:
+ logger.trace(
+ f"Interface {if_name} not found by MAC "
+ f"{if_data[u'mac_address']}"
+ )
+ if_data[u"vpp_sw_index"] = None
@staticmethod
- def update_tg_interface_data_on_node(node):
- """Update interface name for TG/linux node in DICT__nodes.
+ def update_nic_interface_names(node):
+ """Update interface names based on nic type and PCI address.
- :param node: Node selected from DICT__nodes.
+ This method updates interface names in the same format as VPP does.
+
+ :param node: Node dictionary.
:type node: dict
- :raises RuntimeError: If getting of interface name and MAC fails.
+ """
+ for ifc in node[u"interfaces"].values():
+ if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
+ loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
+ f"{int(if_pci[3], 16):x}"
+ if ifc[u"model"] == u"Intel-XL710":
+ ifc[u"name"] = f"FortyGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Intel-X710":
+ ifc[u"name"] = f"TenGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Intel-X520-DA2":
+ ifc[u"name"] = f"TenGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Cisco-VIC-1385":
+ ifc[u"name"] = f"FortyGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Cisco-VIC-1227":
+ ifc[u"name"] = f"TenGigabitEthernet{loc}"
+ else:
+ ifc[u"name"] = f"UnknownEthernet{loc}"
+
+ @staticmethod
+ def update_nic_interface_names_on_all_duts(nodes):
+ """Update interface names based on nic type and PCI address on all DUTs.
+
+ This method updates interface names in the same format as VPP does.
+
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ InterfaceUtil.update_nic_interface_names(node)
+
+ @staticmethod
+ def update_tg_interface_data_on_node(node):
+ """Update interface name for TG/linux node in DICT__nodes.
.. note::
# for dev in `ls /sys/class/net/`;
"52:54:00:e1:8a:0f": "eth2"
"00:00:00:00:00:00": "lo"
- .. todo:: parse lshw -json instead
+ :param node: Node selected from DICT__nodes.
+ :type node: dict
+ :raises RuntimeError: If getting of interface name and MAC fails.
"""
# First setup interface driver specified in yaml file
InterfaceUtil.tg_set_interfaces_default_driver(node)
ssh = SSH()
ssh.connect(node)
- cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat '
- '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;')
+ cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
+ u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'
- (ret_code, stdout, _) = ssh.exec_command(cmd)
+ ret_code, stdout, _ = ssh.exec_command(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Get interface name and MAC failed')
- tmp = "{" + stdout.rstrip().replace('\n', ',') + "}"
+ raise RuntimeError(u"Get interface name and MAC failed")
+ tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
+
interfaces = JsonParser().parse_data(tmp)
- for interface in node['interfaces'].values():
- name = interfaces.get(interface['mac_address'])
+ for interface in node[u"interfaces"].values():
+ name = interfaces.get(interface[u"mac_address"])
if name is None:
continue
- interface['name'] = name
-
- # Set udev rules for interfaces
- InterfaceUtil.tg_set_interfaces_udev_rules(node)
+ interface[u"name"] = name
@staticmethod
def iface_update_numa_node(node):
:type node: dict
:returns: Nothing.
:raises ValueError: If numa node ia less than 0.
- :raises RuntimeError: If update of numa node failes.
+ :raises RuntimeError: If update of numa node failed.
"""
ssh = SSH()
for if_key in Topology.get_node_interfaces(node):
if_pci = Topology.get_interface_pci_addr(node, if_key)
ssh.connect(node)
- cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
+ cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
for _ in range(3):
- (ret, out, _) = ssh.exec_command(cmd)
+ ret, out, _ = ssh.exec_command(cmd)
if ret == 0:
try:
- numa_node = int(out)
- if numa_node < 0:
- raise ValueError
+ numa_node = 0 if int(out) < 0 else int(out)
except ValueError:
- logger.trace('Reading numa location failed for: {0}'\
- .format(if_pci))
+ logger.trace(
+ f"Reading numa location failed for: {if_pci}"
+ )
else:
- Topology.set_interface_numa_node(node, if_key,
- numa_node)
+ Topology.set_interface_numa_node(
+ node, if_key, numa_node
+ )
break
else:
- raise RuntimeError('Update numa node failed for: {0}'\
- .format(if_pci))
+ raise RuntimeError(f"Update numa node failed for: {if_pci}")
@staticmethod
- def update_all_numa_nodes(nodes, skip_tg=False):
- """For all nodes and all their interfaces from topology file update numa
- node information based on information from the node.
-
- :param nodes: Nodes in the topology.
- :param skip_tg: Skip TG node
- :type nodes: dict
- :type skip_tg: bool
- :returns: Nothing.
- """
- for node in nodes.values():
- if node['type'] == NodeType.DUT:
- InterfaceUtil.iface_update_numa_node(node)
- elif node['type'] == NodeType.TG and not skip_tg:
- InterfaceUtil.iface_update_numa_node(node)
-
- @staticmethod
- def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
- numa_node=False):
+ def update_all_interface_data_on_all_nodes(
+ nodes, skip_tg=False, skip_vpp=False):
"""Update interface names on all nodes in DICT__nodes.
This method updates the topology dictionary by querying interface lists
of all nodes mentioned in the topology dictionary.
:param nodes: Nodes in the topology.
- :param skip_tg: Skip TG node
- :param numa_node: Retrieve numa_node location.
+ :param skip_tg: Skip TG node.
+ :param skip_vpp: Skip VPP node.
:type nodes: dict
:type skip_tg: bool
- :type numa_node: bool
+ :type skip_vpp: bool
"""
- for node_data in nodes.values():
- if node_data['type'] == NodeType.DUT:
- InterfaceUtil.update_vpp_interface_data_on_node(node_data)
- elif node_data['type'] == NodeType.TG and not skip_tg:
- InterfaceUtil.update_tg_interface_data_on_node(node_data)
-
- if numa_node:
- if node_data['type'] == NodeType.DUT:
- InterfaceUtil.iface_update_numa_node(node_data)
- elif node_data['type'] == NodeType.TG and not skip_tg:
- InterfaceUtil.iface_update_numa_node(node_data)
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT and not skip_vpp:
+ InterfaceUtil.update_vpp_interface_data_on_node(node)
+ elif node[u"type"] == NodeType.TG and not skip_tg:
+ InterfaceUtil.update_tg_interface_data_on_node(node)
+ InterfaceUtil.iface_update_numa_node(node)
@staticmethod
def create_vlan_subinterface(node, interface, vlan):
- """Create VLAN subinterface on node.
+ """Create VLAN sub-interface on node.
:param node: Node to add VLAN subinterface on.
- :param interface: Interface name on which create VLAN subinterface.
+    :param interface: Interface name or index on which to create VLAN
+        subinterface.
:param vlan: VLAN ID of the subinterface to be created.
:type node: dict
- :type interface: str
+    :type interface: str or int
:type vlan: int
:returns: Name and index of created subinterface.
:rtype: tuple
:raises RuntimeError: if it is unable to create VLAN subinterface on the
- node.
- """
- iface_key = Topology.get_interface_by_name(node, interface)
- sw_if_index = Topology.get_interface_sw_index(node, iface_key)
-
- output = VatExecutor.cmd_from_template(node, "create_vlan_subif.vat",
- sw_if_index=sw_if_index,
- vlan=vlan)
- if output[0]["retval"] == 0:
- sw_subif_index = output[0]["sw_if_index"]
- logger.trace('VLAN subinterface with sw_if_index {} and VLAN ID {} '
- 'created on node {}'.format(sw_subif_index,
- vlan, node['host']))
- else:
- raise RuntimeError('Unable to create VLAN subinterface on node {}'
- .format(node['host']))
+ node or interface cannot be converted.
+ """
+ sw_if_index = InterfaceUtil.get_interface_index(node, interface)
+
+ cmd = u"create_vlan_subif"
+ args = dict(
+ sw_if_index=sw_if_index,
+ vlan_id=int(vlan)
+ )
+ err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"
+
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
- with VatTerminal(node, False) as vat:
- vat.vat_terminal_exec_cmd('exec show interfaces')
+ if_key = Topology.add_new_port(node, u"vlan_subif")
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
- return '{}.{}'.format(interface, vlan), sw_subif_index
+ return f"{interface}.{vlan}", sw_if_index
@staticmethod
def create_vxlan_interface(node, vni, source_ip, destination_ip):
"""Create VXLAN interface and return sw if index of created interface.
- Executes "vxlan_add_del_tunnel src {src} dst {dst} vni {vni}" VAT
- command on the node.
-
:param node: Node where to create VXLAN interface.
:param vni: VXLAN Network Identifier.
:param source_ip: Source IP of a VXLAN Tunnel End Point.
:returns: SW IF INDEX of created interface.
:rtype: int
:raises RuntimeError: if it is unable to create VxLAN interface on the
- node.
+ node.
"""
- output = VatExecutor.cmd_from_template(node, "vxlan_create.vat",
- src=source_ip,
- dst=destination_ip,
- vni=vni)
- output = output[0]
+ cmd = u"vxlan_add_del_tunnel"
+ args = dict(
+ is_add=True,
+ instance=Constants.BITWISE_NON_ZERO,
+ src_address=IPAddress.create_ip_address_object(
+ ip_address(source_ip)
+ ),
+ dst_address=IPAddress.create_ip_address_object(
+ ip_address(destination_ip)
+ ),
+ mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+ encap_vrf_id=0,
+ decap_next_index=Constants.BITWISE_NON_ZERO,
+ vni=int(vni)
+ )
+ err_msg = f"Failed to create VXLAN tunnel interface " \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ if_key = Topology.add_new_port(node, u"vxlan_tunnel")
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return sw_if_index
- if output["retval"] == 0:
- return output["sw_if_index"]
- else:
- raise RuntimeError('Unable to create VXLAN interface on node {0}'
- .format(node))
+ @staticmethod
+ def set_vxlan_bypass(node, interface=None):
+ """Add the 'ip4-vxlan-bypass' graph node for a given interface.
+
+ By adding the IPv4 vxlan-bypass graph node to an interface, the node
+    checks for and validates input vxlan packets and bypasses ip4-lookup,
+    ip4-local, ip4-udp-lookup nodes to speed up vxlan packet forwarding.
+    This node will cause extra overhead for non-vxlan packets, which is
+ kept at a minimum.
+
+ :param node: Node where to set VXLAN bypass.
+ :param interface: Numeric index or name string of a specific interface.
+ :type node: dict
+ :type interface: int or str
+ :raises RuntimeError: if it failed to set VXLAN bypass on interface.
+ """
+ sw_if_index = InterfaceUtil.get_interface_index(node, interface)
+
+ cmd = u"sw_interface_set_vxlan_bypass"
+ args = dict(
+ is_ipv6=False,
+ sw_if_index=sw_if_index,
+ enable=True
+ )
+ err_msg = f"Failed to set VXLAN bypass on interface " \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_replies(err_msg)
@staticmethod
def vxlan_dump(node, interface=None):
:param node: VPP node to get interface data from.
:param interface: Numeric index or name string of a specific interface.
- If None, information about all VxLAN interfaces is returned.
+ If None, information about all VxLAN interfaces is returned.
:type node: dict
:type interface: int or str
:returns: Dictionary containing data for the given VxLAN interface or if
- interface=None, the list of dictionaries with all VxLAN interfaces.
+ interface=None, the list of dictionaries with all VxLAN interfaces.
:rtype: dict or list
:raises TypeError: if the data type of interface is neither basestring
- nor int.
- """
- param = "sw_if_index"
- if interface is None:
- param = ''
- sw_if_index = ''
- elif isinstance(interface, basestring):
- sw_if_index = Topology.get_interface_sw_index(node, interface)
- elif isinstance(interface, int):
- sw_if_index = interface
- else:
- raise TypeError("Wrong interface format {0}".format(interface))
-
- with VatTerminal(node) as vat:
- response = vat.vat_terminal_exec_cmd_from_template(
- "vxlan_dump.vat", param=param, sw_if_index=sw_if_index)
-
- if sw_if_index:
- for vxlan in response[0]:
- if vxlan["sw_if_index"] == sw_if_index:
- return vxlan
- return {}
- return response[0]
-
- @staticmethod
- def vhost_user_dump(node):
- """Get vhost-user data for the given node.
-
- :param node: VPP node to get interface data from.
- :type node: dict
- :returns: List of dictionaries with all vhost-user interfaces.
- :rtype: list
+ nor int.
"""
- with VatTerminal(node) as vat:
- response = vat.vat_terminal_exec_cmd_from_template(
- "vhost_user_dump.vat")
-
- return response[0]
+ def process_vxlan_dump(vxlan_dump):
+ """Process vxlan dump.
+
+ :param vxlan_dump: Vxlan interface dump.
+ :type vxlan_dump: dict
+ :returns: Processed vxlan interface dump.
+ :rtype: dict
+ """
+ vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
+ vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
+ return vxlan_dump
- @staticmethod
- def tap_dump(node, name=None):
- """Get all TAP interface data from the given node, or data about
- a specific TAP interface.
-
- :param node: VPP node to get data from.
- :param name: Optional name of a specific TAP interface.
- :type node: dict
- :type name: str
- :returns: Dictionary of information about a specific TAP interface, or
- a List of dictionaries containing all TAP data for the given node.
- :rtype: dict or list
- """
- with VatTerminal(node) as vat:
- response = vat.vat_terminal_exec_cmd_from_template(
- "tap_dump.vat")
- if name is None:
- return response[0]
+ if interface is not None:
+ sw_if_index = InterfaceUtil.get_interface_index(node, interface)
else:
- for item in response[0]:
- if name == item['dev_name']:
- return item
- return {}
+ sw_if_index = int(Constants.BITWISE_NON_ZERO)
+
+ cmd = u"vxlan_tunnel_dump"
+ args = dict(
+ sw_if_index=sw_if_index
+ )
+ err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"
+
+ with PapiSocketExecutor(node) as papi_exec:
+ details = papi_exec.add(cmd, **args).get_details(err_msg)
+
+ data = list() if interface is None else dict()
+ for dump in details:
+ if interface is None:
+ data.append(process_vxlan_dump(dump))
+ elif dump[u"sw_if_index"] == sw_if_index:
+ data = process_vxlan_dump(dump)
+ break
+
+ logger.debug(f"VXLAN data:\n{data}")
+ return data
@staticmethod
- def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
- inner_vlan_id=None, type_subif=None):
+ def create_subinterface(
+ node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
+ type_subif=None):
"""Create sub-interface on node. It is possible to set required
sub-interface type and VLAN tag(s).
:param outer_vlan_id: Optional outer VLAN ID.
:param inner_vlan_id: Optional inner VLAN ID.
:param type_subif: Optional type of sub-interface. Values supported by
- VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match] [default_sub]
+ VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
+ [default_sub]
:type node: dict
:type interface: str or int
:type sub_id: int
:rtype: tuple
:raises RuntimeError: If it is not possible to create sub-interface.
"""
-
- outer_vlan_id = 'outer_vlan_id {0}'.format(outer_vlan_id)\
- if outer_vlan_id else ''
-
- inner_vlan_id = 'inner_vlan_id {0}'.format(inner_vlan_id)\
- if inner_vlan_id else ''
-
- if type_subif is None:
- type_subif = ''
-
- if isinstance(interface, basestring):
- iface_key = Topology.get_interface_by_name(node, interface)
- sw_if_index = Topology.get_interface_sw_index(node, iface_key)
- else:
- sw_if_index = interface
-
- output = VatExecutor.cmd_from_template(node, "create_sub_interface.vat",
- sw_if_index=sw_if_index,
- sub_id=sub_id,
- outer_vlan_id=outer_vlan_id,
- inner_vlan_id=inner_vlan_id,
- type_subif=type_subif)
-
- if output[0]["retval"] == 0:
- sw_subif_index = output[0]["sw_if_index"]
- logger.trace('Created subinterface with index {}'
- .format(sw_subif_index))
- else:
- raise RuntimeError('Unable to create sub-interface on node {}'
- .format(node['host']))
-
- with VatTerminal(node, json_param=False) as vat:
- vat.vat_terminal_exec_cmd('exec show interfaces')
-
- name = '{}.{}'.format(interface, sub_id)
- return name, sw_subif_index
+ subif_types = type_subif.split()
+
+ flags = 0
+ if u"no_tags" in subif_types:
+ flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
+ if u"one_tag" in subif_types:
+ flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
+ if u"two_tags" in subif_types:
+ flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
+ if u"dot1ad" in subif_types:
+ flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
+ if u"exact_match" in subif_types:
+ flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
+ if u"default_sub" in subif_types:
+ flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
+ if type_subif == u"default_sub":
+ flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
+ | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY
+
+ cmd = u"create_subif"
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ sub_id=int(sub_id),
+ sub_if_flags=flags.value if hasattr(flags, u"value")
+ else int(flags),
+ outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
+ inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
+ )
+ err_msg = f"Failed to create sub-interface on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ if_key = Topology.add_new_port(node, u"subinterface")
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return f"{interface}.{sub_id}", sw_if_index
@staticmethod
def create_gre_tunnel_interface(node, source_ip, destination_ip):
:rtype: tuple
:raises RuntimeError: If unable to create GRE tunnel interface.
"""
- output = VatExecutor.cmd_from_template(node, "create_gre.vat",
- src=source_ip,
- dst=destination_ip)
- output = output[0]
+ cmd = u"gre_tunnel_add_del"
+ tunnel = dict(
+ type=0,
+ instance=Constants.BITWISE_NON_ZERO,
+ src=str(source_ip),
+ dst=str(destination_ip),
+ outer_fib_id=0,
+ session_id=0
+ )
+ args = dict(
+ is_add=1,
+ tunnel=tunnel
+ )
+ err_msg = f"Failed to create GRE tunnel interface " \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ if_key = Topology.add_new_port(node, u"gre_tunnel")
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return ifc_name, sw_if_index
- if output["retval"] == 0:
- sw_if_index = output["sw_if_index"]
+ @staticmethod
+ def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+ """Create GTPU interface and return sw if index of created interface.
- vat_executor = VatExecutor()
- vat_executor.execute_script_json_out("dump_interfaces.vat", node)
- interface_dump_json = vat_executor.get_script_stdout()
- name = VatJsonUtil.get_interface_name_from_json(
- interface_dump_json, sw_if_index)
- return name, sw_if_index
- else:
- raise RuntimeError('Unable to create GRE tunnel on node {}.'
- .format(node))
+ :param node: Node where to create GTPU interface.
+ :param teid: GTPU Tunnel Endpoint Identifier.
+ :param source_ip: Source IP of a GTPU Tunnel End Point.
+ :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+ :type node: dict
+ :type teid: int
+ :type source_ip: str
+ :type destination_ip: str
+ :returns: SW IF INDEX of created interface.
+ :rtype: int
+ :raises RuntimeError: if it is unable to create GTPU interface on the
+ node.
+ """
+ cmd = u"gtpu_add_del_tunnel"
+ args = dict(
+ is_add=True,
+ src_address=IPAddress.create_ip_address_object(
+ ip_address(source_ip)
+ ),
+ dst_address=IPAddress.create_ip_address_object(
+ ip_address(destination_ip)
+ ),
+ mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+ encap_vrf_id=0,
+ decap_next_index=2,
+ teid=teid
+ )
+ err_msg = f"Failed to create GTPU tunnel interface " \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ if_key = Topology.add_new_port(node, u"gtpu_tunnel")
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return sw_if_index
@staticmethod
- def vpp_create_loopback(node):
+ def vpp_create_loopback(node, mac=None):
"""Create loopback interface on VPP node.
:param node: Node to create loopback interface on.
+ :param mac: Optional MAC address for loopback interface.
:type node: dict
+ :type mac: str
:returns: SW interface index.
:rtype: int
:raises RuntimeError: If it is not possible to create loopback on the
- node.
+ node.
"""
- out = VatExecutor.cmd_from_template(node, "create_loopback.vat")
- if out[0].get('retval') == 0:
- return out[0].get('sw_if_index')
- else:
- raise RuntimeError('Create loopback failed on node "{}"'
- .format(node['host']))
+ cmd = u"create_loopback_instance"
+ args = dict(
+ mac_address=L2Util.mac_to_bin(mac) if mac else 0,
+ is_specified=False,
+ user_instance=0,
+ )
+ err_msg = f"Failed to create loopback interface on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ if_key = Topology.add_new_port(node, u"loopback")
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
+ if mac:
+ mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name)
+ Topology.update_interface_mac_address(node, if_key, mac)
+
+ return sw_if_index
+
+ @staticmethod
+ def vpp_create_bond_interface(
+ node, mode, load_balance=None, mac=None, gso=False):
+ """Create bond interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param mode: Link bonding mode.
+ :param load_balance: Load balance (optional, valid for xor and lacp
+ modes, otherwise ignored). Default: None.
+ :param mac: MAC address to assign to the bond interface (optional).
+ Default: None.
+ :param gso: Enable GSO support (optional). Default: False.
+ :type node: dict
+ :type mode: str
+ :type load_balance: str
+ :type mac: str
+ :type gso: bool
+ :returns: Interface key (name) in topology.
+ :rtype: str
+ :raises RuntimeError: If it is not possible to create bond interface on
+ the node.
+ """
+ cmd = u"bond_create2"
+ args = dict(
+ id=int(Constants.BITWISE_NON_ZERO),
+ use_custom_mac=bool(mac is not None),
+ mac_address=L2Util.mac_to_bin(mac) if mac else None,
+ mode=getattr(
+ LinkBondMode,
+ f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
+ ).value,
+ lb=0 if load_balance is None else getattr(
+ LinkBondLoadBalanceAlgo,
+ f"BOND_API_LB_ALGO_{load_balance.upper()}"
+ ).value,
+ numa_only=False,
+ enable_gso=gso
+ )
+ err_msg = f"Failed to create bond interface on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ InterfaceUtil.add_eth_interface(
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
+ )
+ if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
+
+ return if_key
+
+ @staticmethod
+ def add_eth_interface(
+ node, ifc_name=None, sw_if_index=None, ifc_pfx=None,
+ host_if_key=None):
+ """Add ethernet interface to current topology.
+
+ :param node: DUT node from topology.
+ :param ifc_name: Name of the interface.
+ :param sw_if_index: SW interface index.
+ :param ifc_pfx: Interface key prefix.
+ :param host_if_key: Host interface key from topology file.
+ :type node: dict
+ :type ifc_name: str
+ :type sw_if_index: int
+ :type ifc_pfx: str
+ :type host_if_key: str
+ """
+ if_key = Topology.add_new_port(node, ifc_pfx)
+
+ if ifc_name and sw_if_index is None:
+ sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(
+ node, ifc_name)
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ if sw_if_index and ifc_name is None:
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
+ ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)
+ Topology.update_interface_mac_address(node, if_key, ifc_mac)
+ if host_if_key is not None:
+ Topology.set_interface_numa_node(
+ node, if_key, Topology.get_interface_numa_node(
+ node, host_if_key
+ )
+ )
+ Topology.update_interface_pci_address(
+ node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
+ )
+
+ @staticmethod
+ def vpp_create_avf_interface(
+ node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
+ """Create AVF interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param if_key: Interface key from topology file of interface
+ to be bound to i40evf driver.
+ :param num_rx_queues: Number of RX queues.
+ :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
+ :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
+ :type node: dict
+ :type if_key: str
+ :type num_rx_queues: int
+ :type rxq_size: int
+ :type txq_size: int
+ :returns: AVF interface key (name) in topology.
+ :rtype: str
+ :raises RuntimeError: If it is not possible to create AVF interface on
+ the node.
+ """
+ PapiSocketExecutor.run_cli_cmd(
+ node, u"set logging class avf level debug"
+ )
+
+ cmd = u"avf_create"
+ vf_pci_addr = Topology.get_interface_pci_addr(node, if_key)
+ args = dict(
+ pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
+ enable_elog=0,
+ rxq_num=int(num_rx_queues) if num_rx_queues else 0,
+ rxq_size=rxq_size,
+ txq_size=txq_size
+ )
+ err_msg = f"Failed to create AVF interface on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ InterfaceUtil.add_eth_interface(
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
+ host_if_key=if_key
+ )
+
+ return Topology.get_interface_by_sw_index(node, sw_if_index)
+
+ @staticmethod
+ def vpp_create_af_xdp_interface(
+ node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
+ mode=u"auto"):
+ """Create AF_XDP interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param if_key: Physical interface key from topology file of interface
+ to be bound to compatible driver.
+ :param num_rx_queues: Number of RX queues. (Optional, Default: none)
+ :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
+ :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
+ :param mode: AF_XDP interface mode. (Optional, Default: auto).
+ :type node: dict
+ :type if_key: str
+ :type num_rx_queues: int
+ :type rxq_size: int
+ :type txq_size: int
+ :type mode: str
+ :returns: Interface key (name) in topology file.
+ :rtype: str
+ :raises RuntimeError: If it is not possible to create AF_XDP interface
+ on the node.
+ """
+ PapiSocketExecutor.run_cli_cmd(
+ node, u"set logging class af_xdp level debug"
+ )
+
+ cmd = u"af_xdp_create"
+ pci_addr = Topology.get_interface_pci_addr(node, if_key)
+ args = dict(
+ name=InterfaceUtil.pci_to_eth(node, pci_addr),
+ host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
+ rxq_num=int(num_rx_queues) if num_rx_queues else 0,
+ rxq_size=rxq_size,
+ txq_size=txq_size,
+ mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
+ )
+ err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ InterfaceUtil.vpp_set_interface_mac(
+ node, sw_if_index, Topology.get_interface_mac(node, if_key)
+ )
+ InterfaceUtil.add_eth_interface(
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
+ host_if_key=if_key
+ )
+
+ return Topology.get_interface_by_sw_index(node, sw_if_index)
+
+ @staticmethod
+ def vpp_create_rdma_interface(
+ node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
+ mode=u"auto"):
+ """Create RDMA interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param if_key: Physical interface key from topology file of interface
+ to be bound to rdma-core driver.
+ :param num_rx_queues: Number of RX queues.
+ :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
+ :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
+ :param mode: RDMA interface mode - auto/ibv/dv.
+ :type node: dict
+ :type if_key: str
+ :type num_rx_queues: int
+ :type rxq_size: int
+ :type txq_size: int
+ :type mode: str
+ :returns: Interface key (name) in topology file.
+ :rtype: str
+ :raises RuntimeError: If it is not possible to create RDMA interface on
+ the node.
+ """
+ PapiSocketExecutor.run_cli_cmd(
+ node, u"set logging class rdma level debug"
+ )
+
+ cmd = u"rdma_create_v2"
+ pci_addr = Topology.get_interface_pci_addr(node, if_key)
+ args = dict(
+ name=InterfaceUtil.pci_to_eth(node, pci_addr),
+ host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
+ rxq_num=int(num_rx_queues) if num_rx_queues else 0,
+ rxq_size=rxq_size,
+ txq_size=txq_size,
+ mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
+ # Note: Set True for non-jumbo packets.
+ no_multi_seg=False,
+ max_pktlen=0,
+ )
+ err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ InterfaceUtil.vpp_set_interface_mac(
+ node, sw_if_index, Topology.get_interface_mac(node, if_key)
+ )
+ InterfaceUtil.add_eth_interface(
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
+ host_if_key=if_key
+ )
+
+ return Topology.get_interface_by_sw_index(node, sw_if_index)
+
+ @staticmethod
+ def vpp_add_bond_member(node, interface, bond_if):
+ """Add member interface to bond interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param interface: Physical interface key from topology file.
+    :param bond_if: Bond interface key from topology file.
+ :type node: dict
+ :type interface: str
+ :type bond_if: str
+ :raises RuntimeError: If it is not possible to add member to bond
+ interface on the node.
+ """
+ cmd = u"bond_add_member"
+ args = dict(
+ sw_if_index=Topology.get_interface_sw_index(node, interface),
+ bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
+ is_passive=False,
+ is_long_timeout=False
+ )
+ err_msg = f"Failed to add member {interface} to bond interface " \
+ f"{bond_if} on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def vpp_show_bond_data_on_node(node, verbose=False):
+ """Show (detailed) bond information on VPP node.
+
+ :param node: DUT node from topology.
+ :param verbose: If detailed information is required or not.
+ :type node: dict
+ :type verbose: bool
+ """
+ cmd = u"sw_bond_interface_dump"
+ err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
+
+ data = f"Bond data on node {node[u'host']}:\n"
+ with PapiSocketExecutor(node) as papi_exec:
+ details = papi_exec.add(cmd).get_details(err_msg)
+
+ for bond in details:
+ data += f"{bond[u'interface_name']}\n"
+ data += u" mode: {m}\n".format(
+ m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
+ )
+ data += u" load balance: {lb}\n".format(
+ lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
+ )
+ data += f" number of active members: {bond[u'active_members']}\n"
+ if verbose:
+ member_data = InterfaceUtil.vpp_bond_member_dump(
+ node, Topology.get_interface_by_sw_index(
+ node, bond[u"sw_if_index"]
+ )
+ )
+ for member in member_data:
+ if not member[u"is_passive"]:
+ data += f" {member[u'interface_name']}\n"
+ data += f" number of members: {bond[u'members']}\n"
+ if verbose:
+ for member in member_data:
+ data += f" {member[u'interface_name']}\n"
+ data += f" interface id: {bond[u'id']}\n"
+ data += f" sw_if_index: {bond[u'sw_if_index']}\n"
+ logger.info(data)
+
+ @staticmethod
+ def vpp_bond_member_dump(node, interface):
+    """Get bond interface member data on VPP node.
+
+ :param node: DUT node from topology.
+ :param interface: Physical interface key from topology file.
+ :type node: dict
+ :type interface: str
+    :returns: Bond member interface data.
+ :rtype: dict
+ """
+ cmd = u"sw_member_interface_dump"
+ args = dict(
+ sw_if_index=Topology.get_interface_sw_index(node, interface)
+ )
+ err_msg = f"Failed to get slave dump on host {node[u'host']}"
+
+ with PapiSocketExecutor(node) as papi_exec:
+ details = papi_exec.add(cmd, **args).get_details(err_msg)
+
+ logger.debug(f"Member data:\n{details}")
+ return details
@staticmethod
- def vpp_enable_input_acl_interface(node, interface, ip_version,
- table_index):
+ def vpp_show_bond_data_on_all_nodes(nodes, verbose=False):
+ """Show (detailed) bond information on all VPP nodes in DICT__nodes.
+
+ :param nodes: Nodes in the topology.
+ :param verbose: If detailed information is required or not.
+ :type nodes: dict
+ :type verbose: bool
+ """
+ for node_data in nodes.values():
+ if node_data[u"type"] == NodeType.DUT:
+ InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
+
+ @staticmethod
+ def vpp_enable_input_acl_interface(
+ node, interface, ip_version, table_index):
"""Enable input acl on interface.
:param node: VPP node to setup interface for input acl.
:type ip_version: str
:type table_index: int
"""
- if isinstance(interface, basestring):
- sw_if_index = Topology.get_interface_sw_index(node, interface)
- else:
- sw_if_index = interface
-
- with VatTerminal(node) as vat:
- vat.vat_terminal_exec_cmd_from_template("input_acl_int.vat",
- sw_if_index=sw_if_index,
- ip_version=ip_version,
- table_index=table_index)
+ cmd = u"input_acl_set_interface"
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ ip4_table_index=table_index if ip_version == u"ip4"
+ else Constants.BITWISE_NON_ZERO,
+ ip6_table_index=table_index if ip_version == u"ip6"
+ else Constants.BITWISE_NON_ZERO,
+ l2_table_index=table_index if ip_version == u"l2"
+ else Constants.BITWISE_NON_ZERO,
+ is_add=1)
+ err_msg = f"Failed to enable input acl on interface {interface}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def get_interface_classify_table(node, interface):
"""Get name of classify table for the given interface.
+ TODO: Move to Classify.py.
+
:param node: VPP node to get data from.
:param interface: Name or sw_if_index of a specific interface.
:type node: dict
:returns: Classify table name.
:rtype: str
"""
- if isinstance(interface, basestring):
+ if isinstance(interface, str):
sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
else:
sw_if_index = interface
- with VatTerminal(node) as vat:
- data = vat.vat_terminal_exec_cmd_from_template(
- "classify_interface_table.vat",
- sw_if_index=sw_if_index)
- return data[0]
+ cmd = u"classify_table_by_interface"
+ args = dict(
+ sw_if_index=sw_if_index
+ )
+ err_msg = f"Failed to get classify table name by interface {interface}"
+ with PapiSocketExecutor(node) as papi_exec:
+ reply = papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ return reply
@staticmethod
def get_sw_if_index(node, interface_name):
"""Get sw_if_index for the given interface from actual interface dump.
+ FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
+
:param node: VPP node to get interface data from.
:param interface_name: Name of the specific interface.
:type node: dict
:returns: sw_if_index of the given interface.
:rtype: str
"""
-
- with VatTerminal(node) as vat:
- if_data = vat.vat_terminal_exec_cmd_from_template(
- "interface_dump.vat")
- for interface in if_data[0]:
- if interface["interface_name"] == interface_name:
- return interface["sw_if_index"]
-
- return None
+ interface_data = InterfaceUtil.vpp_get_interface_data(
+ node, interface=interface_name
+ )
+ return interface_data.get(u"sw_if_index")
@staticmethod
def vxlan_gpe_dump(node, interface_name=None):
:param node: VPP node to get interface data from.
:param interface_name: Name of the specific interface. If None,
- information about all VxLAN GPE interfaces is returned.
+ information about all VxLAN GPE interfaces is returned.
:type node: dict
:type interface_name: str
:returns: Dictionary containing data for the given VxLAN GPE interface
- or if interface=None, the list of dictionaries with all VxLAN GPE
- interfaces.
+ or if interface=None, the list of dictionaries with all VxLAN GPE
+ interfaces.
:rtype: dict or list
"""
+ def process_vxlan_gpe_dump(vxlan_dump):
+ """Process vxlan_gpe dump.
+
+ :param vxlan_dump: Vxlan_gpe interface dump.
+ :type vxlan_dump: dict
+ :returns: Processed vxlan_gpe interface dump.
+ :rtype: dict
+ """
+ # Addresses arrive as raw bytes; for IPv4 only the first four
+ # bytes are meaningful, so slice before converting.
+ if vxlan_dump[u"is_ipv6"]:
+ vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
+ vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
+ else:
+ vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
+ vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
+ return vxlan_dump
+
+ if interface_name is not None:
+ sw_if_index = InterfaceUtil.get_interface_index(
+ node, interface_name
+ )
+ else:
+ # BITWISE_NON_ZERO acts as the "dump all interfaces" sentinel
+ # for the dump API call below.
+ sw_if_index = int(Constants.BITWISE_NON_ZERO)
+
+ cmd = u"vxlan_gpe_tunnel_dump"
+ args = dict(
+ sw_if_index=sw_if_index
+ )
+ err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ details = papi_exec.add(cmd, **args).get_details(err_msg)
+
+ # Return type mirrors the docstring: list for "all", dict for one.
+ data = list() if interface_name is None else dict()
+ for dump in details:
+ if interface_name is None:
+ data.append(process_vxlan_gpe_dump(dump))
+ elif dump[u"sw_if_index"] == sw_if_index:
+ data = process_vxlan_gpe_dump(dump)
+ break
+
+ logger.debug(f"VXLAN-GPE data:\n{data}")
+ return data
- with VatTerminal(node) as vat:
- vxlan_gpe_data = vat.vat_terminal_exec_cmd_from_template(
- "vxlan_gpe_dump.vat")
-
- if interface_name:
- sw_if_index = InterfaceUtil.get_sw_if_index(node, interface_name)
- if sw_if_index:
- for vxlan_gpe in vxlan_gpe_data[0]:
- if vxlan_gpe["sw_if_index"] == sw_if_index:
- return vxlan_gpe
- return {}
+ @staticmethod
+ def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
+ """Assign VPP interface to specific VRF/FIB table.
- return vxlan_gpe_data[0]
+ :param node: VPP node where the FIB and interface are located.
+ :param interface: Interface to be assigned to FIB.
+ :param table_id: VRF table ID.
+ :param ipv6: Assign to IPv6 table. Default False.
+ :type node: dict
+ :type interface: str or int
+ :type table_id: int
+ :type ipv6: bool
+ """
+ # NOTE(review): is_ipv6 presumably selects whether vrf_id targets
+ # the IPv6 FIB — confirm against the sw_interface_set_table API.
+ cmd = u"sw_interface_set_table"
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ is_ipv6=ipv6,
+ vrf_id=int(table_id)
+ )
+ err_msg = f"Failed to assign interface {interface} to FIB table"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def vpp_proxy_arp_interface_enable(node, interface):
- """Enable proxy ARP on interface.
+ def set_linux_interface_mac(
+ node, interface, mac, namespace=None, vf_id=None):
+ """Set MAC address for interface in linux.
- :param node: VPP node to enable proxy ARP on interface.
- :param interface: Interface to enable proxy ARP.
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param mac: MAC to be assigned to interface.
+ :param namespace: Execute command in namespace. Optional
+ :param vf_id: Virtual Function id. Optional
:type node: dict
- :type interface: str or int
+ :type interface: str
+ :type mac: str
+ :type namespace: str
+ :type vf_id: int
"""
- if isinstance(interface, basestring):
- sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
- else:
- sw_if_index = interface
+ # With vf_id the MAC is set on that VF slot of the PF; otherwise
+ # the interface's own address is set.
+ mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
+ else f"address {mac}"
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
- with VatTerminal(node) as vat:
- vat.vat_terminal_exec_cmd_from_template(
- "proxy_arp_intfc_enable.vat",
- sw_if_index=sw_if_index)
+ cmd = f"{ns_str} ip link set {interface} {mac_str}"
+ exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def vpp_ip_source_check_setup(node, interface):
- """Setup Reverse Path Forwarding source check on interface.
+ def set_linux_interface_promisc(
+ node, interface, namespace=None, vf_id=None, state=u"on"):
+ """Set promisc state for interface in linux.
- :param node: Node to setup RPF source check.
- :param interface: Interface name to setup RPF source check.
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Exec command in namespace. (Optional, Default: None)
+ :param vf_id: Virtual Function id. (Optional, Default: None)
+ :param state: State of feature. (Optional, Default: on)
:type node: dict
:type interface: str
+ :type namespace: str
+ :type vf_id: int
+ :type state: str
"""
- with VatTerminal(node) as vat:
- vat.vat_terminal_exec_cmd_from_template("ip_source_check.vat",
- interface_name=interface)
+ # Per-VF promisc when vf_id is given, device-level otherwise.
+ promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
+ else f"promisc {state}"
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
+
+ cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
+ exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
- """Assign VPP interface to specific VRF/FIB table.
+ def set_linux_interface_trust_on(
+ node, interface, namespace=None, vf_id=None):
+ """Set trust on (promisc) for interface in linux.
- :param node: VPP node where the FIB and interface are located.
- :param interface: Interface to be assigned to FIB.
- :param table_id: VRF table ID.
- :param ipv6: Assign to IPv6 table. Default False.
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Execute command in namespace. Optional
+ :param vf_id: Virtual Function id. Optional
:type node: dict
- :type interface: str or int
- :type table_id: int
- :type ipv6: bool
+ :type interface: str
+ :type namespace: str
+ :type vf_id: int
"""
- if isinstance(interface, basestring):
- sw_if_index = Topology.get_interface_sw_index(node, interface)
- else:
- sw_if_index = interface
+ # "trust on" is a per-VF attribute; without vf_id it is applied
+ # to the device itself.
+ trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
- ipv6 = 'ipv6' if ipv6 else ''
+ cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
+ exec_cmd_no_error(node, cmd, sudo=True)
- with VatTerminal(node) as vat:
- vat.vat_terminal_exec_cmd_from_template("set_fib_to_interface.vat",
- sw_index=sw_if_index,
- vrf=table_id,
- ipv6=ipv6)
+ @staticmethod
+ def set_linux_interface_spoof_off(
+ node, interface, namespace=None, vf_id=None):
+ """Set spoof off for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Execute command in namespace. Optional
+ :param vf_id: Virtual Function id. Optional
+ :type node: dict
+ :type interface: str
+ :type namespace: str
+ :type vf_id: int
+ """
+ # Disable MAC/VLAN spoof checking (per-VF when vf_id is given).
+ spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
+ else u"spoof off"
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
+
+ cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
+ exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def set_linux_interface_mac(node, interface, mac, namespace=None):
- """Set MAC address for interface in linux.
+ def set_linux_interface_state(
+ node, interface, namespace=None, state=u"up"):
+ """Set operational state for interface in linux.
:param node: Node where to execute command.
:param interface: Interface in namespace.
- :param mac: MAC to be assigned to interface.
:param namespace: Execute command in namespace. Optional
+ :param state: Up/Down.
:type node: dict
:type interface: str
- :type mac: str
:type namespace: str
+ :type state: str
"""
- if namespace is not None:
- cmd = 'ip netns exec {} ip link set {} address {}'.format(
- namespace, interface, mac)
- else:
- cmd = 'ip link set {} address {}'.format(interface, mac)
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
+
+ # state is passed verbatim to `ip link set` (expects "up"/"down").
+ cmd = f"{ns_str} ip link set dev {interface} {state}"
exec_cmd_no_error(node, cmd, sudo=True)
+
+
+ @staticmethod
+ def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
+ """Init PCI device. Check driver compatibility and bind to proper
+ drivers. Optionally create NIC VFs.
+
+ :param node: DUT node.
+ :param ifc_key: Interface key from topology file.
+ :param driver: Base driver to use.
+ :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
+ :param osi_layer: OSI Layer type to initialize TG with.
+ Default value "L2" sets linux interface spoof off.
+ :type node: dict
+ :type ifc_key: str
+ :type driver: str
+ :type numvfs: int
+ :type osi_layer: str
+ :returns: Virtual Function topology interface keys.
+ :rtype: list
+ :raises RuntimeError: If a reason preventing initialization is found.
+ """
+ # Gate each dataplane driver on a compatible kernel driver before
+ # delegating the actual setup to init_generic_interface.
+ kernel_driver = Topology.get_interface_driver(node, ifc_key)
+ vf_keys = []
+ if driver == u"avf":
+ if kernel_driver not in (
+ u"ice", u"iavf", u"i40e", u"i40evf"):
+ raise RuntimeError(
+ f"AVF needs ice or i40e compatible driver, not "
+ f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
+ )
+ vf_keys = InterfaceUtil.init_generic_interface(
+ node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+ )
+ elif driver == u"af_xdp":
+ if kernel_driver not in (
+ u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
+ raise RuntimeError(
+ f"AF_XDP needs ice or i40e or rdma compatible driver, not "
+ f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
+ )
+ vf_keys = InterfaceUtil.init_generic_interface(
+ node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+ )
+ return vf_keys
+
+ @staticmethod
+ def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
+ """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.
+
+ :param node: DUT node.
+ :param ifc_key: Interface key from topology file.
+ :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
+ :param osi_layer: OSI Layer type to initialize TG with.
+ Default value "L2" sets linux interface spoof off.
+ :type node: dict
+ :type ifc_key: str
+ :type numvfs: int
+ :type osi_layer: str
+ :returns: Virtual Function topology interface keys.
+ :rtype: list
+ :raises RuntimeError: If a reason preventing initialization is found.
+ """
+ # Read PCI address and driver.
+ pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
+ pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
+ uio_driver = Topology.get_uio_driver(node)
+ kernel_driver = Topology.get_interface_driver(node, ifc_key)
+ current_driver = DUTSetup.get_pci_dev_driver(
+ node, pf_pci_addr.replace(u":", r"\:"))
+ # Shell glob resolving the PF's netdev name from its PCI address.
+ pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
+
+ VPPUtil.stop_vpp_service(node)
+ if current_driver != kernel_driver:
+ # PCI device must be re-bound to kernel driver before creating VFs.
+ DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
+ # Stop VPP to prevent deadlock.
+ # Unbind from current driver.
+ DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+ # Bind to kernel driver.
+ DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
+
+ # Initialize PCI VFs.
+ DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
+
+ if not numvfs:
+ if osi_layer == u"L2":
+ InterfaceUtil.set_linux_interface_promisc(node, pf_dev)
+
+ vf_ifc_keys = []
+ # Set MAC address and bind each virtual function to uio driver.
+ for vf_id in range(numvfs):
+ # NOTE(review): pf_mac_addr[1] is deliberately skipped and the
+ # last octet replaced by vf_id, keeping VF MACs unique per VF
+ # while derived from the PF MAC — confirm intent.
+ vf_mac_addr = u":".join(
+ [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
+ pf_mac_addr[5], f"{vf_id:02x}"
+ ]
+ )
+
+ InterfaceUtil.set_linux_interface_trust_on(
+ node, pf_dev, vf_id=vf_id
+ )
+ if osi_layer == u"L2":
+ InterfaceUtil.set_linux_interface_spoof_off(
+ node, pf_dev, vf_id=vf_id
+ )
+ InterfaceUtil.set_linux_interface_mac(
+ node, pf_dev, vf_mac_addr, vf_id=vf_id
+ )
+ InterfaceUtil.set_linux_interface_state(
+ node, pf_dev, state=u"up"
+ )
+
+ DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
+ DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
+
+ # Add newly created ports into topology file
+ vf_ifc_name = f"{ifc_key}_vif"
+ vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
+ vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
+ Topology.update_interface_name(
+ node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
+ )
+ Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
+ Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
+ Topology.set_interface_numa_node(
+ node, vf_ifc_key, Topology.get_interface_numa_node(
+ node, ifc_key
+ )
+ )
+ vf_ifc_keys.append(vf_ifc_key)
+
+ return vf_ifc_keys
+
+ @staticmethod
+ def vpp_sw_interface_rx_placement_dump(node):
+ """Dump VPP interface RX placement on node.
+
+ :param node: Node to run command on.
+ :type node: dict
+ :returns: Thread mapping information as a list of dictionaries.
+ :rtype: list
+ """
+ cmd = u"sw_interface_rx_placement_dump"
+ err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
+ with PapiSocketExecutor(node) as papi_exec:
+ # Query placement only for interfaces VPP actually knows about
+ # (those with a sw_if_index recorded in the topology).
+ for ifc in node[u"interfaces"].values():
+ if ifc[u"vpp_sw_index"] is not None:
+ papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
+ details = papi_exec.get_details(err_msg)
+ return sorted(details, key=lambda k: k[u"sw_if_index"])
+
+ @staticmethod
+ def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
+ """Dump VPP interface RX placement on all given nodes.
+
+ Dumps are logged by the per-node call; nothing is returned.
+
+ :param nodes: Nodes to run command on.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
+
+ @staticmethod
+ def vpp_sw_interface_set_rx_placement(
+ node, sw_if_index, queue_id, worker_id):
+ """Set interface RX placement to worker on node.
+
+ :param node: Node to run command on.
+ :param sw_if_index: VPP SW interface index.
+ :param queue_id: VPP interface queue ID.
+ :param worker_id: VPP worker ID (indexing from 0).
+ :type node: dict
+ :type sw_if_index: int
+ :type queue_id: int
+ :type worker_id: int
+ :raises RuntimeError: If failed to run command on host or if no API
+ reply received.
+ """
+ cmd = u"sw_interface_set_rx_placement"
+ err_msg = f"Failed to set interface RX placement to worker " \
+ f"on host {node[u'host']}!"
+ args = dict(
+ sw_if_index=sw_if_index,
+ queue_id=queue_id,
+ worker_id=worker_id,
+ # is_main=False: place the queue on a worker thread, not main.
+ is_main=False
+ )
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def vpp_round_robin_rx_placement(
+ node, prefix, workers=None):
+ """Set Round Robin interface RX placement on all worker threads
+ on node.
+
+ If specified, workers limits the number of physical cores used
+ for data plane I/O work. Other cores are presumed to do something else,
+ e.g. asynchronous crypto processing.
+ None means all workers are used for data plane work.
+
+ :param node: Topology nodes.
+ :param prefix: Interface name prefix.
+ :param workers: Comma separated worker index numbers intended for
+ dataplane work.
+ :type node: dict
+ :type prefix: str
+ :type workers: str
+ """
+ thread_data = VPPUtil.vpp_show_threads(node)
+ # thread_data includes the main thread; the rest are workers.
+ worker_cnt = len(thread_data) - 1
+ if not worker_cnt:
+ # Single-threaded VPP: no workers, nothing to place.
+ return None
+ worker_ids = list()
+ if workers:
+ # Restrict to workers pinned to the requested cpu_ids.
+ for item in thread_data:
+ if str(item.cpu_id) in workers.split(u","):
+ worker_ids.append(item.id)
+ else:
+ for item in thread_data:
+ if u"vpp_main" not in item.name:
+ worker_ids.append(item.id)
+
+ worker_idx = 0
+ for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
+ for interface in node[u"interfaces"].values():
+ if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
+ and prefix in interface[u"name"]:
+ # NOTE(review): item.id appears to be 1-based (main
+ # thread first); "- 1" converts to the zero-based
+ # worker_id the set API expects — confirm.
+ InterfaceUtil.vpp_sw_interface_set_rx_placement(
+ node, placement[u"sw_if_index"], placement[u"queue_id"],
+ worker_ids[worker_idx % len(worker_ids)] - 1
+ )
+ worker_idx += 1
+
+ @staticmethod
+ def vpp_round_robin_rx_placement_on_all_duts(
+ nodes, prefix, workers=None):
+ """Set Round Robin interface RX placement on worker threads
+ on all DUTs.
+
+ If specified, workers limits the number of physical cores used
+ for data plane I/O work. Other cores are presumed to do something else,
+ e.g. asynchronous crypto processing.
+ None means all cores are used for data plane work.
+
+ :param nodes: Topology nodes.
+ :param prefix: Interface name prefix.
+ :param workers: Comma separated worker index numbers intended for
+ dataplane work.
+ :type nodes: dict
+ :type prefix: str
+ :type workers: str
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ InterfaceUtil.vpp_round_robin_rx_placement(
+ node, prefix, workers
+ )