-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from robot.api import logger
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.L2Util import L2Util
raise ValueError(f"Unknown if_type: {if_type}")
if node[u"type"] == NodeType.DUT:
+ if sw_if_index is None:
+ raise ValueError(
+ f"Interface index for {interface} not assigned by VPP."
+ )
if state == u"up":
flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
elif state == u"down":
except AssertionError as err:
logger.debug(f"Setting MTU failed.\n{err}")
- @staticmethod
- def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
- """Set Ethernet MTU on all interfaces.
-
- :param node: VPP node.
- :param mtu: Ethernet MTU size in Bytes. Default: 9200.
- :type node: dict
- :type mtu: int
- """
- for interface in node[u"interfaces"]:
- InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
-
- @staticmethod
- def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
- """Set Ethernet MTU on all interfaces on all DUTs.
-
- :param nodes: VPP nodes.
- :param mtu: Ethernet MTU size in Bytes. Default: 9200.
- :type nodes: dict
- :type mtu: int
- """
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
- InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
-
@staticmethod
def vpp_node_interfaces_ready_wait(node, retries=15):
"""Wait until all interfaces with admin-up are in link-up state.
:raises RuntimeError: if it is unable to create VxLAN interface on the
node.
"""
- cmd = u"vxlan_add_del_tunnel"
+ cmd = u"vxlan_add_del_tunnel_v3"
args = dict(
is_add=True,
instance=Constants.BITWISE_NON_ZERO,
err_msg = f"Failed to set VXLAN bypass on interface " \
f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_replies(err_msg)
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vxlan_dump(node, interface=None):
return ifc_name, sw_if_index
+ @staticmethod
+ def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+ """Create GTPU interface and return sw if index of created interface.
+
+ :param node: Node where to create GTPU interface.
+ :param teid: GTPU Tunnel Endpoint Identifier.
+ :param source_ip: Source IP of a GTPU Tunnel End Point.
+ :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+ :type node: dict
+ :type teid: int
+ :type source_ip: str
+ :type destination_ip: str
+ :returns: SW IF INDEX of created interface.
+ :rtype: int
+ :raises RuntimeError: if it is unable to create GTPU interface on the
+ node.
+ """
+ cmd = u"gtpu_add_del_tunnel"
+ args = dict(
+ is_add=True,
+ src_address=IPAddress.create_ip_address_object(
+ ip_address(source_ip)
+ ),
+ dst_address=IPAddress.create_ip_address_object(
+ ip_address(destination_ip)
+ ),
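+ # BITWISE_NON_ZERO (~0) is assumed here to mean "no multicast group",
+ # i.e. a plain unicast tunnel.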
+ mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+ encap_vrf_id=0,
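+ # Assumption: decap next index 2 selects ip4-input in the gtpu
+ # plugin's next-node enumeration; adjust if the plugin differs.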
+ decap_next_index=2,
+ teid=teid
+ )
+ err_msg = f"Failed to create GTPU tunnel interface " \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
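+ # Register the new tunnel in the topology so later keywords can
+ # look it up by interface key and name.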
+ if_key = Topology.add_new_port(node, u"gtpu_tunnel")
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return sw_if_index
+
+ @staticmethod
+ def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
+ """Enable GTPU offload RX onto interface.
+
+ :param node: Node to run command on.
+ :param interface: Name of the specific interface.
+ :param gtpu_if_index: Index of GTPU tunnel interface.
+ :type node: dict
+ :type interface: str
+ :type gtpu_if_index: int
+ """
+ sw_if_index = Topology.get_interface_sw_index(node, interface)
+
+ cmd = u"gtpu_offload_rx"
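+ # hw_if_index is the receiving physical interface, sw_if_index is
+ # the GTPU tunnel whose decapsulation gets offloaded onto it.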
+ args = dict(
+ hw_if_index=sw_if_index,
+ sw_if_index=gtpu_if_index,
+ enable=True
+ )
+
+ err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
@staticmethod
def vpp_create_loopback(node, mac=None):
"""Create loopback interface on VPP node.
txq_size=txq_size
)
err_msg = f"Failed to create AVF interface on host {node[u'host']}"
- with PapiSocketExecutor(node) as papi_exec:
- sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ # FIXME: Remove once the fw/driver is upgraded.
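+ # The create call is retried a few times, the for-else raises only
+ # when every attempt has failed.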
+ for _ in range(10):
+ with PapiSocketExecutor(node) as papi_exec:
+ try:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+ err_msg
+ )
+ break
+ except AssertionError:
+ logger.error(err_msg)
+ else:
+ raise AssertionError(err_msg)
InterfaceUtil.add_eth_interface(
node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
node, u"set logging class af_xdp level debug"
)
- cmd = u"af_xdp_create"
+ cmd = u"af_xdp_create_v2"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
node, u"set logging class rdma level debug"
)
- cmd = u"rdma_create_v2"
+ cmd = u"rdma_create_v3"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
# Note: Set True for non-jumbo packets.
no_multi_seg=False,
max_pktlen=0,
+ # TODO: Apply desired RSS flags.
)
err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
cmd = f"{ns_str} ip link set {interface} {mac_str}"
exec_cmd_no_error(node, cmd, sudo=True)
+ @staticmethod
+ def set_linux_interface_promisc(
+ node, interface, namespace=None, vf_id=None, state=u"on"):
+ """Set promisc state for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Exec command in namespace. (Optional, Default: None)
+ :param vf_id: Virtual Function id. (Optional, Default: None)
+ :param state: State of feature. (Optional, Default: on)
+ :type node: dict
+ :type interface: str
+ :type namespace: str
+ :type vf_id: int
+ :type state: str
+ """
+ promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
+ else f"promisc {state}"
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
+
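+ # E.g. for namespace "ns1" and vf_id 0 this builds:
+ # "ip netns exec ns1 ip link set dev <interface> vf 0 promisc on".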
+ cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
+ exec_cmd_no_error(node, cmd, sudo=True)
+
@staticmethod
def set_linux_interface_trust_on(
node, interface, namespace=None, vf_id=None):
)
elif driver == u"af_xdp":
if kernel_driver not in (
- u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
+ u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
+ u"ixgbe"):
raise RuntimeError(
- f"AF_XDP needs ice or i40e or rdma compatible driver, not "
+ f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
)
vf_keys = InterfaceUtil.init_generic_interface(
node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
)
+ elif driver == u"rdma-core":
+ vf_keys = InterfaceUtil.init_generic_interface(
+ node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+ )
return vf_keys
@staticmethod
kernel_driver = Topology.get_interface_driver(node, ifc_key)
current_driver = DUTSetup.get_pci_dev_driver(
node, pf_pci_addr.replace(u":", r"\:"))
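+ # Resolve the PF netdev name via shell command substitution at
+ # execution time, as it may change after driver rebinding below.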
+ pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
VPPUtil.stop_vpp_service(node)
if current_driver != kernel_driver:
# PCI device must be re-bound to kernel driver before creating VFs.
DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
# Stop VPP to prevent deadlock.
- # Unbind from current driver.
- DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+ # Unbind from current driver if bound.
+ if current_driver:
+ DUTSetup.pci_driver_unbind(node, pf_pci_addr)
# Bind to kernel driver.
DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
# Initialize PCI VFs.
DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
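+ # Without VFs the PF itself forwards traffic, so in L2 scenarios it
+ # needs promiscuous mode to accept frames for foreign MACs.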
+ if not numvfs:
+ if osi_layer == u"L2":
+ InterfaceUtil.set_linux_interface_promisc(node, pf_dev)
+
vf_ifc_keys = []
# Set MAC address and bind each virtual function to uio driver.
for vf_id in range(numvfs):
]
)
- pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
InterfaceUtil.set_linux_interface_trust_on(
node, pf_dev, vf_id=vf_id
)
@staticmethod
def vpp_round_robin_rx_placement(
- node, prefix, dp_worker_limit=None):
+ node, prefix, workers=None):
"""Set Round Robin interface RX placement on all worker threads
on node.
- If specified, dp_core_limit limits the number of physical cores used
+ If specified, workers limits which physical cores are used
for data plane I/O work. Other cores are presumed to do something else,
e.g. asynchronous crypto processing.
None means all workers are used for data plane work.
- Note this keyword specifies workers, not cores.
:param node: Topology nodes.
:param prefix: Interface name prefix.
- :param dp_worker_limit: How many cores for data plane work.
+ :param workers: Comma separated list of CPU IDs whose worker threads
+ should handle data plane work, e.g. "2,4".
:type node: dict
:type prefix: str
- :type dp_worker_limit: Optional[int]
+ :type workers: str
"""
- worker_id = 0
- worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
- if dp_worker_limit is not None:
- worker_cnt = min(worker_cnt, dp_worker_limit)
+ thread_data = VPPUtil.vpp_show_threads(node)
+ worker_cnt = len(thread_data) - 1
if not worker_cnt:
- return
+ return None
+ worker_ids = list()
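+ # The workers string is matched against each thread's CPU ID, so it
+ # selects worker threads by the core they are pinned to.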
+ if workers:
+ for item in thread_data:
+ if str(item.cpu_id) in workers.split(u","):
+ worker_ids.append(item.id)
+ else:
+ for item in thread_data:
+ if u"vpp_main" not in item.name:
+ worker_ids.append(item.id)
+
+ worker_idx = 0
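+ # Thread IDs include vpp_main (assumed id 0), so subtract 1 below to
+ # get the zero-based worker index expected by set_rx_placement.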
for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
for interface in node[u"interfaces"].values():
if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
and prefix in interface[u"name"]:
InterfaceUtil.vpp_sw_interface_set_rx_placement(
node, placement[u"sw_if_index"], placement[u"queue_id"],
- worker_id % worker_cnt
+ worker_ids[worker_idx % len(worker_ids)] - 1
)
- worker_id += 1
+ worker_idx += 1
@staticmethod
def vpp_round_robin_rx_placement_on_all_duts(
- nodes, prefix, dp_core_limit=None):
- """Set Round Robin interface RX placement on all worker threads
+ nodes, prefix, workers=None):
+ """Set Round Robin interface RX placement on worker threads
on all DUTs.
- If specified, dp_core_limit limits the number of physical cores used
+ If specified, workers limits which physical cores are used
for data plane I/O work. Other cores are presumed to do something else,
e.g. asynchronous crypto processing.
None means all cores are used for data plane work.
- Note this keyword specifies cores, not workers.
:param nodes: Topology nodes.
:param prefix: Interface name prefix.
- :param dp_worker_limit: How many cores for data plane work.
+ :param workers: Comma separated list of CPU IDs whose worker threads
+ should handle data plane work, e.g. "2,4".
:type nodes: dict
:type prefix: str
- :type dp_worker_limit: Optional[int]
+ :type workers: str
"""
for node in nodes.values():
if node[u"type"] == NodeType.DUT:
- dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
- phy_cores=dp_core_limit,
- smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
- )
InterfaceUtil.vpp_round_robin_rx_placement(
- node, prefix, dp_worker_limit
+ node, prefix, workers
)