-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""Interface util library."""
+from json import loads
from time import sleep
from enum import IntEnum
from ipaddress import ip_address
from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.L2Util import L2Util
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
-from resources.libraries.python.parsers.JsonParser import JsonParser
from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.VPPUtil import VPPUtil
raise ValueError(f"Unknown if_type: {if_type}")
if node[u"type"] == NodeType.DUT:
+ if sw_if_index is None:
+ raise ValueError(
+ f"Interface index for {interface} not assigned by VPP."
+ )
if state == u"up":
flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
elif state == u"down":
cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
exec_cmd_no_error(node, cmd, sudo=True)
+ @staticmethod
+ def set_interface_xdp_off(node, pf_pcis):
+ """Detaches any currently attached XDP/BPF program from the specified
+ interfaces.
+
+ :param node: Topology node.
+ :param pf_pcis: List of the node's interface PCI addresses.
+ :type node: dict
+ :type pf_pcis: list
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
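+ # Detaching the program returns the interface to the default kernel datapath.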
+ cmd = f"ip link set dev {pf_eth} xdp off"
+ exec_cmd_no_error(node, cmd, sudo=True)
+
@staticmethod
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
"""Set Ethernet flow control for specified interfaces.
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def vpp_set_interface_mtu(node, interface, mtu=9200):
- """Set Ethernet MTU on interface.
+ def vpp_set_interface_mtu(node, interface, mtu):
+ """Apply new MTU value to a VPP hardware interface.
+
+ The interface should be down when this is called.
:param node: VPP node.
- :param interface: Interface to setup MTU. Default: 9200.
+ :param interface: Interface to set MTU on.
:param mtu: Ethernet MTU size in Bytes.
:type node: dict
:type interface: str or int
sw_if_index = Topology.get_interface_sw_index(node, interface)
else:
sw_if_index = interface
-
cmd = u"hw_interface_set_mtu"
err_msg = f"Failed to set interface MTU on host {node[u'host']}"
- args = dict(
- sw_if_index=sw_if_index,
- mtu=int(mtu)
- )
- try:
- with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_reply(err_msg)
- except AssertionError as err:
- logger.debug(f"Setting MTU failed.\n{err}")
-
- @staticmethod
- def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
- """Set Ethernet MTU on all interfaces.
-
- :param node: VPP node.
- :param mtu: Ethernet MTU size in Bytes. Default: 9200.
- :type node: dict
- :type mtu: int
- """
- for interface in node[u"interfaces"]:
- InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
-
- @staticmethod
- def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
- """Set Ethernet MTU on all interfaces on all DUTs.
-
- :param nodes: VPP nodes.
- :param mtu: Ethernet MTU size in Bytes. Default: 9200.
- :type nodes: dict
- :type mtu: int
- """
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
- InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
+ args = dict(sw_if_index=sw_if_index, mtu=int(mtu))
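+ # VPP may reject the change while the interface is admin-up (see docstring note).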
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vpp_node_interfaces_ready_wait(node, retries=15):
ret_code, stdout, _ = ssh.exec_command(cmd)
if int(ret_code) != 0:
raise RuntimeError(u"Get interface name and MAC failed")
- tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
- interfaces = JsonParser().parse_data(tmp)
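+ # Each stdout line is a '"mac": "ifname"' pair; wrap them to form one JSON object.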
+ interfaces = loads("{" + stdout.rstrip().replace("\n", ",") + "}")
for interface in node[u"interfaces"].values():
name = interfaces.get(interface[u"mac_address"])
if name is None:
:raises RuntimeError: if it is unable to create VxLAN interface on the
node.
"""
- cmd = u"vxlan_add_del_tunnel"
+ cmd = u"vxlan_add_del_tunnel_v3"
args = dict(
is_add=True,
instance=Constants.BITWISE_NON_ZERO,
err_msg = f"Failed to set VXLAN bypass on interface " \
f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_replies(err_msg)
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vxlan_dump(node, interface=None):
:raises RuntimeError: if it is unable to create GTPU interface on the
node.
"""
- cmd = u"gtpu_add_del_tunnel"
+ cmd = u"gtpu_add_del_tunnel_v2"
args = dict(
is_add=True,
src_address=IPAddress.create_ip_address_object(
),
mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
encap_vrf_id=0,
- decap_next_index=2,
- teid=teid
+ decap_next_index=2, # ipv4
+ teid=teid,
+ # pdu_extension: Unused, false by default.
+ # qfi: Irrelevant when pdu_extension is not used.
)
err_msg = f"Failed to create GTPU tunnel interface " \
f"on host {node[u'host']}"
return sw_if_index
+ @staticmethod
+ def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
+ """Enable GTPU offload RX onto interface.
+
+ :param node: Node to run command on.
+ :param interface: Name of the specific interface.
+ :param gtpu_if_index: Index of GTPU tunnel interface.
+
+ :type node: dict
+ :type interface: str
+ :type gtpu_if_index: int
+ """
+ sw_if_index = Topology.get_interface_sw_index(node, interface)
+
+ cmd = u"gtpu_offload_rx"
+ args = dict(
+ hw_if_index=sw_if_index,
+ sw_if_index=gtpu_if_index,
+ enable=True
+ )
+
+ err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
@staticmethod
def vpp_create_loopback(node, mac=None):
"""Create loopback interface on VPP node.
node, u"set logging class af_xdp level debug"
)
- cmd = u"af_xdp_create"
+ cmd = u"af_xdp_create_v3"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
node, u"set logging class rdma level debug"
)
- cmd = u"rdma_create_v2"
+ cmd = u"rdma_create_v4"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
# Note: Set True for non-jumbo packets.
no_multi_seg=False,
max_pktlen=0,
+ # TODO: Apply desired RSS flags.
+ # rss4 kept 0 (auto) as API default.
+ # rss6 kept 0 (auto) as API default.
)
err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
vf_keys = InterfaceUtil.init_generic_interface(
node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
)
+ elif driver == u"rdma-core":
+ vf_keys = InterfaceUtil.init_generic_interface(
+ node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+ )
return vf_keys
@staticmethod
# PCI device must be re-bound to kernel driver before creating VFs.
DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
# Stop VPP to prevent deadlock.
- # Unbind from current driver.
- DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+ # Unbind from current driver if bound.
+ if current_driver:
+ DUTSetup.pci_driver_unbind(node, pf_pci_addr)
# Bind to kernel driver.
DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
# Initialize PCI VFs.
- DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
+ DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs=numvfs)
if not numvfs:
if osi_layer == u"L2":
node, pf_dev, state=u"up"
)
- DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
- DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
+ vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
+ current_driver = DUTSetup.get_pci_dev_driver(
+ node, vf_pci_addr.replace(":", r"\:")
+ )
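+ # A freshly created VF may have no driver bound yet; unbind only when one is reported.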
+ if current_driver:
+ DUTSetup.pci_vf_driver_unbind(
+ node, pf_pci_addr, vf_id
+ )
+ DUTSetup.pci_vf_driver_bind(
+ node, pf_pci_addr, vf_id, uio_driver
+ )
# Add newly created ports into topology file
vf_ifc_name = f"{ifc_key}_vif"
- vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
Topology.update_interface_name(
node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
thread_data = VPPUtil.vpp_show_threads(node)
worker_cnt = len(thread_data) - 1
if not worker_cnt:
- return None
+ return
worker_ids = list()
if workers:
for item in thread_data:
@staticmethod
def vpp_round_robin_rx_placement_on_all_duts(
- nodes, prefix, workers=None):
+ nodes, prefix, use_dp_cores=False):
"""Set Round Robin interface RX placement on worker threads
on all DUTs.
:param nodes: Topology nodes.
:param prefix: Interface name prefix.
- :param workers: Comma separated worker index numbers intended for
- dataplane work.
+ :param use_dp_cores: Limit RX placement to the node's dataplane cores.
:type nodes: dict
:type prefix: str
- :type workers: str
+ :type use_dp_cores: bool
"""
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
+ workers = None
+ if use_dp_cores:
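+ # The per-node ${<node>_cpu_dp} variable is expected to hold the
+ # dataplane worker list computed during CPU allocation.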
+ workers = BuiltIn().get_variable_value(
+ f"${{{node_name}_cpu_dp}}"
+ )
InterfaceUtil.vpp_round_robin_rx_placement(
node, prefix, workers
)