-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
RDMA_API_MODE_DV = 2
+class AfXdpMode(IntEnum):
+    """AF_XDP interface mode.
+
+    Numeric values are passed verbatim as the ``mode`` argument of the
+    VPP ``af_xdp_create_v2`` API call.
+    """
+    # Let VPP/driver pick the best supported mode.
+    AF_XDP_API_MODE_AUTO = 0
+    # Force copy mode.
+    AF_XDP_API_MODE_COPY = 1
+    # Force zero-copy mode.
+    AF_XDP_API_MODE_ZERO_COPY = 2
+
+
class InterfaceUtil:
"""General utilities for managing interfaces"""
raise ValueError(f"Unknown if_type: {if_type}")
if node[u"type"] == NodeType.DUT:
+ if sw_if_index is None:
+ raise ValueError(
+ f"Interface index for {interface} not assigned by VPP."
+ )
if state == u"up":
flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
elif state == u"down":
f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
)
+    @staticmethod
+    def set_interface_state_pci(
+            node, pf_pcis, namespace=None, state=u"up"):
+        """Set operational state for interface specified by PCI address.
+
+        :param node: Topology node.
+        :param pf_pcis: List of node's interfaces PCI addresses.
+        :param namespace: Exec command in namespace. (Optional, Default: none)
+        :param state: Up/Down. (Optional, default: up)
+        :type node: dict
+        :type pf_pcis: list
+        :type namespace: str
+        :type state: str
+        """
+        for pf_pci in pf_pcis:
+            # Resolve each PCI address to its Linux interface name first.
+            pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+            InterfaceUtil.set_linux_interface_state(
+                node, pf_eth, namespace=namespace, state=state
+            )
+
@staticmethod
def set_interface_mtu(node, pf_pcis, mtu=9200):
"""Set Ethernet MTU for specified interfaces.
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def set_interface_flow_control(node, pf_pcis, rx=u"off", tx=u"off"):
+    def set_interface_channels(
+            node, pf_pcis, num_queues=1, channel=u"combined"):
+        """Set interface channels for specified interfaces.
+
+        Runs ``ethtool --set-channels`` on the Linux interface name
+        resolved from each PCI address.
+
+        :param node: Topology node.
+        :param pf_pcis: List of node's interfaces PCI addresses.
+        :param num_queues: Number of channels. (Optional, Default: 1)
+        :param channel: Channel type. (Optional, Default: combined)
+        :type node: dict
+        :type pf_pcis: list
+        :type num_queues: int
+        :type channel: str
+        """
+        for pf_pci in pf_pcis:
+            pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+            cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
+            exec_cmd_no_error(node, cmd, sudo=True)
+
+    @staticmethod
+    def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
         """Set Ethernet flow control for specified interfaces.
 
         :param node: Topology node.
         :param pf_pcis: List of node's interfaces PCI addresses.
-        :param rx: RX flow. Default: off.
-        :param tx: TX flow. Default: off.
+        :param rxf: RX flow. (Optional, Default: off).
+        :param txf: TX flow. (Optional, Default: off).
-        :type nodes: dict
+        :type node: dict
         :type pf_pcis: list
-        :type rx: str
-        :type tx: str
+        :type rxf: str
+        :type txf: str
+        :raises RuntimeError: If ethtool fails with a code other than 0
+            or 78.
         """
         for pf_pci in pf_pcis:
             pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
-            cmd = f"ethtool -A {pf_eth} rx off tx off"
+            cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
             ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
+            # Exit code 78 is tolerated in addition to 0 — presumably
+            # "requested state already set"; TODO(review): confirm the
+            # ethtool exit-code semantics.
             if int(ret_code) not in (0, 78):
-                raise RuntimeError("Failed to set MTU on {pf_eth}!")
-
+                raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
@staticmethod
def set_pci_parameter(node, pf_pcis, key, value):
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
except AssertionError as err:
- # TODO: Make failure tolerance optional.
- logger.debug(f"Setting MTU failed. Expected?\n{err}")
-
- @staticmethod
- def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
- """Set Ethernet MTU on all interfaces.
-
- :param node: VPP node.
- :param mtu: Ethernet MTU size in Bytes. Default: 9200.
- :type node: dict
- :type mtu: int
- """
- for interface in node[u"interfaces"]:
- InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
-
- @staticmethod
- def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
- """Set Ethernet MTU on all interfaces on all DUTs.
-
- :param nodes: VPP nodes.
- :param mtu: Ethernet MTU size in Bytes. Default: 9200.
- :type nodes: dict
- :type mtu: int
- """
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
- InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
+ logger.debug(f"Setting MTU failed.\n{err}")
@staticmethod
def vpp_node_interfaces_ready_wait(node, retries=15):
:raises RuntimeError: if it is unable to create VxLAN interface on the
node.
"""
- cmd = u"vxlan_add_del_tunnel"
+ cmd = u"vxlan_add_del_tunnel_v3"
args = dict(
is_add=True,
instance=Constants.BITWISE_NON_ZERO,
return ifc_name, sw_if_index
+    @staticmethod
+    def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+        """Create GTPU interface and return sw if index of created interface.
+
+        :param node: Node where to create GTPU interface.
+        :param teid: GTPU Tunnel Endpoint Identifier.
+        :param source_ip: Source IP of a GTPU Tunnel End Point.
+        :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+        :type node: dict
+        :type teid: int
+        :type source_ip: str
+        :type destination_ip: str
+        :returns: SW IF INDEX of created interface.
+        :rtype: int
+        :raises RuntimeError: if it is unable to create GTPU interface on the
+            node.
+        """
+        cmd = u"gtpu_add_del_tunnel"
+        args = dict(
+            is_add=True,
+            src_address=IPAddress.create_ip_address_object(
+                ip_address(source_ip)
+            ),
+            dst_address=IPAddress.create_ip_address_object(
+                ip_address(destination_ip)
+            ),
+            # All-ones value marks the multicast index as unset (unicast
+            # tunnel) — TODO(review): confirm against the gtpu API spec.
+            mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+            encap_vrf_id=0,
+            # NOTE(review): decap next node index hard-coded to 2;
+            # presumably the l2 input node — verify against the VPP
+            # gtpu decap-next enum.
+            decap_next_index=2,
+            teid=teid
+        )
+        err_msg = f"Failed to create GTPU tunnel interface " \
+            f"on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        # Register the new tunnel in the topology so later keywords can
+        # reference it by interface key and name.
+        if_key = Topology.add_new_port(node, u"gtpu_tunnel")
+        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+        Topology.update_interface_name(node, if_key, ifc_name)
+
+        return sw_if_index
+
+
+    @staticmethod
+    def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
+        """Enable GTPU offload RX onto interface.
+
+        :param node: Node to run command on.
+        :param interface: Name of the specific interface.
+        :param gtpu_if_index: Index of GTPU tunnel interface.
+        :type node: dict
+        :type interface: str
+        :type gtpu_if_index: int
+        """
+        sw_if_index = Topology.get_interface_sw_index(node, interface)
+
+        cmd = u"gtpu_offload_rx"
+        args = dict(
+            # Physical interface carrying the tunnel traffic.
+            hw_if_index=sw_if_index,
+            # The GTPU tunnel interface to offload onto.
+            sw_if_index=gtpu_if_index,
+            enable=True
+        )
+
+        err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
+
@staticmethod
def vpp_create_loopback(node, mac=None):
"""Create loopback interface on VPP node.
:raises RuntimeError: If it is not possible to create loopback on the
node.
"""
- cmd = u"create_loopback"
+ cmd = u"create_loopback_instance"
args = dict(
- mac_address=L2Util.mac_to_bin(mac) if mac else 0
+ mac_address=L2Util.mac_to_bin(mac) if mac else 0,
+ is_specified=False,
+ user_instance=0,
)
err_msg = f"Failed to create loopback interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
return sw_if_index
@staticmethod
- def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
+ def vpp_create_bond_interface(
+ node, mode, load_balance=None, mac=None, gso=False):
"""Create bond interface on VPP node.
:param node: DUT node from topology.
:param mode: Link bonding mode.
:param load_balance: Load balance (optional, valid for xor and lacp
- modes, otherwise ignored).
+ modes, otherwise ignored). Default: None.
:param mac: MAC address to assign to the bond interface (optional).
+ Default: None.
+ :param gso: Enable GSO support (optional). Default: False.
:type node: dict
:type mode: str
:type load_balance: str
:type mac: str
+ :type gso: bool
:returns: Interface key (name) in topology.
:rtype: str
:raises RuntimeError: If it is not possible to create bond interface on
the node.
"""
- cmd = u"bond_create"
+ cmd = u"bond_create2"
args = dict(
id=int(Constants.BITWISE_NON_ZERO),
use_custom_mac=bool(mac is not None),
LinkBondLoadBalanceAlgo,
f"BOND_API_LB_ALGO_{load_balance.upper()}"
).value,
- numa_only=False
+ numa_only=False,
+ enable_gso=gso
)
err_msg = f"Failed to create bond interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
txq_size=txq_size
)
err_msg = f"Failed to create AVF interface on host {node[u'host']}"
+
+ # FIXME: Remove once the fw/driver is upgraded.
+ for _ in range(10):
+ with PapiSocketExecutor(node) as papi_exec:
+ try:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+ err_msg
+ )
+ break
+ except AssertionError:
+ logger.error(err_msg)
+ else:
+ raise AssertionError(err_msg)
+
+ InterfaceUtil.add_eth_interface(
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
+ host_if_key=if_key
+ )
+
+ return Topology.get_interface_by_sw_index(node, sw_if_index)
+
+ @staticmethod
+ def vpp_create_af_xdp_interface(
+ node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
+ mode=u"auto"):
+ """Create AF_XDP interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param if_key: Physical interface key from topology file of interface
+ to be bound to compatible driver.
+ :param num_rx_queues: Number of RX queues. (Optional, Default: none)
+ :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
+ :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
+ :param mode: AF_XDP interface mode. (Optional, Default: auto).
+ :type node: dict
+ :type if_key: str
+ :type num_rx_queues: int
+ :type rxq_size: int
+ :type txq_size: int
+ :type mode: str
+ :returns: Interface key (name) in topology file.
+ :rtype: str
+ :raises RuntimeError: If it is not possible to create AF_XDP interface
+ on the node.
+ """
+ PapiSocketExecutor.run_cli_cmd(
+ node, u"set logging class af_xdp level debug"
+ )
+
+ cmd = u"af_xdp_create_v2"
+ pci_addr = Topology.get_interface_pci_addr(node, if_key)
+ args = dict(
+ name=InterfaceUtil.pci_to_eth(node, pci_addr),
+ host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
+ rxq_num=int(num_rx_queues) if num_rx_queues else 0,
+ rxq_size=rxq_size,
+ txq_size=txq_size,
+ mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
+ )
+ err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+ InterfaceUtil.vpp_set_interface_mac(
+ node, sw_if_index, Topology.get_interface_mac(node, if_key)
+ )
InterfaceUtil.add_eth_interface(
- node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
host_if_key=if_key
)
node, u"set logging class rdma level debug"
)
- cmd = u"rdma_create"
+ cmd = u"rdma_create_v3"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
rxq_size=rxq_size,
txq_size=txq_size,
mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
+ # Note: Set True for non-jumbo packets.
+ no_multi_seg=False,
+ max_pktlen=0,
+ # TODO: Apply desired RSS flags.
)
err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
return Topology.get_interface_by_sw_index(node, sw_if_index)
@staticmethod
- def vpp_enslave_physical_interface(node, interface, bond_if):
- """Enslave physical interface to bond interface on VPP node.
+ def vpp_add_bond_member(node, interface, bond_if):
+ """Add member interface to bond interface on VPP node.
:param node: DUT node from topology.
:param interface: Physical interface key from topology file.
:type node: dict
:type interface: str
:type bond_if: str
- :raises RuntimeError: If it is not possible to enslave physical
- interface to bond interface on the node.
+ :raises RuntimeError: If it is not possible to add member to bond
+ interface on the node.
"""
- cmd = u"bond_enslave"
+ cmd = u"bond_add_member"
args = dict(
sw_if_index=Topology.get_interface_sw_index(node, interface),
bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
is_passive=False,
is_long_timeout=False
)
- err_msg = f"Failed to enslave physical interface {interface} to bond " \
- f"interface {bond_if} on host {node[u'host']}"
+ err_msg = f"Failed to add member {interface} to bond interface " \
+ f"{bond_if} on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type node: dict
:type verbose: bool
"""
- cmd = u"sw_interface_bond_dump"
+ cmd = u"sw_bond_interface_dump"
err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
data = f"Bond data on node {node[u'host']}:\n"
data += u" load balance: {lb}\n".format(
lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
)
- data += f" number of active slaves: {bond[u'active_slaves']}\n"
+ data += f" number of active members: {bond[u'active_members']}\n"
if verbose:
- slave_data = InterfaceUtil.vpp_bond_slave_dump(
+ member_data = InterfaceUtil.vpp_bond_member_dump(
node, Topology.get_interface_by_sw_index(
node, bond[u"sw_if_index"]
)
)
- for slave in slave_data:
- if not slave[u"is_passive"]:
- data += f" {slave[u'interface_name']}\n"
- data += f" number of slaves: {bond[u'slaves']}\n"
+ for member in member_data:
+ if not member[u"is_passive"]:
+ data += f" {member[u'interface_name']}\n"
+ data += f" number of members: {bond[u'members']}\n"
if verbose:
- for slave in slave_data:
- data += f" {slave[u'interface_name']}\n"
+ for member in member_data:
+ data += f" {member[u'interface_name']}\n"
data += f" interface id: {bond[u'id']}\n"
data += f" sw_if_index: {bond[u'sw_if_index']}\n"
logger.info(data)
@staticmethod
- def vpp_bond_slave_dump(node, interface):
+ def vpp_bond_member_dump(node, interface):
"""Get bond interface slave(s) data on VPP node.
:param node: DUT node from topology.
:returns: Bond slave interface data.
:rtype: dict
"""
- cmd = u"sw_interface_slave_dump"
+ cmd = u"sw_member_interface_dump"
args = dict(
sw_if_index=Topology.get_interface_sw_index(node, interface)
)
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
- logger.debug(f"Slave data:\n{details}")
+ logger.debug(f"Member data:\n{details}")
return details
@staticmethod
cmd = f"{ns_str} ip link set {interface} {mac_str}"
exec_cmd_no_error(node, cmd, sudo=True)
+    @staticmethod
+    def set_linux_interface_promisc(
+            node, interface, namespace=None, vf_id=None, state=u"on"):
+        """Turn promiscuous mode on or off for a Linux interface.
+
+        When vf_id is given, the promisc flag is applied to that virtual
+        function of the interface instead of the interface itself.
+
+        :param node: Node where to execute command.
+        :param interface: Interface in namespace.
+        :param namespace: Exec command in namespace. (Optional, Default: None)
+        :param vf_id: Virtual Function id. (Optional, Default: None)
+        :param state: State of feature. (Optional, Default: on)
+        :type node: dict
+        :type interface: str
+        :type namespace: str
+        :type vf_id: int
+        :type state: str
+        """
+        if vf_id is None:
+            promisc_str = f"promisc {state}"
+        else:
+            promisc_str = f"vf {vf_id} promisc {state}"
+        if namespace:
+            ns_str = f"ip netns exec {namespace}"
+        else:
+            ns_str = u""
+
+        cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
+        exec_cmd_no_error(node, cmd, sudo=True)
+
+
@staticmethod
def set_linux_interface_trust_on(
node, interface, namespace=None, vf_id=None):
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
- """Init PCI device by creating VIFs and bind them to vfio-pci for AVF
- driver testing on DUT.
+    def set_linux_interface_state(
+            node, interface, namespace=None, state=u"up"):
+        """Set operational state for interface in linux.
+
+        :param node: Node where to execute command.
+        :param interface: Interface in namespace.
+        :param namespace: Execute command in namespace. (Optional,
+            Default: None)
+        :param state: Up/Down. (Optional, Default: up)
+        :type node: dict
+        :type interface: str
+        :type namespace: str
+        :type state: str
+        """
+        ns_str = f"ip netns exec {namespace}" if namespace else u""
+
+        cmd = f"{ns_str} ip link set dev {interface} {state}"
+        exec_cmd_no_error(node, cmd, sudo=True)
+
+    @staticmethod
+    def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
+        """Init PCI device. Check driver compatibility and bind to proper
+        drivers. Optionally create NIC VFs.
+
+        :param node: DUT node.
+        :param ifc_key: Interface key from topology file.
+        :param driver: Base driver to use.
+        :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs.
+        :param osi_layer: OSI Layer type to initialize TG with.
+            Default value "L2" sets linux interface spoof off.
+        :type node: dict
+        :type ifc_key: str
+        :type driver: str
+        :type numvfs: int
+        :type osi_layer: str
+        :returns: Virtual Function topology interface keys.
+        :rtype: list
+        :raises RuntimeError: If a reason preventing initialization is found.
+        """
+        kernel_driver = Topology.get_interface_driver(node, ifc_key)
+        # Validate kernel driver compatibility first; rdma-core accepts
+        # any kernel driver, an unknown base driver initializes nothing.
+        if driver == u"avf":
+            if kernel_driver not in (
+                    u"ice", u"iavf", u"i40e", u"i40evf"):
+                raise RuntimeError(
+                    f"AVF needs ice or i40e compatible driver, not "
+                    f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
+                )
+        elif driver == u"af_xdp":
+            if kernel_driver not in (
+                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
+                    u"ixgbe"):
+                raise RuntimeError(
+                    f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
+                    f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
+                )
+        elif driver != u"rdma-core":
+            # Unknown base driver: nothing to initialize, no VF keys.
+            return list()
+        # All supported drivers share the same generic initialization.
+        return InterfaceUtil.init_generic_interface(
+            node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+        )
+
+ @staticmethod
+ def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
+ """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.
:param node: DUT node.
:param ifc_key: Interface key from topology file.
pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
uio_driver = Topology.get_uio_driver(node)
kernel_driver = Topology.get_interface_driver(node, ifc_key)
- if kernel_driver not in (u"ice", u"iavf", u"i40e", u"i40evf"):
- raise RuntimeError(
- f"AVF needs ice or i40e compatible driver, not {kernel_driver}"
- f"at node {node[u'host']} ifc {ifc_key}"
- )
current_driver = DUTSetup.get_pci_dev_driver(
node, pf_pci_addr.replace(u":", r"\:"))
+ pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
VPPUtil.stop_vpp_service(node)
if current_driver != kernel_driver:
# PCI device must be re-bound to kernel driver before creating VFs.
DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
# Stop VPP to prevent deadlock.
- # Unbind from current driver.
- DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+ # Unbind from current driver if bound.
+ if current_driver:
+ DUTSetup.pci_driver_unbind(node, pf_pci_addr)
# Bind to kernel driver.
DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
# Initialize PCI VFs.
DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
+ if not numvfs:
+ if osi_layer == u"L2":
+ InterfaceUtil.set_linux_interface_promisc(node, pf_dev)
+
vf_ifc_keys = []
# Set MAC address and bind each virtual function to uio driver.
for vf_id in range(numvfs):
]
)
- pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
InterfaceUtil.set_linux_interface_trust_on(
node, pf_dev, vf_id=vf_id
)
InterfaceUtil.set_linux_interface_mac(
node, pf_dev, vf_mac_addr, vf_id=vf_id
)
+ InterfaceUtil.set_linux_interface_state(
+ node, pf_dev, state=u"up"
+ )
DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
details = papi_exec.get_details(err_msg)
return sorted(details, key=lambda k: k[u"sw_if_index"])
+    @staticmethod
+    def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
+        """Dump VPP interface RX placement on all given nodes.
+
+        Only DUT nodes are processed; the function itself returns nothing.
+
+        :param nodes: Nodes to run command on.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if node[u"type"] == NodeType.DUT:
+                InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
+
@staticmethod
def vpp_sw_interface_set_rx_placement(
node, sw_if_index, queue_id, worker_id):
papi_exec.add(cmd, **args).get_reply(err_msg)
     @staticmethod
-    def vpp_round_robin_rx_placement(node, prefix):
+    def vpp_round_robin_rx_placement(
+            node, prefix, workers=None):
         """Set Round Robin interface RX placement on all worker threads
         on node.
 
+        If specified, workers limits the number of physical cores used
+        for data plane I/O work. Other cores are presumed to do something else,
+        e.g. asynchronous crypto processing.
+        None means all workers are used for data plane work.
+
         :param node: Topology nodes.
         :param prefix: Interface name prefix.
+        :param workers: Comma separated worker index numbers intended for
+            dataplane work.
         :type node: dict
         :type prefix: str
+        :type workers: str
         """
-        worker_id = 0
-        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
+        thread_data = VPPUtil.vpp_show_threads(node)
+        # The main thread is included in the dump but is not a worker.
+        worker_cnt = len(thread_data) - 1
         if not worker_cnt:
-            return
+            return None
+        worker_ids = list()
+        if workers:
+            # Keep only the threads whose cpu id was requested.
+            # NOTE(review): if no thread matches, worker_ids stays empty
+            # and the modulo below raises ZeroDivisionError.
+            for item in thread_data:
+                if str(item.cpu_id) in workers.split(u","):
+                    worker_ids.append(item.id)
+        else:
+            # Default: every thread except vpp_main does dataplane work.
+            for item in thread_data:
+                if u"vpp_main" not in item.name:
+                    worker_ids.append(item.id)
+
+        worker_idx = 0
         for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
             for interface in node[u"interfaces"].values():
                 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
                         and prefix in interface[u"name"]:
                     InterfaceUtil.vpp_sw_interface_set_rx_placement(
                         node, placement[u"sw_if_index"], placement[u"queue_id"],
-                        worker_id % worker_cnt
+                        # Thread ids appear 1-based (0 = main) while the
+                        # API worker id is 0-based, hence the "- 1" —
+                        # TODO(review): confirm against vpp_show_threads.
+                        worker_ids[worker_idx % len(worker_ids)] - 1
                     )
-                    worker_id += 1
+                    worker_idx += 1
     @staticmethod
-    def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
-        """Set Round Robin interface RX placement on all worker threads
+    def vpp_round_robin_rx_placement_on_all_duts(
+            nodes, prefix, workers=None):
+        """Set Round Robin interface RX placement on worker threads
         on all DUTs.
 
+        If specified, workers limits the number of physical cores used
+        for data plane I/O work. Other cores are presumed to do something else,
+        e.g. asynchronous crypto processing.
+        None means all cores are used for data plane work.
+
         :param nodes: Topology nodes.
         :param prefix: Interface name prefix.
+        :param workers: Comma separated worker index numbers intended for
+            dataplane work.
         :type nodes: dict
         :type prefix: str
+        :type workers: str
         """
+        # Apply the per-node keyword on DUT nodes only.
         for node in nodes.values():
             if node[u"type"] == NodeType.DUT:
-                InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)
+                InterfaceUtil.vpp_round_robin_rx_placement(
+                    node, prefix, workers
+                )