-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.L2Util import L2Util
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.parsers.JsonParser import JsonParser
-from resources.libraries.python.ssh import SSH, exec_cmd_no_error
+from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.VPPUtil import VPPUtil
BOND_API_MODE_LACP = 5
+class RdmaMode(IntEnum):
+ """RDMA interface mode."""
+ RDMA_API_MODE_AUTO = 0
+ RDMA_API_MODE_IBV = 1
+ RDMA_API_MODE_DV = 2
+
+
class InterfaceUtil:
"""General utilities for managing interfaces"""
- __UDEV_IF_RULES_FILE = u"/etc/udev/rules.d/10-network.rules"
-
@staticmethod
def pci_to_int(pci_str):
"""Convert PCI address from string format (0000:18:0a.0) to
)
@staticmethod
- def set_interface_ethernet_mtu(node, iface_key, mtu):
- """Set Ethernet MTU for specified interface.
-
- Function can be used only for TGs.
+ def set_interface_mtu(node, pf_pcis, mtu=9200):
+ """Set Ethernet MTU for specified interfaces.
- :param node: Node where the interface is.
- :param iface_key: Interface key from topology file.
- :param mtu: MTU to set.
- :type node: dict
- :type iface_key: str
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param mtu: MTU to set. Default: 9200.
+ :type node: dict
+ :type pf_pcis: list
:type mtu: int
- :returns: Nothing.
- :raises ValueError: If the node type is "DUT".
- :raises ValueError: If the node has an unknown node type.
+ :raises RuntimeError: If failed to set MTU on interface.
"""
- if node[u"type"] == NodeType.DUT:
- msg = f"Node {node[u'host']}: Setting Ethernet MTU for interface " \
- f"on DUT nodes not supported"
- elif node[u"type"] != NodeType.TG:
- msg = f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
- else:
- iface_name = Topology.get_interface_name(node, iface_key)
- cmd = f"ip link set {iface_name} mtu {mtu}"
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ cmd = f"ip link set {pf_eth} mtu {mtu}"
exec_cmd_no_error(node, cmd, sudo=True)
- return
- raise ValueError(msg)
@staticmethod
- def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
- """Set default Ethernet MTU on all interfaces on node.
+ def set_interface_flow_control(node, pf_pcis, rx=u"off", tx=u"off"):
+ """Set Ethernet flow control for specified interfaces.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param rx: RX flow. Default: off.
+ :param tx: TX flow. Default: off.
+ :type node: dict
+ :type pf_pcis: list
+ :type rx: str
+ :type tx: str
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ cmd = f"ethtool -A {pf_eth} rx {rx} tx {tx}"
+ ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
+ if int(ret_code) not in (0, 78):
+ raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
- Function can be used only for TGs.
- :param node: Node where to set default MTU.
- :type node: dict
- :returns: Nothing.
+ @staticmethod
+ def set_pci_parameter(node, pf_pcis, key, value):
+ """Set PCI parameter for specified interfaces.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param key: Key to set.
+ :param value: Value to set.
+ :type node: dict
+ :type pf_pcis: list
+ :type key: str
+ :type value: str
"""
- for ifc in node[u"interfaces"]:
- InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
+ for pf_pci in pf_pcis:
+ cmd = f"setpci -s {pf_pci} {key}={value}"
+ exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
def vpp_set_interface_mtu(node, interface, mtu=9200):
return if_data.get(u"l2_address")
+ @staticmethod
+ def vpp_set_interface_mac(node, interface, mac):
+ """Set MAC address for the given interface.
+
+ :param node: VPP node to set interface MAC.
+ :param interface: Numeric index or name string of a specific interface.
+ :param mac: Required MAC address.
+ :type node: dict
+ :type interface: int or str
+ :type mac: str
+ """
+ cmd = u"sw_interface_set_mac_address"
+ args = dict(
+ sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+ mac_address=L2Util.mac_to_bin(mac)
+ )
+ err_msg = f"Failed to set MAC address of interface {interface} " \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
@staticmethod
def tg_set_interface_driver(node, pci_addr, driver):
"""Set interface driver on the TG node.
"""
return DUTSetup.get_pci_dev_driver(node, pci_addr)
- @staticmethod
- def tg_set_interfaces_udev_rules(node):
- """Set udev rules for interfaces.
-
- Create udev rules file in /etc/udev/rules.d where are rules for each
- interface used by TG node, based on MAC interface has specific name.
- So after unbind and bind again to kernel driver interface has same
- name as before. This must be called after TG has set name for each
- port in topology dictionary.
- udev rule example
- SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
- NAME="eth1"
-
- :param node: Node to set udev rules on (must be TG node).
- :type node: dict
- :raises RuntimeError: If setting of udev rules fails.
- """
- ssh = SSH()
- ssh.connect(node)
-
- cmd = f"rm -f {InterfaceUtil.__UDEV_IF_RULES_FILE}"
- ret_code, _, _ = ssh.exec_command_sudo(cmd)
- if int(ret_code) != 0:
- raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
-
- for interface in node[u"interfaces"].values():
- rule = u'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
- u'==\\"' + interface[u"mac_address"] + u'\\", NAME=\\"' + \
- interface[u"name"] + u'\\"'
- cmd = f"sh -c \"echo '{rule}'\" >> " \
- f"{InterfaceUtil.__UDEV_IF_RULES_FILE}'"
-
- ret_code, _, _ = ssh.exec_command_sudo(cmd)
- if int(ret_code) != 0:
- raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
-
- cmd = u"/etc/init.d/udev restart"
- ssh.exec_command_sudo(cmd)
-
@staticmethod
def tg_set_interfaces_default_driver(node):
"""Set interfaces default driver specified in topology yaml file.
InterfaceUtil.update_nic_interface_names(node)
@staticmethod
- def update_tg_interface_data_on_node(node, skip_tg_udev=False):
+ def update_tg_interface_data_on_node(node):
"""Update interface name for TG/linux node in DICT__nodes.
.. note::
"00:00:00:00:00:00": "lo"
:param node: Node selected from DICT__nodes.
- :param skip_tg_udev: Skip udev rename on TG node.
:type node: dict
- :type skip_tg_udev: bool
:raises RuntimeError: If getting of interface name and MAC fails.
"""
# First setup interface driver specified in yaml file
continue
interface[u"name"] = name
- # Set udev rules for interfaces
- if not skip_tg_udev:
- InterfaceUtil.tg_set_interfaces_udev_rules(node)
-
@staticmethod
def iface_update_numa_node(node):
"""For all interfaces from topology file update numa node based on
:raises ValueError: If numa node ia less than 0.
:raises RuntimeError: If update of numa node failed.
"""
- def check_cpu_node_count(node_n, val):
- val = int(val)
- if val < 0:
- if CpuUtils.cpu_node_count(node_n) == 1:
- val = 0
- else:
- raise ValueError
- return val
ssh = SSH()
for if_key in Topology.get_node_interfaces(node):
if_pci = Topology.get_interface_pci_addr(node, if_key)
ret, out, _ = ssh.exec_command(cmd)
if ret == 0:
try:
- numa_node = check_cpu_node_count(node, out)
+ numa_node = 0 if int(out) < 0 else int(out)
except ValueError:
logger.trace(
f"Reading numa location failed for: {if_pci}"
else:
raise RuntimeError(f"Update numa node failed for: {if_pci}")
- @staticmethod
- def update_all_numa_nodes(nodes, skip_tg=False):
- """For all nodes and all their interfaces from topology file update numa
- node information based on information from the node.
-
- :param nodes: Nodes in the topology.
- :param skip_tg: Skip TG node
- :type nodes: dict
- :type skip_tg: bool
- :returns: Nothing.
- """
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
- InterfaceUtil.iface_update_numa_node(node)
- elif node[u"type"] == NodeType.TG and not skip_tg:
- InterfaceUtil.iface_update_numa_node(node)
-
@staticmethod
def update_all_interface_data_on_all_nodes(
- nodes, skip_tg=False, skip_tg_udev=False, numa_node=False):
+ nodes, skip_tg=False, skip_vpp=False):
"""Update interface names on all nodes in DICT__nodes.
This method updates the topology dictionary by querying interface lists
:param nodes: Nodes in the topology.
:param skip_tg: Skip TG node.
- :param skip_tg_udev: Skip udev rename on TG node.
- :param numa_node: Retrieve numa_node location.
+ :param skip_vpp: Skip VPP node.
:type nodes: dict
:type skip_tg: bool
- :type skip_tg_udev: bool
- :type numa_node: bool
+ :type skip_vpp: bool
"""
- for node_data in nodes.values():
- if node_data[u"type"] == NodeType.DUT:
- InterfaceUtil.update_vpp_interface_data_on_node(node_data)
- elif node_data[u"type"] == NodeType.TG and not skip_tg:
- InterfaceUtil.update_tg_interface_data_on_node(
- node_data, skip_tg_udev)
-
- if numa_node:
- if node_data[u"type"] == NodeType.DUT:
- InterfaceUtil.iface_update_numa_node(node_data)
- elif node_data[u"type"] == NodeType.TG and not skip_tg:
- InterfaceUtil.iface_update_numa_node(node_data)
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT and not skip_vpp:
+ InterfaceUtil.update_vpp_interface_data_on_node(node)
+ elif node[u"type"] == NodeType.TG and not skip_tg:
+ InterfaceUtil.update_tg_interface_data_on_node(node)
+ InterfaceUtil.iface_update_numa_node(node)
@staticmethod
def create_vlan_subinterface(node, interface, vlan):
:raises RuntimeError: if it is unable to create VxLAN interface on the
node.
"""
- src_address = ip_address(source_ip)
- dst_address = ip_address(destination_ip)
-
cmd = u"vxlan_add_del_tunnel"
args = dict(
- is_add=1,
- is_ipv6=1 if src_address.version == 6 else 0,
+ is_add=True,
instance=Constants.BITWISE_NON_ZERO,
- src_address=src_address.packed,
- dst_address=dst_address.packed,
+ src_address=IPAddress.create_ip_address_object(
+ ip_address(source_ip)
+ ),
+ dst_address=IPAddress.create_ip_address_object(
+ ip_address(destination_ip)
+ ),
mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
encap_vrf_id=0,
decap_next_index=Constants.BITWISE_NON_ZERO,
cmd = u"sw_interface_set_vxlan_bypass"
args = dict(
- is_ipv6=0,
+ is_ipv6=False,
sw_if_index=sw_if_index,
- enable=1
+ enable=True
)
err_msg = f"Failed to set VXLAN bypass on interface " \
f"on host {node[u'host']}"
:returns: Processed vxlan interface dump.
:rtype: dict
"""
- if vxlan_dump[u"is_ipv6"]:
- vxlan_dump[u"src_address"] = \
- ip_address(vxlan_dump[u"src_address"])
- vxlan_dump[u"dst_address"] = \
- ip_address(vxlan_dump[u"dst_address"])
- else:
- vxlan_dump[u"src_address"] = \
- ip_address(vxlan_dump[u"src_address"][0:4])
- vxlan_dump[u"dst_address"] = \
- ip_address(vxlan_dump[u"dst_address"][0:4])
+ vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
+ vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
return vxlan_dump
if interface is not None:
:raises RuntimeError: If it is not possible to create loopback on the
node.
"""
- cmd = u"create_loopback"
+ cmd = u"create_loopback_instance"
args = dict(
- mac_address=L2Util.mac_to_bin(mac) if mac else 0
+ mac_address=L2Util.mac_to_bin(mac) if mac else 0,
+ is_specified=False,
+ user_instance=0,
)
err_msg = f"Failed to create loopback interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
return sw_if_index
@staticmethod
- def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
+ def vpp_create_bond_interface(
+ node, mode, load_balance=None, mac=None, gso=False):
"""Create bond interface on VPP node.
:param node: DUT node from topology.
:param mode: Link bonding mode.
:param load_balance: Load balance (optional, valid for xor and lacp
- modes, otherwise ignored).
+ modes, otherwise ignored). Default: None.
:param mac: MAC address to assign to the bond interface (optional).
+ Default: None.
+ :param gso: Enable GSO support (optional). Default: False.
:type node: dict
:type mode: str
:type load_balance: str
:type mac: str
+ :type gso: bool
:returns: Interface key (name) in topology.
:rtype: str
:raises RuntimeError: If it is not possible to create bond interface on
the node.
"""
- cmd = u"bond_create"
+ cmd = u"bond_create2"
args = dict(
id=int(Constants.BITWISE_NON_ZERO),
use_custom_mac=bool(mac is not None),
LinkBondLoadBalanceAlgo,
f"BOND_API_LB_ALGO_{load_balance.upper()}"
).value,
- numa_only=False
+ numa_only=False,
+ enable_gso=gso
)
err_msg = f"Failed to create bond interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
:type ifc_name: str
:type sw_if_index: int
:type ifc_pfx: str
- :type ifc_pfx: host_if_key
+ :type host_if_key: str
"""
if_key = Topology.add_new_port(node, ifc_pfx)
node, host_if_key
)
)
+ Topology.update_interface_pci_address(
+ node, if_key, Topology.get_interface_pci_addr(node, host_if_key)
+ )
@staticmethod
- def vpp_create_avf_interface(node, if_key, num_rx_queues=None):
+ def vpp_create_avf_interface(
+ node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0):
"""Create AVF interface on VPP node.
:param node: DUT node from topology.
:param if_key: Interface key from topology file of interface
to be bound to i40evf driver.
:param num_rx_queues: Number of RX queues.
+ :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
+ :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
:type node: dict
:type if_key: str
:type num_rx_queues: int
- :returns: Interface key (name) in topology.
+ :type rxq_size: int
+ :type txq_size: int
+ :returns: AVF interface key (name) in topology.
:rtype: str
:raises RuntimeError: If it is not possible to create AVF interface on
the node.
pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
enable_elog=0,
rxq_num=int(num_rx_queues) if num_rx_queues else 0,
- rxq_size=0,
- txq_size=0
+ rxq_size=rxq_size,
+ txq_size=txq_size
)
err_msg = f"Failed to create AVF interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
host_if_key=if_key
)
- if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
- return if_key
+ return Topology.get_interface_by_sw_index(node, sw_if_index)
@staticmethod
- def vpp_create_rdma_interface(node, if_key, num_rx_queues=None):
+ def vpp_create_rdma_interface(
+ node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
+ mode=u"auto"):
"""Create RDMA interface on VPP node.
:param node: DUT node from topology.
:param if_key: Physical interface key from topology file of interface
to be bound to rdma-core driver.
:param num_rx_queues: Number of RX queues.
+ :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
+ :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
+ :param mode: RDMA interface mode - auto/ibv/dv.
:type node: dict
:type if_key: str
:type num_rx_queues: int
+ :type rxq_size: int
+ :type txq_size: int
+ :type mode: str
:returns: Interface key (name) in topology file.
:rtype: str
:raises RuntimeError: If it is not possible to create RDMA interface on
the node.
"""
- cmd = u"rdma_create"
+ PapiSocketExecutor.run_cli_cmd(
+ node, u"set logging class rdma level debug"
+ )
+
+ cmd = u"rdma_create_v2"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
rxq_num=int(num_rx_queues) if num_rx_queues else 0,
- rxq_size=0,
- txq_size=0
+ rxq_size=rxq_size,
+ txq_size=txq_size,
+ mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
+ # TODO: Set True for non-jumbo packets.
+ no_multi_seg=False,
+ max_pktlen=0,
)
err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+ InterfaceUtil.vpp_set_interface_mac(
+ node, sw_if_index, Topology.get_interface_mac(node, if_key)
+ )
InterfaceUtil.add_eth_interface(
node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma",
host_if_key=if_key
return Topology.get_interface_by_sw_index(node, sw_if_index)
@staticmethod
- def vpp_enslave_physical_interface(node, interface, bond_if):
- """Enslave physical interface to bond interface on VPP node.
+ def vpp_add_bond_member(node, interface, bond_if):
+ """Add member interface to bond interface on VPP node.
:param node: DUT node from topology.
:param interface: Physical interface key from topology file.
:type node: dict
:type interface: str
:type bond_if: str
- :raises RuntimeError: If it is not possible to enslave physical
- interface to bond interface on the node.
+ :raises RuntimeError: If it is not possible to add member to bond
+ interface on the node.
"""
- cmd = u"bond_enslave"
+ cmd = u"bond_add_member"
args = dict(
sw_if_index=Topology.get_interface_sw_index(node, interface),
bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
is_passive=False,
is_long_timeout=False
)
- err_msg = f"Failed to enslave physical interface {interface} to bond " \
- f"interface {bond_if} on host {node[u'host']}"
+ err_msg = f"Failed to add member {interface} to bond interface " \
+ f"{bond_if} on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type node: dict
:type verbose: bool
"""
- cmd = u"sw_interface_bond_dump"
+ cmd = u"sw_bond_interface_dump"
err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
data = f"Bond data on node {node[u'host']}:\n"
data += u" load balance: {lb}\n".format(
lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
)
- data += f" number of active slaves: {bond[u'active_slaves']}\n"
+ data += f" number of active members: {bond[u'active_members']}\n"
if verbose:
- slave_data = InterfaceUtil.vpp_bond_slave_dump(
+ member_data = InterfaceUtil.vpp_bond_member_dump(
node, Topology.get_interface_by_sw_index(
node, bond[u"sw_if_index"]
)
)
- for slave in slave_data:
- if not slave[u"is_passive"]:
- data += f" {slave[u'interface_name']}\n"
- data += f" number of slaves: {bond[u'slaves']}\n"
+ for member in member_data:
+ if not member[u"is_passive"]:
+ data += f" {member[u'interface_name']}\n"
+ data += f" number of members: {bond[u'members']}\n"
if verbose:
- for slave in slave_data:
- data += f" {slave[u'interface_name']}\n"
+ for member in member_data:
+ data += f" {member[u'interface_name']}\n"
data += f" interface id: {bond[u'id']}\n"
data += f" sw_if_index: {bond[u'sw_if_index']}\n"
logger.info(data)
@staticmethod
- def vpp_bond_slave_dump(node, interface):
+ def vpp_bond_member_dump(node, interface):
"""Get bond interface slave(s) data on VPP node.
:param node: DUT node from topology.
:returns: Bond slave interface data.
:rtype: dict
"""
- cmd = u"sw_interface_slave_dump"
+ cmd = u"sw_member_interface_dump"
args = dict(
sw_if_index=Topology.get_interface_sw_index(node, interface)
)
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
- logger.debug(f"Slave data:\n{details}")
+ logger.debug(f"Member data:\n{details}")
return details
@staticmethod
def get_sw_if_index(node, interface_name):
"""Get sw_if_index for the given interface from actual interface dump.
+ FIXME: Delete and redirect callers to vpp_get_interface_sw_index.
+
:param node: VPP node to get interface data from.
:param interface_name: Name of the specific interface.
:type node: dict
cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
exec_cmd_no_error(node, cmd, sudo=True)
+ @staticmethod
+ def set_linux_interface_state(
+ node, interface, namespace=None, state=u"up"):
+ """Set operational state for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Execute command in namespace. Optional
+ :param state: Up/Down.
+ :type node: dict
+ :type interface: str
+ :type namespace: str
+ :type state: str
+ """
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
+
+ cmd = f"{ns_str} ip link set dev {interface} {state}"
+ exec_cmd_no_error(node, cmd, sudo=True)
+
@staticmethod
def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
"""Init PCI device by creating VIFs and bind them to vfio-pci for AVF
pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
uio_driver = Topology.get_uio_driver(node)
kernel_driver = Topology.get_interface_driver(node, ifc_key)
- if kernel_driver not in (u"i40e", u"i40evf"):
+ if kernel_driver not in (u"ice", u"iavf", u"i40e", u"i40evf"):
raise RuntimeError(
- f"AVF needs i40e-compatible driver, not {kernel_driver} "
+ f"AVF needs ice or i40e compatible driver, not {kernel_driver} "
f"at node {node[u'host']} ifc {ifc_key}"
)
current_driver = DUTSetup.get_pci_dev_driver(
InterfaceUtil.set_linux_interface_mac(
node, pf_dev, vf_mac_addr, vf_id=vf_id
)
+ InterfaceUtil.set_linux_interface_state(
+ node, pf_dev, state=u"up"
+ )
DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def vpp_round_robin_rx_placement(node, prefix):
+ def vpp_round_robin_rx_placement(
+ node, prefix, dp_worker_limit=None
+ ):
"""Set Round Robin interface RX placement on all worker threads
on node.
+ If specified, dp_worker_limit limits the number of worker threads used
+ for data plane I/O work. Other workers are presumed to do something else,
+ e.g. asynchronous crypto processing.
+ None means all workers are used for data plane work.
+ Note this keyword specifies workers, not cores.
+
:param node: Topology nodes.
:param prefix: Interface name prefix.
+ :param dp_worker_limit: How many workers for data plane work.
:type node: dict
:type prefix: str
+ :type dp_worker_limit: Optional[int]
"""
worker_id = 0
worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
+ if dp_worker_limit is not None:
+ worker_cnt = min(worker_cnt, dp_worker_limit)
if not worker_cnt:
return
for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
worker_id += 1
@staticmethod
- def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
+ def vpp_round_robin_rx_placement_on_all_duts(
+ nodes, prefix, dp_core_limit=None
+ ):
"""Set Round Robin interface RX placement on all worker threads
on all DUTs.
+ If specified, dp_core_limit limits the number of physical cores used
+ for data plane I/O work. Other cores are presumed to do something else,
+ e.g. asynchronous crypto processing.
+ None means all cores are used for data plane work.
+ Note this keyword specifies cores, not workers.
+
:param nodes: Topology nodes.
:param prefix: Interface name prefix.
+ :param dp_core_limit: How many physical cores for data plane work.
:type nodes: dict
:type prefix: str
+ :type dp_core_limit: Optional[int]
"""
for node in nodes.values():
if node[u"type"] == NodeType.DUT:
- InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)
+ dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
+ phy_cores=dp_core_limit,
+ smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
+ )
+ InterfaceUtil.vpp_round_robin_rx_placement(
+ node, prefix, dp_worker_limit
+ )