from robot.api import logger
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.L2Util import L2Util
raise ValueError(f"Unknown if_type: {if_type}")
if node[u"type"] == NodeType.DUT:
+ if sw_if_index is None:
+ raise ValueError(
+ f"Interface index for {interface} not assigned by VPP."
+ )
if state == u"up":
flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
elif state == u"down":
:raises RuntimeError: if it is unable to create VxLAN interface on the
node.
"""
- cmd = u"vxlan_add_del_tunnel"
+ cmd = u"vxlan_add_del_tunnel_v3"
args = dict(
is_add=True,
instance=Constants.BITWISE_NON_ZERO,
return ifc_name, sw_if_index
+ @staticmethod
+ def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+ """Create GTPU interface and return sw if index of created interface.
+
+ :param node: Node where to create GTPU interface.
+ :param teid: GTPU Tunnel Endpoint Identifier.
+ :param source_ip: Source IP of a GTPU Tunnel End Point.
+ :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+ :type node: dict
+ :type teid: int
+ :type source_ip: str
+ :type destination_ip: str
+ :returns: SW IF INDEX of created interface.
+ :rtype: int
+ :raises RuntimeError: if it is unable to create GTPU interface on the
+ node.
+ """
+ cmd = u"gtpu_add_del_tunnel"
+ args = dict(
+ is_add=True,
+ src_address=IPAddress.create_ip_address_object(
+ ip_address(source_ip)
+ ),
+ dst_address=IPAddress.create_ip_address_object(
+ ip_address(destination_ip)
+ ),
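+ # ~0 (BITWISE_NON_ZERO) means no multicast group, i.e. a unicast tunnel.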
+ mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+ encap_vrf_id=0,
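+ # Decap next index 2 presumably selects the ip4-input node.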
+ decap_next_index=2,
+ teid=teid
+ )
+ err_msg = f"Failed to create GTPU tunnel interface " \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
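+ # Register the new tunnel interface in the topology so other keywords can reference it.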
+ if_key = Topology.add_new_port(node, u"gtpu_tunnel")
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return sw_if_index
+
@staticmethod
def vpp_create_loopback(node, mac=None):
"""Create loopback interface on VPP node.
txq_size=txq_size
)
err_msg = f"Failed to create AVF interface on host {node[u'host']}"
- with PapiSocketExecutor(node) as papi_exec:
- sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ # FIXME: Remove once the fw/driver is upgraded.
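+ # Interface creation can fail transiently, so retry up to 10 times before giving up.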
+ for _ in range(10):
+ with PapiSocketExecutor(node) as papi_exec:
+ try:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+ err_msg
+ )
+ break
+ except AssertionError:
+ logger.error(err_msg)
+ else:
+ raise AssertionError(err_msg)
InterfaceUtil.add_eth_interface(
node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
node, u"set logging class rdma level debug"
)
- cmd = u"rdma_create_v2"
+ cmd = u"rdma_create_v3"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
# Note: Set True for non-jumbo packets.
no_multi_seg=False,
max_pktlen=0,
+ # TODO: Apply desired RSS flags.
)
err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
)
elif driver == u"af_xdp":
if kernel_driver not in (
- u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
+ u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
+ u"ixgbe"):
raise RuntimeError(
- f"AF_XDP needs ice or i40e or rdma compatible driver, not "
+ f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
)
vf_keys = InterfaceUtil.init_generic_interface(
node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
)
+ elif driver == u"rdma-core":
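+ # rdma-core follows the same generic VF initialization as af_xdp.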
+ vf_keys = InterfaceUtil.init_generic_interface(
+ node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+ )
return vf_keys
@staticmethod
# PCI device must be re-bound to kernel driver before creating VFs.
DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
# Stop VPP to prevent deadlock.
- # Unbind from current driver.
- DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+ # Unbind from current driver if bound.
+ if current_driver:
+ DUTSetup.pci_driver_unbind(node, pf_pci_addr)
# Bind to kernel driver.
DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
@staticmethod
def vpp_round_robin_rx_placement(
- node, prefix, dp_worker_limit=None):
+ node, prefix, workers=None):
"""Set Round Robin interface RX placement on all worker threads
on node.
- If specified, dp_core_limit limits the number of physical cores used
+ If specified, workers selects which worker threads are used
for data plane I/O work. Other cores are presumed to do something else,
e.g. asynchronous crypto processing.
None means all workers are used for data plane work.
- Note this keyword specifies workers, not cores.
:param node: Topology nodes.
:param prefix: Interface name prefix.
- :param dp_worker_limit: How many cores for data plane work.
+ :param workers: Comma separated list of CPU IDs of the workers intended
+ for data plane work.
:type node: dict
:type prefix: str
- :type dp_worker_limit: Optional[int]
+ :type workers: str
"""
- worker_id = 0
- worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
- if dp_worker_limit is not None:
- worker_cnt = min(worker_cnt, dp_worker_limit)
+ thread_data = VPPUtil.vpp_show_threads(node)
+ worker_cnt = len(thread_data) - 1
if not worker_cnt:
- return
+ return None
+ worker_ids = list()
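+ # Select workers by CPU ID when a selection is given, otherwise use all non-main threads.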
+ if workers:
+ for item in thread_data:
+ if str(item.cpu_id) in workers.split(u","):
+ worker_ids.append(item.id)
+ else:
+ for item in thread_data:
+ if u"vpp_main" not in item.name:
+ worker_ids.append(item.id)
+
+ worker_idx = 0
for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
for interface in node[u"interfaces"].values():
if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
and prefix in interface[u"name"]:
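+ # Worker thread ids start at 1 (vpp_main is 0), while rx placement expects a zero-based worker index, hence the - 1.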
InterfaceUtil.vpp_sw_interface_set_rx_placement(
node, placement[u"sw_if_index"], placement[u"queue_id"],
- worker_id % worker_cnt
+ worker_ids[worker_idx % len(worker_ids)] - 1
)
- worker_id += 1
+ worker_idx += 1
@staticmethod
def vpp_round_robin_rx_placement_on_all_duts(
- nodes, prefix, dp_core_limit=None):
- """Set Round Robin interface RX placement on all worker threads
+ nodes, prefix, workers=None):
+ """Set Round Robin interface RX placement on worker threads
on all DUTs.
- If specified, dp_core_limit limits the number of physical cores used
+ If specified, workers selects which worker threads are used
for data plane I/O work. Other cores are presumed to do something else,
e.g. asynchronous crypto processing.
None means all cores are used for data plane work.
- Note this keyword specifies cores, not workers.
:param nodes: Topology nodes.
:param prefix: Interface name prefix.
- :param dp_worker_limit: How many cores for data plane work.
+ :param workers: Comma separated list of CPU IDs of the workers intended
+ for data plane work.
:type nodes: dict
:type prefix: str
- :type dp_worker_limit: Optional[int]
+ :type workers: str
"""
for node in nodes.values():
if node[u"type"] == NodeType.DUT:
- dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
- phy_cores=dp_core_limit,
- smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
- )
InterfaceUtil.vpp_round_robin_rx_placement(
- node, prefix, dp_worker_limit
+ node, prefix, workers
)