Performance: Add AF_XDP tests 56/32456/23
author    pmikus <pmikus@cisco.com>
Wed, 26 May 2021 16:06:02 +0000 (16:06 +0000)
committer Peter Mikus <pmikus@cisco.com>
Fri, 4 Jun 2021 06:45:23 +0000 (06:45 +0000)
- enabling for Fortville, Columbiaville
- enabling experimental support for Mellanox (mlx)

Signed-off-by: pmikus <pmikus@cisco.com>
Change-Id: I1b7ceb54769f4a0089ac7309350499e60c5cca0a

resources/api/vpp/supported_crcs.yaml
resources/libraries/python/Constants.py
resources/libraries/python/CpuUtils.py
resources/libraries/python/InterfaceUtil.py
resources/libraries/python/IrqUtil.py [new file with mode: 0644]
resources/libraries/python/topology.py
resources/libraries/robot/shared/default.robot
resources/libraries/robot/shared/interfaces.robot
resources/libraries/robot/shared/suite_setup.robot

index 3bf9ddd..5aaca0a 100644 (file)
@@ -49,6 +49,8 @@
     # ^^ ip4fwdANDiaclANDacl10AND100_flows
     add_node_next: '0x2457116d'  # dev
     add_node_next_reply: '0x2ed75f32'  # dev
+    af_xdp_create: '0x21226c99'  # perf
+    af_xdp_create_reply: '0x5383d31f'  # perf
     avf_create: '0xdaab8ae2'  # dev
     avf_create_reply: '0x5383d31f'  # dev
     bond_add_member: '0xe7d14948'  # perf
index 4afcc0d..992a314 100644 (file)
@@ -333,12 +333,12 @@ class Constants:
     NIC_NAME_TO_DRIVER = {
         u"Intel-X520-DA2": [u"vfio-pci"],
         u"Intel-X553": [u"vfio-pci"],
-        u"Intel-X710": [u"vfio-pci", u"avf"],
-        u"Intel-XL710": [u"vfio-pci", u"avf"],
-        u"Intel-XXV710": [u"vfio-pci", u"avf"],
-        u"Intel-E810CQ": [u"vfio-pci", u"avf"],
+        u"Intel-X710": [u"vfio-pci", u"avf", u"af_xdp"],
+        u"Intel-XL710": [u"vfio-pci", u"avf", u"af_xdp"],
+        u"Intel-XXV710": [u"vfio-pci", u"avf", u"af_xdp"],
+        u"Intel-E810CQ": [u"vfio-pci", u"avf", u"af_xdp"],
         u"Amazon-Nitro-50G": [u"vfio-pci"],
-        u"Mellanox-CX556A": [u"rdma-core"],
+        u"Mellanox-CX556A": [u"rdma-core", u"af_xdp"],
     }
 
     # Each driver needs a different plugin to work.
@@ -346,6 +346,7 @@ class Constants:
         u"vfio-pci": u"dpdk_plugin.so",
         u"avf": u"avf_plugin.so",
         u"rdma-core": u"rdma_plugin.so",
+        u"af_xdp": u"af_xdp_plugin.so",
     }
 
     # Tags to differentiate tests for different NIC driver.
@@ -353,6 +354,7 @@ class Constants:
         u"vfio-pci": u"DRV_VFIO_PCI",
         u"avf": u"DRV_AVF",
         u"rdma-core": u"DRV_RDMA_CORE",
+        u"af_xdp": u"DRV_AF_XDP",
     }
 
     # Suite names have to be different, add prefix.
@@ -360,6 +362,7 @@ class Constants:
         u"vfio-pci": u"",
         u"avf": u"avf-",
         u"rdma-core": u"rdma-",
+        u"af_xdp": u"af-xdp-",
     }
 
     # Number of virtual functions of physical nic.
@@ -367,6 +370,7 @@ class Constants:
         u"vfio-pci": u"nic_vfs}= | 0",
         u"avf": u"nic_vfs}= | 1",
         u"rdma-core": u"nic_vfs}= | 0",
+        u"af_xdp": u"nic_vfs}= | 0",
     }
 
     # Not each driver is supported by each NIC.
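The driver maps above are what suite generation and test setup consume: a NIC model resolves to its usable drivers, and each driver resolves to the VPP plugin, Robot tag, suite-name prefix and VF count. A minimal lookup sketch; only NIC_NAME_TO_DRIVER is visible in this hunk, so the other dictionary names (NIC_DRIVER_TO_PLUGINS, NIC_DRIVER_TO_TAG, NIC_DRIVER_TO_SUITE_PREFIX) are assumptions based on the surrounding Constants class:

    from resources.libraries.python.Constants import Constants

    def drivers_for_nic(nic_name):
        """Illustrative helper: per-driver settings for one NIC model."""
        return [
            dict(
                driver=driver,
                plugin=Constants.NIC_DRIVER_TO_PLUGINS[driver],
                tag=Constants.NIC_DRIVER_TO_TAG[driver],
                prefix=Constants.NIC_DRIVER_TO_SUITE_PREFIX[driver],
            )
            for driver in Constants.NIC_NAME_TO_DRIVER[nic_name]
        ]

    # u"Intel-X710" now also yields af_xdp -> af_xdp_plugin.so,
    # DRV_AF_XDP, u"af-xdp-".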
index 293d6b6..e23404b 100644 (file)
@@ -333,6 +333,36 @@ class CpuUtils:
         result[0:0] = cpu_list[mt_skip:mt_skip + 1]
         return result
 
+    @staticmethod
+    def get_affinity_af_xdp(
+            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+        """Get affinity for AF_XDP interface. Result will be used to pin IRQs.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface key.
+        :param cpu_skip_cnt: Amount of CPU cores to skip.
+        :param cpu_cnt: CPU threads count.
+        :type node: dict
+        :type pf_key: str
+        :type cpu_skip_cnt: int
+        :type cpu_cnt: int
+        :returns: List of CPUs allocated to AF_XDP interface.
+        :rtype: list
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        if smt_used:
+            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+        return CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=smt_used
+        )
+
     @staticmethod
     def get_affinity_nf(
             nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
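A hedged usage sketch of the new get_affinity_af_xdp helper: it resolves the interface's NUMA node, halves the requested thread count on SMT systems (cpu_slice_of_list_per_node is expected to add the sibling threads back, as elsewhere in CpuUtils), and slices the per-node CPU list. The node dict and interface key are placeholders:

    from resources.libraries.python.CpuUtils import CpuUtils

    # node: topology node dict, u"port1": interface key from topology file.
    # Skip 2 CPUs (system + VPP main), then take 4 threads for IRQ pinning.
    irq_cpus = CpuUtils.get_affinity_af_xdp(
        node, u"port1", cpu_skip_cnt=2, cpu_cnt=4
    )
    # With SMT enabled this allocates 4 // 2 = 2 physical cores and should
    # return both sibling threads of each, i.e. still 4 CPU ids.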
index bf36635..939a34b 100644 (file)
@@ -111,6 +111,13 @@ class RdmaMode(IntEnum):
     RDMA_API_MODE_DV = 2
 
 
+class AfXdpMode(IntEnum):
+    """AF_XDP interface mode."""
+    AF_XDP_API_MODE_AUTO = 0
+    AF_XDP_API_MODE_COPY = 1
+    AF_XDP_API_MODE_ZERO_COPY = 2
+
+
 class InterfaceUtil:
     """General utilities for managing interfaces"""
 
@@ -228,6 +235,26 @@ class InterfaceUtil:
                 f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
             )
 
+    @staticmethod
+    def set_interface_state_pci(
+            node, pf_pcis, namespace=None, state=u"up"):
+        """Set operational state for interface specified by PCI address.
+
+        :param node: Topology node.
+        :param pf_pcis: List of node's interfaces PCI addresses.
+        :param namespace: Exec command in namespace. (Optional, Default: None)
+        :param state: Up/Down. (Optional, Default: up)
+        :type node: dict
+        :type pf_pcis: list
+        :type namespace: str
+        :type state: str
+        """
+        for pf_pci in pf_pcis:
+            pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+            InterfaceUtil.set_linux_interface_state(
+                node, pf_eth, namespace=namespace, state=state
+            )
+
     @staticmethod
     def set_interface_mtu(node, pf_pcis, mtu=9200):
         """Set Ethernet MTU for specified interfaces.
@@ -246,25 +273,43 @@ class InterfaceUtil:
             exec_cmd_no_error(node, cmd, sudo=True)
 
     @staticmethod
-    def set_interface_flow_control(node, pf_pcis, rx=u"off", tx=u"off"):
+    def set_interface_channels(
+            node, pf_pcis, num_queues=1, channel=u"combined"):
+        """Set interface channels for specified interfaces.
+
+        :param node: Topology node.
+        :param pf_pcis: List of node's interfaces PCI addresses.
+        :param num_queues: Number of channels. (Optional, Default: 1)
+        :param channel: Channel type. (Optional, Default: combined)
+        :type node: dict
+        :type pf_pcis: list
+        :type num_queues: int
+        :type channel: str
+        """
+        for pf_pci in pf_pcis:
+            pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+            cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
+            exec_cmd_no_error(node, cmd, sudo=True)
+
+    @staticmethod
+    def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
         """Set Ethernet flow control for specified interfaces.
 
         :param node: Topology node.
         :param pf_pcis: List of node's interfaces PCI addresses.
-        :param rx: RX flow. Default: off.
-        :param tx: TX flow. Default: off.
+        :param rxf: RX flow. (Optional, Default: off).
+        :param txf: TX flow. (Optional, Default: off).
         :type node: dict
         :type pf_pcis: list
-        :type rx: str
-        :type tx: str
+        :type rxf: str
+        :type txf: str
         """
         for pf_pci in pf_pcis:
             pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
-            cmd = f"ethtool -A {pf_eth} rx off tx off"
+            cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
             ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
             if int(ret_code) not in (0, 78):
-                raise RuntimeError("Failed to set MTU on {pf_eth}!")
-
+                raise RuntimeError("Failed to set flow control on {pf_eth}!")
 
     @staticmethod
     def set_pci_parameter(node, pf_pcis, key, value):
@@ -309,8 +354,7 @@ class InterfaceUtil:
             with PapiSocketExecutor(node) as papi_exec:
                 papi_exec.add(cmd, **args).get_reply(err_msg)
         except AssertionError as err:
-            # TODO: Make failure tolerance optional.
-            logger.debug(f"Setting MTU failed. Expected?\n{err}")
+            logger.debug(f"Setting MTU failed.\n{err}")
 
     @staticmethod
     def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
@@ -1182,6 +1226,58 @@ class InterfaceUtil:
 
         return Topology.get_interface_by_sw_index(node, sw_if_index)
 
+    @staticmethod
+    def vpp_create_af_xdp_interface(
+            node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
+            mode=u"auto"):
+        """Create AF_XDP interface on VPP node.
+
+        :param node: DUT node from topology.
+        :param if_key: Physical interface key from topology file of interface
+            to be bound to compatible driver.
+        :param num_rx_queues: Number of RX queues. (Optional, Default: None)
+        :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
+        :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
+        :param mode: AF_XDP interface mode. (Optional, Default: auto).
+        :type node: dict
+        :type if_key: str
+        :type num_rx_queues: int
+        :type rxq_size: int
+        :type txq_size: int
+        :type mode: str
+        :returns: Interface key (name) in topology file.
+        :rtype: str
+        :raises RuntimeError: If it is not possible to create AF_XDP interface
+            on the node.
+        """
+        PapiSocketExecutor.run_cli_cmd(
+            node, u"set logging class af_xdp level debug"
+        )
+
+        cmd = u"af_xdp_create"
+        pci_addr = Topology.get_interface_pci_addr(node, if_key)
+        args = dict(
+            name=InterfaceUtil.pci_to_eth(node, pci_addr),
+            host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
+            rxq_num=int(num_rx_queues) if num_rx_queues else 0,
+            rxq_size=rxq_size,
+            txq_size=txq_size,
+            mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
+        )
+        err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        InterfaceUtil.vpp_set_interface_mac(
+            node, sw_if_index, Topology.get_interface_mac(node, if_key)
+        )
+        InterfaceUtil.add_eth_interface(
+            node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
+            host_if_key=if_key
+        )
+
+        return Topology.get_interface_by_sw_index(node, sw_if_index)
+
     @staticmethod
     def vpp_create_rdma_interface(
             node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
@@ -1219,7 +1315,7 @@ class InterfaceUtil:
             rxq_size=rxq_size,
             txq_size=txq_size,
             mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
-            # TODO: Set True for non-jumbo packets.
+            # Note: Set True for non-jumbo packets.
             no_multi_seg=False,
             max_pktlen=0,
         )
@@ -1577,9 +1673,52 @@ class InterfaceUtil:
         exec_cmd_no_error(node, cmd, sudo=True)
 
     @staticmethod
-    def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
-        """Init PCI device by creating VIFs and bind them to vfio-pci for AVF
-        driver testing on DUT.
+    def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
+        """Init PCI device. Check driver compatibility and bind to proper
+        drivers. Optionally create NIC VFs.
+
+        :param node: DUT node.
+        :param ifc_key: Interface key from topology file.
+        :param driver: Base driver to use.
+        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
+        :param osi_layer: OSI Layer type to initialize TG with.
+            Default value "L2" sets linux interface spoof off.
+        :type node: dict
+        :type ifc_key: str
+        :type driver: str
+        :type numvfs: int
+        :type osi_layer: str
+        :returns: Virtual Function topology interface keys.
+        :rtype: list
+        :raises RuntimeError: If a reason preventing initialization is found.
+        """
+        kernel_driver = Topology.get_interface_driver(node, ifc_key)
+        vf_keys = []
+        if driver == u"avf":
+            if kernel_driver not in (
+                    u"ice", u"iavf", u"i40e", u"i40evf"):
+                raise RuntimeError(
+                    f"AVF needs ice or i40e compatible driver, not "
+                    f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
+                )
+            vf_keys = InterfaceUtil.init_generic_interface(
+                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+            )
+        elif driver == u"af_xdp":
+            if kernel_driver not in (
+                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
+                raise RuntimeError(
+                    f"AF_XDP needs ice or i40e or rdma compatible driver, not "
+                    f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
+                )
+            vf_keys = InterfaceUtil.init_generic_interface(
+                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+            )
+        return vf_keys
+
+    @staticmethod
+    def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
+        """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.
 
         :param node: DUT node.
         :param ifc_key: Interface key from topology file.
@@ -1599,11 +1738,6 @@ class InterfaceUtil:
         pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
         uio_driver = Topology.get_uio_driver(node)
         kernel_driver = Topology.get_interface_driver(node, ifc_key)
-        if kernel_driver not in (u"ice", u"iavf", u"i40e", u"i40evf"):
-            raise RuntimeError(
-                f"AVF needs ice or i40e compatible driver, not {kernel_driver}"
-                f"at node {node[u'host']} ifc {ifc_key}"
-            )
         current_driver = DUTSetup.get_pci_dev_driver(
             node, pf_pci_addr.replace(u":", r"\:"))
 
@@ -1726,8 +1860,7 @@ class InterfaceUtil:
 
     @staticmethod
     def vpp_round_robin_rx_placement(
-        node, prefix, dp_worker_limit=None
-    ):
+            node, prefix, dp_worker_limit=None):
         """Set Round Robin interface RX placement on all worker threads
         on node.
 
@@ -1762,8 +1895,7 @@ class InterfaceUtil:
 
     @staticmethod
     def vpp_round_robin_rx_placement_on_all_duts(
-        nodes, prefix, dp_core_limit=None
-    ):
+            nodes, prefix, dp_core_limit=None):
         """Set Round Robin interface RX placement on all worker threads
         on all DUTs.
 
diff --git a/resources/libraries/python/IrqUtil.py b/resources/libraries/python/IrqUtil.py
new file mode 100644 (file)
index 0000000..1ef228e
--- /dev/null
@@ -0,0 +1,99 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""IRQ handling library."""
+
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.InterfaceUtil import InterfaceUtil
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.topology import Topology
+
+
+class IrqUtil:
+    """Contains methods for managing IRQs."""
+
+    @staticmethod
+    def get_pci_interface_irqs(node, pci_addr):
+        """Get IRQs for interface in linux specified by PCI address.
+
+        :param node: Topology node.
+        :param pci_addr: Linux interface PCI address.
+        :type node: dict
+        :type pci_addr: str
+        :returns: List of IRQs attached to specified interface.
+        :rtype: list
+        """
+        interface = InterfaceUtil.pci_to_eth(node, pci_addr)
+        return IrqUtil.get_interface_irqs(node, interface)
+
+    @staticmethod
+    def get_interface_irqs(node, interface):
+        """Get IRQs for interface in linux.
+
+        :param node: Topology node.
+        :param interface: Linux interface name.
+        :type node: dict
+        :type interface: str
+        :returns: List of IRQs attached to specified interface.
+        :rtype: list
+        """
+        irqs = []
+
+        command = f"grep '{interface}-.*TxRx' /proc/interrupts | cut -f1 -d:"
+        message = f"Failed to get IRQs for {interface} on {node['host']}!"
+        stdout, _ = exec_cmd_no_error(
+            node, command, timeout=30, sudo=True, message=message
+        )
+
+        for line in stdout.splitlines():
+            irqs.append(int(line.strip()))
+
+        return irqs
+
+    @staticmethod
+    def set_interface_irqs_affinity(node, interface, cpu_skip_cnt=0, cpu_cnt=1):
+        """Set IRQs affinity for interface in linux.
+
+        :param node: Topology node.
+        :param interface: Topology interface.
+        :param cpu_skip_cnt: Amount of CPU cores to skip. (Optional, Default: 0)
+        :param cpu_cnt: CPU threads count. (Optional, Default: 1)
+        :type node: dict
+        :type interface: str
+        :type cpu_skip_cnt: int
+        :type cpu_cnt: int
+        """
+        cpu_list = CpuUtils.get_affinity_af_xdp(
+            node, interface, cpu_skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt
+        )
+        interface = Topology.get_interface_name(node, interface)
+        irq_list = IrqUtil.get_interface_irqs(node, interface)
+
+        for irq, cpu in zip(irq_list, cpu_list):
+            if cpu < 32:
+                mask = 1 << cpu
+                mask = f"{mask:x}"
+            else:
+                groups = int(cpu/32)
+                mask_fill = u""
+                for _ in range(groups):
+                    mask_fill = f"{mask_fill},00000000"
+                mask = 1 << (cpu - (32 * groups))
+                mask = f"{mask:x}{mask_fill}"
+
+            command = f"sh -c 'echo {mask} > /proc/irq/{irq}/smp_affinity'"
+            message = f"Failed to set IRQ affinity for {irq} on {node['host']}!"
+            exec_cmd_no_error(
+                node, command, timeout=30, sudo=True, message=message
+            )
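The affinity write builds the comma-separated bitmask of 32-bit groups that /proc/irq/<irq>/smp_affinity expects: CPU 5 gives mask "20" (1 << 5), while CPU 37 lands in the second group (groups = 1, 1 << (37 - 32) = 0x20), giving "20,00000000". The same computation as a standalone sketch:

    def smp_affinity_mask(cpu):
        """Hex CPU mask in /proc/irq/<n>/smp_affinity format (sketch)."""
        groups = cpu // 32                 # full 32-bit groups below cpu
        mask = 1 << (cpu - 32 * groups)    # bit inside the top group
        return f"{mask:x}" + u",00000000" * groups

    assert smp_affinity_mask(5) == u"20"
    assert smp_affinity_mask(37) == u"20,00000000"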
index c39e5af..e829700 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -174,7 +174,7 @@ class Topology:
         port_types = (
             u"subinterface", u"vlan_subif", u"memif", u"tap", u"vhost",
             u"loopback", u"gre_tunnel", u"vxlan_tunnel", u"eth_bond",
-            u"eth_avf", u"eth_rdma", u"geneve_tunnel"
+            u"eth_avf", u"eth_rdma", u"geneve_tunnel", u"eth_af_xdp"
         )
 
         for node_data in nodes.values():
index 113305b..58c2189 100644 (file)
@@ -29,6 +29,7 @@
 | Library | resources.libraries.python.InterfaceUtil
 | Library | resources.libraries.python.IPUtil
 | Library | resources.libraries.python.IPv6Util
+| Library | resources.libraries.python.IrqUtil
 | Library | resources.libraries.python.NodePath
 | Library | resources.libraries.python.Namespaces
 | Library | resources.libraries.python.PapiHistory
index 30c0940..5bb7c85 100644 (file)
 | |
 | | No operation
 
+| Pre-initialize layer af_xdp on all DUTs
+| | [Documentation]
+| | ... | Pre-initialize af_xdp driver.
+| |
+| | FOR | ${dut} | IN | @{duts}
+| | | Set Interface State PCI
+| | | ... | ${nodes['${dut}']} | ${${dut}_pf_pci} | state=up
+| | | Set Interface Channels
+| | | ... | ${nodes['${dut}']} | ${${dut}_pf_pci} | num_queues=${rxq_count_int}
+| | | ... | channel=combined
+| | END
+
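The keyword above brings each DUT's PF links up and sizes the NIC's combined channels to the test's RX queue count, so every AF_XDP socket gets its own hardware queue. Roughly equivalent Python, with duts, nodes, pf_pcis and rxq_count as placeholders:

    from resources.libraries.python.InterfaceUtil import InterfaceUtil

    for dut in duts:  # e.g. [u"DUT1", u"DUT2"]
        InterfaceUtil.set_interface_state_pci(
            nodes[dut], pf_pcis[dut], state=u"up"
        )
        InterfaceUtil.set_interface_channels(
            nodes[dut], pf_pcis[dut], num_queues=rxq_count,
            channel=u"combined"
        )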
 | Pre-initialize layer rdma-core on all DUTs
 | | [Documentation]
 | | ... | Pre-initialize rdma-core driver.
 | |
 | | FOR | ${dut} | IN | @{duts}
 | | | Set Interface Flow Control
-| | | ... | ${nodes['${dut}']} | ${${dut}_pf_pci} | rx="off" | tx="off"
+| | | ... | ${nodes['${dut}']} | ${${dut}_pf_pci} | rxf="off" | txf="off"
 | | | Set PCI Parameter
 | | | ... | ${nodes['${dut}']} | ${${dut}_pf_pci} | key="68.w" | value="3BCD"
 | | END
 | | | ... | ELSE
 | | | ... | Set Interface MTU | ${nodes['${dut}']} | ${${dut}_pf_pci} | mtu=1500
 | | | Set Interface Flow Control
-| | | ... | ${nodes['${dut}']} | ${${dut}_pf_pci} | rx="off" | tx="off"
+| | | ... | ${nodes['${dut}']} | ${${dut}_pf_pci} | rxf="off" | txf="off"
 | | | Set PCI Parameter
 | | | ... | ${nodes['${dut}']} | ${${dut}_pf_pci} | key="68.w" | value="3BCD"
 | | END
 | | | Set List Value | ${${dut}_vf${pf}_vlan} | ${vf} | ${_vlan}
 | | END
 
+| Initialize layer af_xdp on node
+| | [Documentation]
+| | ... | Initialize AF_XDP (eBPF) interfaces on DUT on NIC PF.
+| |
+| | ... | *Arguments:*
+| | ... | - dut - DUT node. Type: string
+| | ... | - pf - NIC physical function (physical port). Type: integer
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Initialize layer af_xdp on node \| DUT1 \| 1 \|
+| |
+| | [Arguments] | ${dut} | ${pf}
+| |
+| | ${_af_xdp}= | VPP Create AF XDP Interface
+| | ... | ${nodes['${dut}']} | ${${dut}_vf${pf}}[0]
+| | ... | num_rx_queues=${65535}
+| | ... | rxq_size=${nic_rxq_size} | txq_size=${nic_txq_size}
+| | ${cpu_skip_cnt}= | Evaluate | ${CPU_CNT_SYSTEM}+${CPU_CNT_MAIN}
+| | ${cpu_skip_cnt}= | Evaluate | ${cpu_skip_cnt}+${cpu_count_int}
+| | ${cpu_skip_cnt}= | Evaluate | ${cpu_skip_cnt}+(${pf}-${1})*${rxq_count_int}
+| | Set Interface IRQs Affinity
+| | ... | ${nodes['${dut}']} | ${_af_xdp}
+| | ... | cpu_skip_cnt=${cpu_skip_cnt} | cpu_cnt=${rxq_count_int}
+| | Set List Value | ${${dut}_vf${pf}} | 0 | ${_af_xdp}
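The skip count above places the IRQ CPUs after the system core(s), the VPP main core, the VPP workers, and the IRQ CPUs of lower-numbered ports. A worked sketch of the arithmetic (all values are illustrative):

    # CPU_CNT_SYSTEM=1 (host OS), CPU_CNT_MAIN=1 (VPP main),
    # cpu_count_int=2 VPP workers, rxq_count_int=2 RX queues per port.
    for pf in (1, 2):
        cpu_skip_cnt = 1 + 1 + 2 + (pf - 1) * 2
        print(pf, cpu_skip_cnt)  # pf1 -> skip 4, pf2 -> skip 6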
+
 | Initialize layer rdma-core on node
 | | [Documentation]
 | | ... | Initialize rdma-core (Mellanox VPP) interfaces on DUT on NIC PF.
index 09cec67..6e11367 100644 (file)
 | |
 | | ... | *Example:*
 | |
-| | ... | \| Additional Suite Setup Action For performance_dut \| DUT1 \|
+| | ... | \| Additional Suite Setup Action For performance vf \| DUT1 \|
 | |
 | | [Arguments] | ${dut}
 | |
 | | FOR | ${pf} | IN RANGE | 1 | ${nic_pfs} + 1
 | | | ${_vf}=
-| | | ... | Run Keyword | Init ${nic_driver} interface
-| | | ... | ${nodes['${dut}']} | ${${dut}_pf${pf}}[0] | numvfs=${nic_vfs}
-| | | ... | osi_layer=${osi_layer}
+| | | ... | Run Keyword | Init interface
+| | | ... | ${nodes['${dut}']} | ${${dut}_pf${pf}}[0] | driver=${nic_driver}
+| | | ... | numvfs=${nic_vfs} | osi_layer=${osi_layer}
 | | | ${_mac}=
 | | | ... | Create List | ${EMPTY}
 | | | ${_ip4_addr}=
 | | Set Suite Variable
 | | ... | ${int} | prevf
 
+| Additional Suite Setup Action For performance pf
+| | [Documentation]
| | ... | Additional Setup for suites which use performance measurement for
+| | ... | single DUT (inner loop).
+| |
+| | ... | *Arguments:*
+| | ... | - dut - DUT node. Type: string
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Additional Suite Setup Action For performance pf \| DUT1 \|
+| |
+| | [Arguments] | ${dut}
+| |
+| | FOR | ${pf} | IN RANGE | 1 | ${nic_pfs} + 1
+| | | Run Keyword | Init interface
+| | | ... | ${nodes['${dut}']} | ${${dut}_pf${pf}}[0] | driver=${nic_driver}
+| | | ... | numvfs=${0} | osi_layer=${osi_layer}
+| | END
+
 | Additional Suite Setup Action For performance
 | | [Documentation]
| | ... | Additional Setup for suites which use performance measurement.
 | | FOR | ${dut} | IN | @{duts}
 | | | Run Keyword If | ${nic_vfs} > 0
 | | | ... | Additional Suite Setup Action For performance vf | ${dut}
+| | | ... | ELSE
+| | | ... | Additional Suite Setup Action For performance pf | ${dut}
 | | END
 | | Initialize traffic generator
 | | ... | ${tg} | ${TG_pf1}[0] | ${TG_pf2}[0]