Add 2048B file size cps rps tests in job specs for http-ldpreload-nginx-1_21_5.
diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 481c122..ff01330 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -13,19 +13,19 @@
 
 """Interface util library."""
 
 
 """Interface util library."""
 
+from json import loads
 from time import sleep
 from enum import IntEnum
 
 from ipaddress import ip_address
 from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.DUTSetup import DUTSetup
 from resources.libraries.python.IPAddress import IPAddress
 from resources.libraries.python.L2Util import L2Util
 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
-from resources.libraries.python.parsers.JsonParser import JsonParser
 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
 from resources.libraries.python.topology import NodeType, Topology
 from resources.libraries.python.VPPUtil import VPPUtil
@@ -213,6 +213,10 @@ class InterfaceUtil:
             raise ValueError(f"Unknown if_type: {if_type}")
 
         if node[u"type"] == NodeType.DUT:
+            if sw_if_index is None:
+                raise ValueError(
+                    f"Interface index for {interface} not assigned by VPP."
+                )
             if state == u"up":
                 flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
             elif state == u"down":
             if state == u"up":
                 flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
             elif state == u"down":
@@ -291,6 +295,21 @@ class InterfaceUtil:
             cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
             exec_cmd_no_error(node, cmd, sudo=True)
 
             cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
             exec_cmd_no_error(node, cmd, sudo=True)
 
+    @staticmethod
+    def set_interface_xdp_off(node, pf_pcis):
+        """Detaches any currently attached XDP/BPF program from the specified
+        interfaces.
+
+        :param node: Topology node.
+        :param pf_pcis: List of node's interfaces PCI addresses.
+        :type node: dict
+        :type pf_pcis: list
+        """
+        for pf_pci in pf_pcis:
+            pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+            cmd = f"ip link set dev {pf_eth} xdp off"
+            exec_cmd_no_error(node, cmd, sudo=True)
+
     @staticmethod
     def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
         """Set Ethernet flow control for specified interfaces.
@@ -329,11 +348,13 @@ class InterfaceUtil:
             exec_cmd_no_error(node, cmd, sudo=True)
 
     @staticmethod
-    def vpp_set_interface_mtu(node, interface, mtu=9200):
-        """Set Ethernet MTU on interface.
+    def vpp_set_interface_mtu(node, interface, mtu):
+        """Apply new MTU value to a VPP hardware interface.
+
+        The interface should be down when this is called.
 
         :param node: VPP node.
-        :param interface: Interface to setup MTU. Default: 9200.
+        :param interface: Interface to set MTU on.
         :param mtu: Ethernet MTU size in Bytes.
         :type node: dict
         :type interface: str or int
@@ -343,43 +364,11 @@ class InterfaceUtil:
             sw_if_index = Topology.get_interface_sw_index(node, interface)
         else:
             sw_if_index = interface
-
         cmd = u"hw_interface_set_mtu"
         err_msg = f"Failed to set interface MTU on host {node[u'host']}"
         cmd = u"hw_interface_set_mtu"
         err_msg = f"Failed to set interface MTU on host {node[u'host']}"
-        args = dict(
-            sw_if_index=sw_if_index,
-            mtu=int(mtu)
-        )
-        try:
-            with PapiSocketExecutor(node) as papi_exec:
-                papi_exec.add(cmd, **args).get_reply(err_msg)
-        except AssertionError as err:
-            logger.debug(f"Setting MTU failed.\n{err}")
-
-    @staticmethod
-    def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
-        """Set Ethernet MTU on all interfaces.
-
-        :param node: VPP node.
-        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
-        :type node: dict
-        :type mtu: int
-        """
-        for interface in node[u"interfaces"]:
-            InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
-
-    @staticmethod
-    def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
-        """Set Ethernet MTU on all interfaces on all DUTs.
-
-        :param nodes: VPP nodes.
-        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
-        :type nodes: dict
-        :type mtu: int
-        """
-        for node in nodes.values():
-            if node[u"type"] == NodeType.DUT:
-                InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
+        args = dict(sw_if_index=sw_if_index, mtu=int(mtu))
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
 
     @staticmethod
     def vpp_node_interfaces_ready_wait(node, retries=15):
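With the try/except swallow removed, PAPI failures now propagate, and the docstring asks for the interface to be down first. A hedged sketch of the expected calling sequence, assuming the set_interface_state keyword from this library:

    InterfaceUtil.set_interface_state(node, interface, u"down")
    InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu=9000)
    InterfaceUtil.set_interface_state(node, interface, u"up")
    # Any hw_interface_set_mtu error now raises instead of being logged.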
@@ -734,9 +723,8 @@ class InterfaceUtil:
         ret_code, stdout, _ = ssh.exec_command(cmd)
         if int(ret_code) != 0:
             raise RuntimeError(u"Get interface name and MAC failed")
-        tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
 
 
-        interfaces = JsonParser().parse_data(tmp)
+        interfaces = loads("{" + stdout.rstrip().replace("\n", ",") + "}")
         for interface in node[u"interfaces"].values():
             name = interfaces.get(interface[u"mac_address"])
             if name is None:
         for interface in node[u"interfaces"].values():
             name = interfaces.get(interface[u"mac_address"])
             if name is None:
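The loads() call works because each stdout line is already a JSON-style key/value pair; a self-contained sketch with assumed command output:

    from json import loads

    # Assumed stdout: one '"<mac>": "<ifname>"' pair per line.
    stdout = '"52:54:00:12:34:56": "ens4"\n"52:54:00:ab:cd:ef": "ens5"'
    interfaces = loads("{" + stdout.rstrip().replace("\n", ",") + "}")
    assert interfaces["52:54:00:12:34:56"] == "ens4"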
@@ -850,7 +838,7 @@ class InterfaceUtil:
         :raises RuntimeError: if it is unable to create VxLAN interface on the
             node.
         """
-        cmd = u"vxlan_add_del_tunnel"
+        cmd = u"vxlan_add_del_tunnel_v3"
         args = dict(
             is_add=True,
             instance=Constants.BITWISE_NON_ZERO,
@@ -904,7 +892,7 @@ class InterfaceUtil:
         err_msg = f"Failed to set VXLAN bypass on interface " \
             f"on host {node[u'host']}"
         with PapiSocketExecutor(node) as papi_exec:
         err_msg = f"Failed to set VXLAN bypass on interface " \
             f"on host {node[u'host']}"
         with PapiSocketExecutor(node) as papi_exec:
-            papi_exec.add(cmd, **args).get_replies(err_msg)
+            papi_exec.add(cmd, **args).get_reply(err_msg)
 
     @staticmethod
     def vxlan_dump(node, interface=None):
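Based on how this library uses the executor, get_reply asserts that exactly one reply arrived for the single queued command, while get_replies returns a list and would not flag a missing reply; a sketch of the fixed pattern:

    with PapiSocketExecutor(node) as papi_exec:
        # One command queued, so exactly one reply is expected.
        papi_exec.add(cmd, **args).get_reply(err_msg)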
@@ -1061,6 +1049,76 @@ class InterfaceUtil:
 
         return ifc_name, sw_if_index
 
+    @staticmethod
+    def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+        """Create GTPU interface and return sw if index of created interface.
+
+        :param node: Node where to create GTPU interface.
+        :param teid: GTPU Tunnel Endpoint Identifier.
+        :param source_ip: Source IP of a GTPU Tunnel End Point.
+        :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+        :type node: dict
+        :type teid: int
+        :type source_ip: str
+        :type destination_ip: str
+        :returns: SW IF INDEX of created interface.
+        :rtype: int
+        :raises RuntimeError: if it is unable to create GTPU interface on the
+            node.
+        """
+        cmd = u"gtpu_add_del_tunnel_v2"
+        args = dict(
+            is_add=True,
+            src_address=IPAddress.create_ip_address_object(
+                ip_address(source_ip)
+            ),
+            dst_address=IPAddress.create_ip_address_object(
+                ip_address(destination_ip)
+            ),
+            mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+            encap_vrf_id=0,
+            decap_next_index=2,  # ipv4
+            teid=teid,
+            # pdu_extension: Unused, false by default.
+            # qfi: Irrelevant when pdu_extension is not used.
+        )
+        err_msg = f"Failed to create GTPU tunnel interface " \
+            f"on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        if_key = Topology.add_new_port(node, u"gtpu_tunnel")
+        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+        Topology.update_interface_name(node, if_key, ifc_name)
+
+        return sw_if_index
+
+    @staticmethod
+    def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
+        """Enable GTPU offload RX onto interface.
+
+        :param node: Node to run command on.
+        :param interface: Name of the specific interface.
+        :param gtpu_if_index: Index of GTPU tunnel interface.
+
+        :type node: dict
+        :type interface: str
+        :type gtpu_if_index: int
+        """
+        sw_if_index = Topology.get_interface_sw_index(node, interface)
+
+        cmd = u"gtpu_offload_rx"
+        args = dict(
+            hw_if_index=sw_if_index,
+            sw_if_index=gtpu_if_index,
+            enable=True
+        )
+
+        err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
+
     @staticmethod
     def vpp_create_loopback(node, mac=None):
         """Create loopback interface on VPP node.
@@ -1216,8 +1274,19 @@ class InterfaceUtil:
             txq_size=txq_size
         )
         err_msg = f"Failed to create AVF interface on host {node[u'host']}"
-        with PapiSocketExecutor(node) as papi_exec:
-            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        # FIXME: Remove once the fw/driver is upgraded.
+        for _ in range(10):
+            with PapiSocketExecutor(node) as papi_exec:
+                try:
+                    sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+                        err_msg
+                    )
+                    break
+                except AssertionError:
+                    logger.error(err_msg)
+        else:
+            raise AssertionError(err_msg)
 
         InterfaceUtil.add_eth_interface(
             node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
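The retry block relies on Python's for/else semantics: the else clause runs only if the loop completed without break. A self-contained sketch of the same idiom, with flaky_call as a hypothetical stand-in:

    for attempt in range(10):
        try:
            result = flaky_call()  # hypothetical; may raise AssertionError
            break                  # success skips the else clause below
        except AssertionError as err:
            print(f"attempt {attempt} failed: {err}")
    else:
        # Reached only if all ten attempts failed (no break executed).
        raise AssertionError(u"giving up after 10 attempts")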
@@ -1254,7 +1323,7 @@ class InterfaceUtil:
             node, u"set logging class af_xdp level debug"
         )
 
             node, u"set logging class af_xdp level debug"
         )
 
-        cmd = u"af_xdp_create"
+        cmd = u"af_xdp_create_v3"
         pci_addr = Topology.get_interface_pci_addr(node, if_key)
         args = dict(
             name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1306,7 +1375,7 @@ class InterfaceUtil:
             node, u"set logging class rdma level debug"
         )
 
             node, u"set logging class rdma level debug"
         )
 
-        cmd = u"rdma_create_v2"
+        cmd = u"rdma_create_v4"
         pci_addr = Topology.get_interface_pci_addr(node, if_key)
         args = dict(
             name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1318,6 +1387,9 @@ class InterfaceUtil:
             # Note: Set True for non-jumbo packets.
             no_multi_seg=False,
             max_pktlen=0,
+            # TODO: Apply desired RSS flags.
+            # rss4 kept 0 (auto) as API default.
+            # rss6 kept 0 (auto) as API default.
         )
         err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
         with PapiSocketExecutor(node) as papi_exec:
@@ -1729,14 +1801,19 @@ class InterfaceUtil:
             )
         elif driver == u"af_xdp":
             if kernel_driver not in (
-                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
+                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
+                    u"ixgbe"):
                 raise RuntimeError(
-                    f"AF_XDP needs ice or i40e or rdma compatible driver, not "
+                    f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
                     f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
                 )
             vf_keys = InterfaceUtil.init_generic_interface(
                 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
             )
                     f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
                 )
             vf_keys = InterfaceUtil.init_generic_interface(
                 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
             )
+        elif driver == u"rdma-core":
+            vf_keys = InterfaceUtil.init_generic_interface(
+                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+            )
         return vf_keys
 
     @staticmethod
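For reference, finding which kernel driver backs a PCI device boils down to reading a sysfs symlink; a hedged local sketch (the DUTSetup helper wraps the equivalent logic with remote execution):

    import os

    def kernel_driver_of(pci_addr):
        """Return the kernel driver bound to a PCI address, or None."""
        link = f"/sys/bus/pci/devices/{pci_addr}/driver"
        return os.path.basename(os.readlink(link)) \
            if os.path.islink(link) else None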
@@ -1770,13 +1847,14 @@ class InterfaceUtil:
             # PCI device must be re-bound to kernel driver before creating VFs.
             DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
             # Stop VPP to prevent deadlock.
-            # Unbind from current driver.
-            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+            # Unbind from current driver if bound.
+            if current_driver:
+                DUTSetup.pci_driver_unbind(node, pf_pci_addr)
             # Bind to kernel driver.
             DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
 
         # Initialize PCI VFs.
-        DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
+        DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs=numvfs)
 
         if not numvfs:
             if osi_layer == u"L2":
@@ -1805,12 +1883,20 @@ class InterfaceUtil:
                 node, pf_dev, state=u"up"
             )
 
                 node, pf_dev, state=u"up"
             )
 
-            DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
-            DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
+            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
+            current_driver = DUTSetup.get_pci_dev_driver(
+                node, vf_pci_addr.replace(":", r"\:")
+            )
+            if current_driver:
+                DUTSetup.pci_vf_driver_unbind(
+                    node, pf_pci_addr, vf_id
+                )
+            DUTSetup.pci_vf_driver_bind(
+                node, pf_pci_addr, vf_id, uio_driver
+            )
 
             # Add newly created ports into topology file
             vf_ifc_name = f"{ifc_key}_vif"
-            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
             vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
             Topology.update_interface_name(
                 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
@@ -1887,64 +1973,73 @@ class InterfaceUtil:
 
     @staticmethod
     def vpp_round_robin_rx_placement(
-            node, prefix, dp_worker_limit=None):
+            node, prefix, workers=None):
         """Set Round Robin interface RX placement on all worker threads
         on node.
 
         """Set Round Robin interface RX placement on all worker threads
         on node.
 
-        If specified, dp_core_limit limits the number of physical cores used
+        If specified, workers limits the number of physical cores used
         for data plane I/O work. Other cores are presumed to do something else,
         e.g. asynchronous crypto processing.
         None means all workers are used for data plane work.
-        Note this keyword specifies workers, not cores.
 
         :param node: Topology nodes.
         :param prefix: Interface name prefix.
-        :param dp_worker_limit: How many cores for data plane work.
+        :param workers: Comma separated CPU ids of the worker threads
+            intended for dataplane work.
         :type node: dict
         :type prefix: str
-        :type dp_worker_limit: Optional[int]
+        :type workers: str
         """
         """
-        worker_id = 0
-        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
-        if dp_worker_limit is not None:
-            worker_cnt = min(worker_cnt, dp_worker_limit)
+        thread_data = VPPUtil.vpp_show_threads(node)
+        worker_cnt = len(thread_data) - 1
         if not worker_cnt:
             return
+        worker_ids = list()
+        if workers:
+            for item in thread_data:
+                if str(item.cpu_id) in workers.split(u","):
+                    worker_ids.append(item.id)
+        else:
+            for item in thread_data:
+                if u"vpp_main" not in item.name:
+                    worker_ids.append(item.id)
+
+        worker_idx = 0
         for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
             for interface in node[u"interfaces"].values():
                 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
                     and prefix in interface[u"name"]:
                     InterfaceUtil.vpp_sw_interface_set_rx_placement(
                         node, placement[u"sw_if_index"], placement[u"queue_id"],
-                        worker_id % worker_cnt
+                        worker_ids[worker_idx % len(worker_ids)] - 1
                     )
-                    worker_id += 1
+                    worker_idx += 1
 
     @staticmethod
     def vpp_round_robin_rx_placement_on_all_duts(
-            nodes, prefix, dp_core_limit=None):
-        """Set Round Robin interface RX placement on all worker threads
+            nodes, prefix, use_dp_cores=False):
+        """Set Round Robin interface RX placement on worker threads
         on all DUTs.
 
         on all DUTs.
 
-        If specified, dp_core_limit limits the number of physical cores used
+        If specified, workers limits the number of physical cores used
         for data plane I/O work. Other cores are presumed to do something else,
         e.g. asynchronous crypto processing.
         None means all cores are used for data plane work.
         for data plane I/O work. Other cores are presumed to do something else,
         e.g. asynchronous crypto processing.
         None means all cores are used for data plane work.
-        Note this keyword specifies cores, not workers.
 
         :param nodes: Topology nodes.
         :param prefix: Interface name prefix.
 
         :param nodes: Topology nodes.
         :param prefix: Interface name prefix.
-        :param dp_worker_limit: How many cores for data plane work.
+        :param use_dp_cores: Limit to dataplane cores.
         :type nodes: dict
         :type prefix: str
         :type nodes: dict
         :type prefix: str
-        :type dp_worker_limit: Optional[int]
+        :type use_dp_cores: bool
         """
         """
-        for node in nodes.values():
-            if node[u"type"] == NodeType.DUT:
-                dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
-                    phy_cores=dp_core_limit,
-                    smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
-                )
+        for node_name, node in nodes.items():
+            if node["type"] == NodeType.DUT:
+                workers = None
+                if use_dp_cores:
+                    workers = BuiltIn().get_variable_value(
+                        f"${{{node_name}_cpu_dp}}"
+                    )
                 InterfaceUtil.vpp_round_robin_rx_placement(
-                    node, prefix, dp_worker_limit
+                    node, prefix, workers
                 )
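A pure-Python sketch of the resulting queue-to-worker mapping, assuming VPP reported dataplane worker threads with ids 1..3 (thread 0 being vpp_main):

    worker_ids = [1, 2, 3]  # thread ids of the selected dataplane workers
    for worker_idx, queue_id in enumerate(range(6)):
        # The "- 1" converts a thread id into a worker index (main is 0).
        worker = worker_ids[worker_idx % len(worker_ids)] - 1
        print(f"queue {queue_id} -> worker {worker}")
    # Queues 0..5 land on workers 0, 1, 2, 0, 1, 2.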