FIX: init_interface driver and SR-IOV
[csit.git] / resources / libraries / python / InterfaceUtil.py
index 939a34b..4cd7cf1 100644 (file)
@@ -20,7 +20,6 @@ from ipaddress import ip_address
 from robot.api import logger
 
 from resources.libraries.python.Constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.DUTSetup import DUTSetup
 from resources.libraries.python.IPAddress import IPAddress
 from resources.libraries.python.L2Util import L2Util
@@ -1061,6 +1060,49 @@ class InterfaceUtil:
 
         return ifc_name, sw_if_index
 
+    @staticmethod
+    def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+        """Create GTPU interface and return sw if index of created interface.
+
+        :param node: Node where to create GTPU interface.
+        :param teid: GTPU Tunnel Endpoint Identifier.
+        :param source_ip: Source IP of a GTPU Tunnel End Point.
+        :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+        :type node: dict
+        :type teid: int
+        :type source_ip: str
+        :type destination_ip: str
+        :returns: SW IF INDEX of created interface.
+        :rtype: int
+        :raises RuntimeError: if it is unable to create GTPU interface on the
+            node.
+        """
+        cmd = u"gtpu_add_del_tunnel"
+        args = dict(
+            is_add=True,
+            src_address=IPAddress.create_ip_address_object(
+                ip_address(source_ip)
+            ),
+            dst_address=IPAddress.create_ip_address_object(
+                ip_address(destination_ip)
+            ),
+            mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+            encap_vrf_id=0,
+            decap_next_index=2,
+            teid=teid
+        )
+        err_msg = f"Failed to create GTPU tunnel interface " \
+            f"on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        if_key = Topology.add_new_port(node, u"gtpu_tunnel")
+        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+        Topology.update_interface_name(node, if_key, ifc_name)
+
+        return sw_if_index
+
     @staticmethod
     def vpp_create_loopback(node, mac=None):
         """Create loopback interface on VPP node.
@@ -1216,8 +1258,19 @@ class InterfaceUtil:
             txq_size=txq_size
         )
         err_msg = f"Failed to create AVF interface on host {node[u'host']}"
-        with PapiSocketExecutor(node) as papi_exec:
-            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        # FIXME: Remove once the fw/driver is upgraded.
+        for _ in range(10):
+            with PapiSocketExecutor(node) as papi_exec:
+                try:
+                    sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+                        err_msg
+                    )
+                    break
+                except AssertionError:
+                    logger.error(err_msg)
+        else:
+            raise AssertionError(err_msg)
 
         InterfaceUtil.add_eth_interface(
             node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
@@ -1612,6 +1665,29 @@ class InterfaceUtil:
         cmd = f"{ns_str} ip link set {interface} {mac_str}"
         exec_cmd_no_error(node, cmd, sudo=True)
 
+    @staticmethod
+    def set_linux_interface_promisc(
+            node, interface, namespace=None, vf_id=None, state=u"on"):
+        """Set promisc state for interface in linux.
+
+        :param node: Node where to execute command.
+        :param interface: Interface in namespace.
+        :param namespace: Exec command in namespace. (Optional, Default: None)
+        :param vf_id: Virtual Function id. (Optional, Default: None)
+        :param state: State of feature. (Optional, Default: on)
+        :type node: dict
+        :type interface: str
+        :type namespace: str
+        :type vf_id: int
+        :type state: str
+        """
+        promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
+            else f"promisc {state}"
+        ns_str = f"ip netns exec {namespace}" if namespace else u""
+
+        cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
+        exec_cmd_no_error(node, cmd, sudo=True)
+
     @staticmethod
     def set_linux_interface_trust_on(
             node, interface, namespace=None, vf_id=None):
@@ -1706,14 +1782,19 @@ class InterfaceUtil:
             )
         elif driver == u"af_xdp":
             if kernel_driver not in (
-                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
+                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
+                    u"ixgbe"):
                 raise RuntimeError(
-                    f"AF_XDP needs ice or i40e or rdma compatible driver, not "
+                    f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
                     f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
                 )
             vf_keys = InterfaceUtil.init_generic_interface(
                 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
             )
+        elif driver == u"rdma-core":
+            vf_keys = InterfaceUtil.init_generic_interface(
+                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+            )
         return vf_keys
 
     @staticmethod
@@ -1740,20 +1821,26 @@ class InterfaceUtil:
         kernel_driver = Topology.get_interface_driver(node, ifc_key)
         current_driver = DUTSetup.get_pci_dev_driver(
             node, pf_pci_addr.replace(u":", r"\:"))
+        pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
 
         VPPUtil.stop_vpp_service(node)
         if current_driver != kernel_driver:
             # PCI device must be re-bound to kernel driver before creating VFs.
             DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
             # Stop VPP to prevent deadlock.
-            # Unbind from current driver.
-            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+            # Unbind from current driver if bound.
+            if current_driver:
+                DUTSetup.pci_driver_unbind(node, pf_pci_addr)
             # Bind to kernel driver.
             DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
 
         # Initialize PCI VFs.
         DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
 
+        if not numvfs:
+            if osi_layer == u"L2":
+                InterfaceUtil.set_linux_interface_promisc(node, pf_dev)
+
         vf_ifc_keys = []
         # Set MAC address and bind each virtual function to uio driver.
         for vf_id in range(numvfs):
@@ -1763,7 +1850,6 @@ class InterfaceUtil:
                  ]
             )
 
-            pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
             InterfaceUtil.set_linux_interface_trust_on(
                 node, pf_dev, vf_id=vf_id
             )
@@ -1860,64 +1946,69 @@ class InterfaceUtil:
 
     @staticmethod
     def vpp_round_robin_rx_placement(
-            node, prefix, dp_worker_limit=None):
+            node, prefix, workers=None):
         """Set Round Robin interface RX placement on all worker threads
         on node.
 
-        If specified, dp_core_limit limits the number of physical cores used
+        If specified, workers limits the number of physical cores used
         for data plane I/O work. Other cores are presumed to do something else,
         e.g. asynchronous crypto processing.
         None means all workers are used for data plane work.
-        Note this keyword specifies workers, not cores.
 
         :param node: Topology nodes.
         :param prefix: Interface name prefix.
-        :param dp_worker_limit: How many cores for data plane work.
+        :param workers: Comma separated worker index numbers intended for
+            dataplane work.
         :type node: dict
         :type prefix: str
-        :type dp_worker_limit: Optional[int]
+        :type workers: str
         """
-        worker_id = 0
-        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
-        if dp_worker_limit is not None:
-            worker_cnt = min(worker_cnt, dp_worker_limit)
+        thread_data = VPPUtil.vpp_show_threads(node)
+        worker_cnt = len(thread_data) - 1
         if not worker_cnt:
-            return
+            return None
+        worker_ids = list()
+        if workers:
+            for item in thread_data:
+                if str(item.cpu_id) in workers.split(u","):
+                    worker_ids.append(item.id)
+        else:
+            for item in thread_data:
+                if u"vpp_main" not in item.name:
+                    worker_ids.append(item.id)
+
+        worker_idx = 0
         for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
             for interface in node[u"interfaces"].values():
                 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
                     and prefix in interface[u"name"]:
                     InterfaceUtil.vpp_sw_interface_set_rx_placement(
                         node, placement[u"sw_if_index"], placement[u"queue_id"],
-                        worker_id % worker_cnt
+                        worker_ids[worker_idx % len(worker_ids)] - 1
                     )
-                    worker_id += 1
+                    worker_idx += 1
 
     @staticmethod
     def vpp_round_robin_rx_placement_on_all_duts(
-            nodes, prefix, dp_core_limit=None):
-        """Set Round Robin interface RX placement on all worker threads
+            nodes, prefix, workers=None):
+        """Set Round Robin interface RX placement on worker threads
         on all DUTs.
 
-        If specified, dp_core_limit limits the number of physical cores used
+        If specified, workers limits the number of physical cores used
         for data plane I/O work. Other cores are presumed to do something else,
         e.g. asynchronous crypto processing.
         None means all cores are used for data plane work.
-        Note this keyword specifies cores, not workers.
 
         :param nodes: Topology nodes.
         :param prefix: Interface name prefix.
-        :param dp_worker_limit: How many cores for data plane work.
+        :param workers: Comma separated worker index numbers intended for
+            dataplane work.
         :type nodes: dict
         :type prefix: str
-        :type dp_worker_limit: Optional[int]
+        :type workers: str
         """
         for node in nodes.values():
             if node[u"type"] == NodeType.DUT:
-                dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
-                    phy_cores=dp_core_limit,
-                    smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
-                )
                 InterfaceUtil.vpp_round_robin_rx_placement(
-                    node, prefix, dp_worker_limit
+                    node, prefix, workers
                 )