fix(docs): Minor tweaks
[csit.git] / resources / libraries / python / InterfaceUtil.py
index e474e29..42474b4 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -212,6 +212,10 @@ class InterfaceUtil:
             raise ValueError(f"Unknown if_type: {if_type}")
 
         if node[u"type"] == NodeType.DUT:
+            if sw_if_index is None:
+                raise ValueError(
+                    f"Interface index for {interface} not assigned by VPP."
+                )
             if state == u"up":
                 flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
             elif state == u"down":
@@ -290,6 +294,21 @@ class InterfaceUtil:
             cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
             exec_cmd_no_error(node, cmd, sudo=True)
 
+    @staticmethod
+    def set_interface_xdp_off(node, pf_pcis):
+        """Detaches any currently attached XDP/BPF program from the specified
+        interfaces.
+
+        :param node: Topology node.
+        :param pf_pcis: List of node's interfaces PCI addresses.
+        :type node: dict
+        :type pf_pcis: list
+        """
+        for pf_pci in pf_pcis:
+            pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+            cmd = f"ip link set dev {pf_eth} xdp off"
+            exec_cmd_no_error(node, cmd, sudo=True)
+
     @staticmethod
     def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
         """Set Ethernet flow control for specified interfaces.
@@ -328,11 +347,13 @@ class InterfaceUtil:
             exec_cmd_no_error(node, cmd, sudo=True)
 
     @staticmethod
-    def vpp_set_interface_mtu(node, interface, mtu=9200):
-        """Set Ethernet MTU on interface.
+    def vpp_set_interface_mtu(node, interface, mtu):
+        """Apply new MTU value to a VPP hardware interface.
+
+        The interface should be down when this is called.
 
         :param node: VPP node.
-        :param interface: Interface to setup MTU. Default: 9200.
+        :param interface: Interface to set MTU on.
         :param mtu: Ethernet MTU size in Bytes.
         :type node: dict
         :type interface: str or int
@@ -342,43 +363,11 @@ class InterfaceUtil:
             sw_if_index = Topology.get_interface_sw_index(node, interface)
         else:
             sw_if_index = interface
-
         cmd = u"hw_interface_set_mtu"
         err_msg = f"Failed to set interface MTU on host {node[u'host']}"
-        args = dict(
-            sw_if_index=sw_if_index,
-            mtu=int(mtu)
-        )
-        try:
-            with PapiSocketExecutor(node) as papi_exec:
-                papi_exec.add(cmd, **args).get_reply(err_msg)
-        except AssertionError as err:
-            logger.debug(f"Setting MTU failed.\n{err}")
-
-    @staticmethod
-    def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
-        """Set Ethernet MTU on all interfaces.
-
-        :param node: VPP node.
-        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
-        :type node: dict
-        :type mtu: int
-        """
-        for interface in node[u"interfaces"]:
-            InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
-
-    @staticmethod
-    def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
-        """Set Ethernet MTU on all interfaces on all DUTs.
-
-        :param nodes: VPP nodes.
-        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
-        :type nodes: dict
-        :type mtu: int
-        """
-        for node in nodes.values():
-            if node[u"type"] == NodeType.DUT:
-                InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
+        args = dict(sw_if_index=sw_if_index, mtu=int(mtu))
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
 
     @staticmethod
     def vpp_node_interfaces_ready_wait(node, retries=15):
@@ -849,7 +838,7 @@ class InterfaceUtil:
         :raises RuntimeError: if it is unable to create VxLAN interface on the
             node.
         """
-        cmd = u"vxlan_add_del_tunnel"
+        cmd = u"vxlan_add_del_tunnel_v3"
         args = dict(
             is_add=True,
             instance=Constants.BITWISE_NON_ZERO,
@@ -903,7 +892,7 @@ class InterfaceUtil:
         err_msg = f"Failed to set VXLAN bypass on interface " \
             f"on host {node[u'host']}"
         with PapiSocketExecutor(node) as papi_exec:
-            papi_exec.add(cmd, **args).get_replies(err_msg)
+            papi_exec.add(cmd, **args).get_reply(err_msg)
 
     @staticmethod
     def vxlan_dump(node, interface=None):
@@ -1103,6 +1092,31 @@ class InterfaceUtil:
 
         return sw_if_index
 
+    @staticmethod
+    def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
+        """Enable GTPU offload RX onto interface.
+
+        :param node: Node to run command on.
+        :param interface: Name of the specific interface.
+        :param gtpu_if_index: Index of GTPU tunnel interface.
+
+        :type node: dict
+        :type interface: str
+        :type gtpu_if_index: int
+        """
+        sw_if_index = Topology.get_interface_sw_index(node, interface)
+
+        cmd = u"gtpu_offload_rx"
+        args = dict(
+            hw_if_index=sw_if_index,
+            sw_if_index=gtpu_if_index,
+            enable=True
+        )
+
+        err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
+
     @staticmethod
     def vpp_create_loopback(node, mac=None):
         """Create loopback interface on VPP node.
@@ -1258,8 +1272,19 @@ class InterfaceUtil:
             txq_size=txq_size
         )
         err_msg = f"Failed to create AVF interface on host {node[u'host']}"
-        with PapiSocketExecutor(node) as papi_exec:
-            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        # FIXME: Remove once the fw/driver is upgraded.
+        for _ in range(10):
+            with PapiSocketExecutor(node) as papi_exec:
+                try:
+                    sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+                        err_msg
+                    )
+                    break
+                except AssertionError:
+                    logger.error(err_msg)
+        else:
+            raise AssertionError(err_msg)
 
         InterfaceUtil.add_eth_interface(
             node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
@@ -1296,7 +1321,7 @@ class InterfaceUtil:
             node, u"set logging class af_xdp level debug"
         )
 
-        cmd = u"af_xdp_create"
+        cmd = u"af_xdp_create_v2"
         pci_addr = Topology.get_interface_pci_addr(node, if_key)
         args = dict(
             name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1348,7 +1373,7 @@ class InterfaceUtil:
             node, u"set logging class rdma level debug"
         )
 
-        cmd = u"rdma_create_v2"
+        cmd = u"rdma_create_v3"
         pci_addr = Topology.get_interface_pci_addr(node, if_key)
         args = dict(
             name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1360,6 +1385,7 @@ class InterfaceUtil:
             # Note: Set True for non-jumbo packets.
             no_multi_seg=False,
             max_pktlen=0,
+            # TODO: Apply desired RSS flags.
         )
         err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
         with PapiSocketExecutor(node) as papi_exec:
@@ -1780,6 +1806,10 @@ class InterfaceUtil:
             vf_keys = InterfaceUtil.init_generic_interface(
                 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
             )
+        elif driver == u"rdma-core":
+            vf_keys = InterfaceUtil.init_generic_interface(
+                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+            )
         return vf_keys
 
     @staticmethod
@@ -1813,8 +1843,9 @@ class InterfaceUtil:
             # PCI device must be re-bound to kernel driver before creating VFs.
             DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
             # Stop VPP to prevent deadlock.
-            # Unbind from current driver.
-            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+            # Unbind from current driver if bound.
+            if current_driver:
+                DUTSetup.pci_driver_unbind(node, pf_pci_addr)
             # Bind to kernel driver.
             DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
 
@@ -1848,12 +1879,20 @@ class InterfaceUtil:
                 node, pf_dev, state=u"up"
             )
 
-            DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
-            DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
+            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
+            current_driver = DUTSetup.get_pci_dev_driver(
+                node, vf_pci_addr.replace(":", r"\:")
+            )
+            if current_driver:
+                DUTSetup.pci_vf_driver_unbind(
+                    node, pf_pci_addr, vf_id
+                )
+            DUTSetup.pci_vf_driver_bind(
+                node, pf_pci_addr, vf_id, uio_driver
+            )
 
             # Add newly created ports into topology file
             vf_ifc_name = f"{ifc_key}_vif"
-            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
             vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
             Topology.update_interface_name(
                 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
@@ -1950,7 +1989,7 @@ class InterfaceUtil:
         thread_data = VPPUtil.vpp_show_threads(node)
         worker_cnt = len(thread_data) - 1
         if not worker_cnt:
-            return None
+            return
         worker_ids = list()
         if workers:
             for item in thread_data: