Revert "fix(IPsecUtil): Delete keywords no longer used"
[csit.git] resources/libraries/python/CpuUtils.py
index 293d6b6..c77d0f8 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 """CPU utilities library."""
 
+from random import choice
+
 from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
 
 __all__ = [u"CpuUtils"]
 
@@ -84,29 +86,6 @@ class CpuUtils:
                         [CpuUtils.__str2int(x) for x in line.split(u",")]
                     )
 
-    @staticmethod
-    def worker_count_from_cores_and_smt(phy_cores, smt_used):
-        """Simple conversion utility, needs smt from caller.
-
-        The implementation assumes we pack 1 or 2 workers per core,
-        depending on hyperthreading.
-
-        Some keywords use None to indicate no core/worker limit,
-        so this converts None to None.
-
-        :param phy_cores: How many physical cores to use for workers.
-        :param smt_used: Whether symmetric multithreading is used.
-        :type phy_cores: Optional[int]
-        :type smt_used: bool
-        :returns: How many VPP workers fit into the given number of cores.
-        :rtype: Optional[int]
-        """
-        if phy_cores is None:
-            return None
-        workers_per_core = CpuUtils.NR_OF_THREADS if smt_used else 1
-        workers = phy_cores * workers_per_core
-        return workers
-
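For reference, a one-line sketch of the conversion the helper being removed here performed, assuming two hardware threads per core when SMT is on (None passes through unchanged); the values are hypothetical, not taken from the diff:

    # Hypothetical inputs; the removed helper multiplied physical cores by
    # threads-per-core when SMT was enabled, and passed None through.
    phy_cores, smt_used = 2, True
    workers = None if phy_cores is None else phy_cores * (2 if smt_used else 1)
    # workers == 4 here; with smt_used=False it would be 2.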
     @staticmethod
     def cpu_node_count(node):
         """Return count of numa nodes.
@@ -291,7 +270,7 @@ class CpuUtils:
         :returns: List of CPUs allocated to NF.
         :rtype: list
         :raises RuntimeError: If we require more cpus than available or if
-        placement is not possible due to wrong parameters.
+            placement is not possible due to wrong parameters.
         """
         if not 1 <= nf_chain <= nf_chains:
             raise RuntimeError(u"ChainID is out of range!")
@@ -333,6 +312,36 @@ class CpuUtils:
         result[0:0] = cpu_list[mt_skip:mt_skip + 1]
         return result
 
+    @staticmethod
+    def get_affinity_af_xdp(
+            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+        """Get affinity for AF_XDP interface. Result will be used to pin IRQs.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface key.
+        :param cpu_skip_cnt: Number of CPU cores to skip.
+        :param cpu_cnt: Number of CPU threads to allocate.
+        :type node: dict
+        :type pf_key: str
+        :type cpu_skip_cnt: int
+        :type cpu_cnt: int
+        :returns: List of CPUs allocated to AF_XDP interface.
+        :rtype: list
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        if smt_used:
+            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+        return CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=smt_used
+        )
+
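A minimal usage sketch of the new keyword, assuming a topology node dict "node" already loaded from a topology file and a hypothetical interface key "port1" (neither is taken from the diff); the returned core list is what would then be used to pin the interface IRQs:

    from resources.libraries.python.CpuUtils import CpuUtils

    # "node" is assumed to be a topology node dict with a "cpuinfo" entry;
    # "port1" is a hypothetical pf_key present in that topology.
    irq_cpus = CpuUtils.get_affinity_af_xdp(
        node, pf_key=u"port1", cpu_skip_cnt=2, cpu_cnt=4
    )
    # irq_cpus is a list of CPU ids on the interface's NUMA node; on SMT
    # systems cpu_cnt is halved before slicing, as shown in the keyword above.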
     @staticmethod
     def get_affinity_nf(
             nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
@@ -381,25 +390,25 @@ class CpuUtils:
 
     @staticmethod
     def get_affinity_trex(
-            node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+            node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
         """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
 
         :param node: TG node.
-        :param if1_pci: TG first interface.
-        :param if2_pci: TG second interface.
+        :param if_key: TG interface key.
         :param tg_mtc: TG main thread count.
         :param tg_dtc: TG dataplane thread count.
         :param tg_ltc: TG latency thread count.
+        :param tg_dtc_offset: TG dataplane thread offset.
         :type node: dict
-        :type if1_pci: str
-        :type if2_pci: str
+        :type if_key: str
         :type tg_mtc: int
         :type tg_dtc: int
         :type tg_ltc: int
+        :type tg_dtc_offset: int
         :returns: Master thread ID, latency thread ID, NUMA node and list of
             dataplane thread CPUs.
         :rtype: int, int, int, list
         """
-        interface_list = [if1_pci, if2_pci]
+        interface_list = [if_key]
         cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
 
         master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
@@ -407,12 +416,11 @@ class CpuUtils:
             smt_used=False)
 
         threads = CpuUtils.cpu_slice_of_list_per_node(
-            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
-            smt_used=False)
+            node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
+            cpu_cnt=tg_dtc, smt_used=False)
 
         latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
-            node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
-            smt_used=False)
+            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
 
         return master_thread_id[0], latency_thread_id[0], cpu_node, threads
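A short worked example of the revised layout, using hypothetical numbers rather than anything from the diff and assuming the master slice starts at the beginning of the NUMA-local core list: with tg_mtc=1, tg_ltc=1, tg_dtc=4 and tg_dtc_offset=0, the latency thread now sits directly after the master thread, and the dataplane threads start after master + latency + offset (previously the latency thread trailed the dataplane block):

    # Hypothetical NUMA-local core list; the keyword obtains the real one via
    # CpuUtils.cpu_slice_of_list_per_node().
    cores = list(range(16))
    tg_mtc, tg_ltc, tg_dtc, tg_dtc_offset = 1, 1, 4, 0

    master = cores[:tg_mtc]                              # [0]
    latency = cores[tg_mtc:tg_mtc + tg_ltc]              # [1]
    dp_skip = tg_mtc + tg_ltc + tg_dtc_offset
    dataplane = cores[dp_skip:dp_skip + tg_dtc]          # [2, 3, 4, 5]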
 
@@ -472,8 +480,8 @@ class CpuUtils:
 
     @staticmethod
     def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
-        """
-        Get idle CPU List
+        """Get idle CPU List.
+
         :param node: Node dictionary with cpuinfo.
         :param cpu_node: Numa node number.
         :param smt_used: True - we want to use SMT, otherwise false.
@@ -491,3 +499,102 @@ class CpuUtils:
         cpu_idle_list = [i for i in cpu_list
                          if str(i) not in cpu_alloc_str.split(sep)]
         return cpu_idle_list
+
+    @staticmethod
+    def get_affinity_vswitch(
+            nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+        """Get affinity for vswitch on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param phy_cores: Number of physical cores to allocate.
+        :param rx_queues: Number of RX queues. (Optional, Default: None)
+        :param rxd: Number of RX descriptors. (Optional, Default: None)
+        :param txd: Number of TX descriptors. (Optional, Default: None)
+        :type nodes: dict
+        :type phy_cores: int
+        :type rx_queues: int
+        :type rxd: int
+        :type txd: int
+        :returns: Compute resource information dictionary.
+        :rtype: dict
+        """
+        compute_resource_info = dict()
+        for node_name, node in nodes.items():
+            if node["type"] != NodeType.DUT:
+                continue
+            # Number of Data Plane physical cores.
+            dp_cores_count = BuiltIn().get_variable_value(
+                "${dp_cores_count}", phy_cores
+            )
+            # Number of Feature Plane physical cores.
+            fp_cores_count = BuiltIn().get_variable_value(
+                "${fp_cores_count}", phy_cores - dp_cores_count
+            )
+            # Ratio between RX queues and data plane threads.
+            rxq_ratio = BuiltIn().get_variable_value(
+                "${rxq_ratio}", 1
+            )
+
+            dut_pf_keys = BuiltIn().get_variable_value(
+                f"${{{node_name}_pf_keys}}"
+            )
+            # SMT override for non-standard test cases.
+            smt_used = BuiltIn().get_variable_value(
+                "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+            )
+
+            cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+            skip_cnt = Constants.CPU_CNT_SYSTEM
+            cpu_main = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
+                smt_used=False
+            )
+            cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
+            skip_cnt += Constants.CPU_CNT_MAIN
+            cpu_dp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(dp_cores_count),
+                smt_used=smt_used
+            ) if int(dp_cores_count) else ""
+            skip_cnt = skip_cnt + int(dp_cores_count)
+            cpu_fp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(fp_cores_count),
+                smt_used=smt_used
+            ) if int(fp_cores_count) else ""
+
+            fp_count_int = \
+                int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(fp_cores_count)
+            dp_count_int = \
+                int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(dp_cores_count)
+
+            rxq_count_int = \
+                int(rx_queues) if rx_queues \
+                else int(dp_count_int/rxq_ratio)
+            rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+            compute_resource_info["buffers_numa"] = \
+                215040 if smt_used else 107520
+            compute_resource_info["smt_used"] = smt_used
+            compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+            compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+            compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+            compute_resource_info[f"{node_name}_cpu_wt"] = \
+                ",".join(filter(None, [cpu_dp, cpu_fp]))
+            compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+                ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+            compute_resource_info["cpu_count_int"] = \
+                int(dp_cores_count) + int(fp_cores_count)
+            compute_resource_info["rxd_count_int"] = rxd
+            compute_resource_info["txd_count_int"] = txd
+            compute_resource_info["rxq_count_int"] = rxq_count_int
+            compute_resource_info["fp_count_int"] = fp_count_int
+            compute_resource_info["dp_count_int"] = dp_count_int
+
+        return compute_resource_info
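
A minimal usage sketch of the new keyword; because it reads Robot variables through BuiltIn() (e.g. ${dp_cores_count} and the per-DUT pf_keys), it is meant to run inside a Robot test context. Names such as "DUT1" below are illustrative, not taken from the diff:

    from resources.libraries.python.CpuUtils import CpuUtils

    # "nodes" is the topology dict; ${DUT1_pf_keys} and related variables are
    # assumed to have been set by the suite setup (illustrative assumption).
    info = CpuUtils.get_affinity_vswitch(nodes, phy_cores=2, rx_queues=4)

    # Typical keys for a DUT named "DUT1":
    #   info["DUT1_cpu_main"]      -> main (control) core string
    #   info["DUT1_cpu_wt"]        -> worker cores (dp + fp), e.g. "2,3"
    #   info["DUT1_cpu_alloc_str"] -> all allocated cores as one string
    #   info["rxq_count_int"]      -> RX queues (rx_queues, or dp/rxq_ratio)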