Revert "fix(IPsecUtil): Delete keywords no longer used"
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index 1e306f0..c77d0f8 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
 
 """CPU utilities library."""
 
+from random import choice
+
 from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
 
 __all__ = [u"CpuUtils"]
 
@@ -388,7 +390,7 @@ class CpuUtils:
 
     @staticmethod
     def get_affinity_trex(
-            node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+            node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
         """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
 
         :param node: TG node.
@@ -396,11 +398,13 @@ class CpuUtils:
         :param tg_mtc: TG main thread count.
         :param tg_dtc: TG dataplane thread count.
         :param tg_ltc: TG latency thread count.
+        :param tg_dtc_offset: TG dataplane thread offset (extra CPUs to skip).
         :type node: dict
         :type if_key: str
         :type tg_mtc: int
         :type tg_dtc: int
         :type tg_ltc: int
+        :type tg_dtc_offset: int
         :returns: List of CPUs allocated to T-Rex including numa node.
         :rtype: int, int, int, list
         """
@@ -412,12 +416,11 @@ class CpuUtils:
             smt_used=False)
 
         threads = CpuUtils.cpu_slice_of_list_per_node(
-            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
-            smt_used=False)
+            node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
+            cpu_cnt=tg_dtc, smt_used=False)
 
         latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
-            node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
-            smt_used=False)
+            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
 
         return master_thread_id[0], latency_thread_id[0], cpu_node, threads
 
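Note on the reordering above: the latency thread is now pinned directly after the main thread, and the dataplane threads start at tg_mtc + tg_ltc + tg_dtc_offset, so a second T-Rex instance can pass an offset of tg_dtc and get a disjoint dataplane core set. A minimal sketch of the resulting layout, assuming a flat CPU list for the interface's NUMA node (cpus and slice_cpus are hypothetical stand-ins; the real code resolves CPUs via cpu_slice_of_list_per_node):

    # Hypothetical flat CPU list standing in for one NUMA node.
    cpus = list(range(16))
    tg_mtc, tg_ltc, tg_dtc = 1, 1, 4

    def slice_cpus(skip_cnt, cpu_cnt):
        # Stand-in for cpu_slice_of_list_per_node(..., smt_used=False).
        return cpus[skip_cnt:skip_cnt + cpu_cnt]

    master = slice_cpus(0, tg_mtc)                       # [0]
    latency = slice_cpus(tg_mtc, tg_ltc)                 # [1], after main
    dp_a = slice_cpus(tg_mtc + tg_ltc + 0, tg_dtc)       # [2, 3, 4, 5]
    dp_b = slice_cpus(tg_mtc + tg_ltc + tg_dtc, tg_dtc)  # [6, 7, 8, 9]
    assert not set(dp_a) & set(dp_b)  # tg_dtc_offset keeps them disjoint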
@@ -499,17 +502,15 @@ class CpuUtils:
 
     @staticmethod
     def get_affinity_vswitch(
-            nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
-        """Get affinity for vswitch.
+            nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+        """Get affinity for vswitch on all DUTs.
 
         :param nodes: Topology nodes.
-        :param node: Topology node string.
         :param phy_cores: Number of physical cores to allocate.
         :param rx_queues: Number of RX queues. (Optional, Default: None)
         :param rxd: Number of RX descriptors. (Optional, Default: None)
         :param txd: Number of TX descriptors. (Optional, Default: None)
         :type nodes: dict
-        :type node: str
         :type phy_cores: int
         :type rx_queues: int
         :type rxd: int
@@ -517,76 +518,83 @@ class CpuUtils:
         :returns: Compute resource information dictionary.
         :rtype: dict
         """
-        # Number of Data Plane physical cores.
-        dp_cores_count = BuiltIn().get_variable_value(
-            f"${{dp_cores_count}}", phy_cores
-        )
-        # Number of Feature Plane physical cores.
-        fp_cores_count = BuiltIn().get_variable_value(
-            f"${{fp_cores_count}}", phy_cores - dp_cores_count
-        )
-        # Ratio between RX queues and data plane threads.
-        rxq_ratio = BuiltIn().get_variable_value(
-            f"${{rxq_ratio}}", 1
-        )
-
-        dut_pf_keys = BuiltIn().get_variable_value(
-            f"${{{node}_pf_keys}}"
-        )
-        # SMT override in case of non standard test cases.
-        smt_used = BuiltIn().get_variable_value(
-            f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
-        )
-
-        cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
-        skip_cnt = Constants.CPU_CNT_SYSTEM
-        cpu_main = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=Constants.CPU_CNT_MAIN,
-            smt_used=False
-        )
-        skip_cnt += Constants.CPU_CNT_MAIN
-        cpu_dp = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=int(dp_cores_count),
-            smt_used=smt_used
-        ) if int(dp_cores_count) else u""
-        skip_cnt = skip_cnt + int(dp_cores_count)
-        cpu_fp = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=int(fp_cores_count),
-            smt_used=smt_used
-        ) if int(fp_cores_count) else u""
-
-        fp_count_int = \
-            int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
-            else int(fp_cores_count)
-        dp_count_int = \
-            int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
-            else int(dp_cores_count)
-
-        rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
-        rxq_count_int = 1 if not rxq_count_int else rxq_count_int
-
         compute_resource_info = dict()
-        compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
-        compute_resource_info[u"smt_used"] = smt_used
-        compute_resource_info[u"cpu_main"] = cpu_main
-        compute_resource_info[u"cpu_dp"] = cpu_dp
-        compute_resource_info[u"cpu_fp"] = cpu_fp
-        compute_resource_info[u"cpu_wt"] = \
-            u",".join(filter(None, [cpu_dp, cpu_fp]))
-        compute_resource_info[u"cpu_alloc_str"] = \
-            u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
-        compute_resource_info[u"cpu_count_int"] = \
-            int(dp_cores_count) + int(fp_cores_count)
-        compute_resource_info[u"rxd_count_int"] = rxd
-        compute_resource_info[u"txd_count_int"] = txd
-        compute_resource_info[u"rxq_count_int"] = rxq_count_int
-        compute_resource_info[u"fp_count_int"] = fp_count_int
-        compute_resource_info[u"dp_count_int"] = dp_count_int
+        for node_name, node in nodes.items():
+            if node["type"] != NodeType.DUT:
+                continue
+            # Number of Data Plane physical cores.
+            dp_cores_count = BuiltIn().get_variable_value(
+                "${dp_cores_count}", phy_cores
+            )
+            # Number of Feature Plane physical cores.
+            fp_cores_count = BuiltIn().get_variable_value(
+                "${fp_cores_count}", phy_cores - dp_cores_count
+            )
+            # Ratio between RX queues and data plane threads.
+            rxq_ratio = BuiltIn().get_variable_value(
+                "${rxq_ratio}", 1
+            )
+
+            dut_pf_keys = BuiltIn().get_variable_value(
+                f"${{{node_name}_pf_keys}}"
+            )
+            # SMT override for non-standard test cases.
+            smt_used = BuiltIn().get_variable_value(
+                "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+            )
+
+            cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+            skip_cnt = Constants.CPU_CNT_SYSTEM
+            cpu_main = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
+                smt_used=False
+            )
+            cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
+            skip_cnt += Constants.CPU_CNT_MAIN
+            cpu_dp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(dp_cores_count),
+                smt_used=smt_used
+            ) if int(dp_cores_count) else ""
+            skip_cnt = skip_cnt + int(dp_cores_count)
+            cpu_fp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(fp_cores_count),
+                smt_used=smt_used
+            ) if int(fp_cores_count) else ""
+
+            fp_count_int = \
+                int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(fp_cores_count)
+            dp_count_int = \
+                int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(dp_cores_count)
+
+            rxq_count_int = \
+                int(rx_queues) if rx_queues \
+                else int(dp_count_int / rxq_ratio)
+            rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+            compute_resource_info["buffers_numa"] = \
+                215040 if smt_used else 107520
+            compute_resource_info["smt_used"] = smt_used
+            compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+            compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+            compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+            compute_resource_info[f"{node_name}_cpu_wt"] = \
+                ",".join(filter(None, [cpu_dp, cpu_fp]))
+            compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+                ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+            compute_resource_info["cpu_count_int"] = \
+                int(dp_cores_count) + int(fp_cores_count)
+            compute_resource_info["rxd_count_int"] = rxd
+            compute_resource_info["txd_count_int"] = txd
+            compute_resource_info["rxq_count_int"] = rxq_count_int
+            compute_resource_info["fp_count_int"] = fp_count_int
+            compute_resource_info["dp_count_int"] = dp_count_int
 
         return compute_resource_info
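With this change get_affinity_vswitch walks every DUT in the topology in a single call and prefixes the per-node keys with the node name, while scalar keys such as rxq_count_int stay unprefixed and are overwritten on each pass, so homogeneous DUTs are effectively assumed. The new random import supports the phy_cores=0 path, where cpu_cnt=0 returns all remaining CPUs and one main core is chosen at random. A hypothetical sketch of both behaviours (node names and CPU strings are illustrative, not read from a topology file):

    from random import choice

    # Per-DUT key naming as produced by the loop above.
    compute_resource_info = {}
    for node_name in ("DUT1", "DUT2"):
        compute_resource_info[f"{node_name}_cpu_main"] = "1"
        compute_resource_info[f"{node_name}_cpu_wt"] = "2,3"
    # Unprefixed keys are rewritten on every pass, so the last DUT wins.
    compute_resource_info["rxq_count_int"] = 2

    # phy_cores=0 path: the full remaining CPU list comes back and a
    # random main core is picked from it.
    phy_cores = 0
    cpu_main = "2,3,4,5"
    cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))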