fix: Show mrr trials in trending graphs
[csit.git] resources/libraries/python/CpuUtils.py
index 5805ba7..518469b 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 """CPU utilities library."""
 
+from random import choice
+
 from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
 
 __all__ = [u"CpuUtils"]
 
@@ -245,6 +247,9 @@ class CpuUtils:
         """Return list of DUT node related list of CPU numbers. The main
         computing unit is physical core count.
 
+        On SMT enabled DUTs, both sibling logical cores are used,
+        unless Robot variable \${smt_used} is set to False.
+
         :param node: DUT node.
         :param cpu_node: Numa node number.
         :param nf_chains: Number of NF chains.
@@ -276,6 +281,7 @@ class CpuUtils:
             raise RuntimeError(u"NodeID is out of range!")
 
         smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        smt_used = BuiltIn().get_variable_value("\${smt_used}", smt_used)
         cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
         # CPU thread sibling offset.
         sib = len(cpu_list) // CpuUtils.NR_OF_THREADS
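The \${smt_used} override above relies on BuiltIn().get_variable_value() returning the supplied default whenever the Robot variable is not defined, so the detected SMT state still wins unless a suite sets the variable explicitly. A minimal, self-contained sketch of that fallback pattern (the plain dictionary below merely stands in for Robot's variable store):

def get_or_default(variables, name, default=None):
    # Stand-in for BuiltIn().get_variable_value(): return the stored value
    # if the variable exists, otherwise fall back to the supplied default.
    return variables.get(name, default)

detected_smt = True  # e.g. what CpuUtils.is_smt_enabled() reported
print(get_or_default({}, "${smt_used}", detected_smt))                      # True
print(get_or_default({"${smt_used}": False}, "${smt_used}", detected_smt))  # False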
@@ -388,25 +394,25 @@ class CpuUtils:
 
     @staticmethod
     def get_affinity_trex(
-            node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+            node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
         """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
 
         :param node: TG node.
-        :param if1_pci: TG first interface.
-        :param if2_pci: TG second interface.
+        :param if_key: TG first interface key.
         :param tg_mtc: TG main thread count.
         :param tg_dtc: TG dataplane thread count.
         :param tg_ltc: TG latency thread count.
+        :param tg_dtc_offset: TG dataplane thread offset.
         :type node: dict
-        :type if1_pci: str
-        :type if2_pci: str
+        :type if_key: str
         :type tg_mtc: int
         :type tg_dtc: int
         :type tg_ltc: int
+        :type tg_dtc_offset: int
         :returns: List of CPUs allocated to T-Rex including numa node.
         :rtype: int, int, int, list
         """
-        interface_list = [if1_pci, if2_pci]
+        interface_list = [if_key]
         cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
 
         master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
@@ -414,12 +420,11 @@ class CpuUtils:
             smt_used=False)
 
         threads = CpuUtils.cpu_slice_of_list_per_node(
-            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
-            smt_used=False)
+            node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
+            cpu_cnt=tg_dtc, smt_used=False)
 
         latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
-            node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
-            smt_used=False)
+            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
 
         return master_thread_id[0], latency_thread_id[0], cpu_node, threads
 
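With the reordering above, the master core is taken first, the latency core immediately after it, and the dataplane cores only after skipping master, latency and the optional tg_dtc_offset. A worked sketch of the resulting slices, assuming cpu_slice_of_list_per_node() acts as a skip-then-take over the per-NUMA core list and that the master slice starts at the beginning of that list (the core numbers are made up):

cpu_list = [2, 3, 4, 5, 6, 7, 8, 9]   # hypothetical TG NUMA node, smt_used=False
tg_mtc, tg_ltc, tg_dtc, tg_dtc_offset = 1, 1, 4, 0

master_thread_id = cpu_list[:tg_mtc]                    # [2]
latency_thread_id = cpu_list[tg_mtc:tg_mtc + tg_ltc]    # [3]
dp_skip = tg_mtc + tg_ltc + tg_dtc_offset
threads = cpu_list[dp_skip:dp_skip + tg_dtc]            # [4, 5, 6, 7]
print(master_thread_id, latency_thread_id, threads)

A non-zero tg_dtc_offset simply shifts the dataplane slice further along the list without touching the master and latency cores.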
@@ -501,17 +506,15 @@ class CpuUtils:
 
     @staticmethod
     def get_affinity_vswitch(
-            nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
-        """Get affinity for vswitch.
+            nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+        """Get affinity for vswitch on all DUTs.
 
         :param nodes: Topology nodes.
-        :param node: Topology node string.
         :param phy_cores: Number of physical cores to allocate.
         :param rx_queues: Number of RX queues. (Optional, Default: None)
         :param rxd: Number of RX descriptors. (Optional, Default: None)
         :param txd: Number of TX descriptors. (Optional, Default: None)
         :type nodes: dict
-        :type node: str
         :type phy_cores: int
         :type rx_queues: int
         :type rxd: int
@@ -519,76 +522,83 @@ class CpuUtils:
         :returns: Compute resource information dictionary.
         :rtype: dict
         """
-        # Number of Data Plane physical cores.
-        dp_cores_count = BuiltIn().get_variable_value(
-            f"${{dp_cores_count}}", phy_cores
-        )
-        # Number of Feature Plane physical cores.
-        fp_cores_count = BuiltIn().get_variable_value(
-            f"${{fp_cores_count}}", phy_cores - dp_cores_count
-        )
-        # Ratio between RX queues and data plane threads.
-        rxq_ratio = BuiltIn().get_variable_value(
-            f"${{rxq_ratio}}", 1
-        )
-
-        dut_pf_keys = BuiltIn().get_variable_value(
-            f"${{{node}_pf_keys}}"
-        )
-        # SMT override in case of non standard test cases.
-        smt_used = BuiltIn().get_variable_value(
-            f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
-        )
-
-        cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
-        skip_cnt = Constants.CPU_CNT_SYSTEM
-        cpu_main = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=Constants.CPU_CNT_MAIN,
-            smt_used=False
-        )
-        skip_cnt += Constants.CPU_CNT_MAIN
-        cpu_dp = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=int(dp_cores_count),
-            smt_used=smt_used
-        ) if int(dp_cores_count) else u""
-        skip_cnt = skip_cnt + int(dp_cores_count)
-        cpu_fp = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=int(fp_cores_count),
-            smt_used=smt_used
-        ) if int(fp_cores_count) else u""
-
-        fp_count_int = \
-            int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
-            else int(fp_cores_count)
-        dp_count_int = \
-            int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
-            else int(dp_cores_count)
-
-        rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
-        rxq_count_int = 1 if not rxq_count_int else rxq_count_int
-
         compute_resource_info = dict()
-        compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
-        compute_resource_info[u"smt_used"] = smt_used
-        compute_resource_info[u"cpu_main"] = cpu_main
-        compute_resource_info[u"cpu_dp"] = cpu_dp
-        compute_resource_info[u"cpu_fp"] = cpu_fp
-        compute_resource_info[u"cpu_wt"] = \
-            u",".join(filter(None, [cpu_dp, cpu_fp]))
-        compute_resource_info[u"cpu_alloc_str"] = \
-            u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
-        compute_resource_info[u"cpu_count_int"] = \
-            int(dp_cores_count) + int(fp_cores_count)
-        compute_resource_info[u"rxd_count_int"] = rxd
-        compute_resource_info[u"txd_count_int"] = txd
-        compute_resource_info[u"rxq_count_int"] = rxq_count_int
-        compute_resource_info[u"fp_count_int"] = fp_count_int
-        compute_resource_info[u"dp_count_int"] = dp_count_int
+        for node_name, node in nodes.items():
+            if node["type"] != NodeType.DUT:
+                continue
+            # Number of Data Plane physical cores.
+            dp_cores_count = BuiltIn().get_variable_value(
+                "${dp_cores_count}", phy_cores
+            )
+            # Number of Feature Plane physical cores.
+            fp_cores_count = BuiltIn().get_variable_value(
+                "${fp_cores_count}", phy_cores - dp_cores_count
+            )
+            # Ratio between RX queues and data plane threads.
+            rxq_ratio = BuiltIn().get_variable_value(
+                "${rxq_ratio}", 1
+            )
+
+            dut_pf_keys = BuiltIn().get_variable_value(
+                f"${{{node_name}_pf_keys}}"
+            )
+            # SMT override in case of non standard test cases.
+            smt_used = BuiltIn().get_variable_value(
+                "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+            )
+
+            cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+            skip_cnt = Constants.CPU_CNT_SYSTEM
+            cpu_main = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
+                smt_used=False
+            )
+            cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
+            skip_cnt += Constants.CPU_CNT_MAIN
+            cpu_dp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(dp_cores_count),
+                smt_used=smt_used
+            ) if int(dp_cores_count) else ""
+            skip_cnt = skip_cnt + int(dp_cores_count)
+            cpu_fp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(fp_cores_count),
+                smt_used=smt_used
+            ) if int(fp_cores_count) else ""
+
+            fp_count_int = \
+                int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(fp_cores_count)
+            dp_count_int = \
+                int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(dp_cores_count)
+
+            rxq_count_int = \
+                int(rx_queues) if rx_queues \
+                else int(dp_count_int/rxq_ratio)
+            rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+            compute_resource_info["buffers_numa"] = \
+                215040 if smt_used else 107520
+            compute_resource_info["smt_used"] = smt_used
+            compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+            compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+            compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+            compute_resource_info[f"{node_name}_cpu_wt"] = \
+                ",".join(filter(None, [cpu_dp, cpu_fp]))
+            compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+                ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+            compute_resource_info["cpu_count_int"] = \
+                int(dp_cores_count) + int(fp_cores_count)
+            compute_resource_info["rxd_count_int"] = rxd
+            compute_resource_info["txd_count_int"] = txd
+            compute_resource_info["rxq_count_int"] = rxq_count_int
+            compute_resource_info["fp_count_int"] = fp_count_int
+            compute_resource_info["dp_count_int"] = dp_count_int
 
         return compute_resource_info
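Since the rewritten keyword loops over every DUT in the topology, the returned dictionary now carries per-node keys prefixed with the node name, alongside a handful of topology-wide counters that reflect the last DUT processed. A self-contained sketch of that key layout (node names and core strings are invented; the real keyword derives them from the topology and Robot variables such as ${DUT1_pf_keys}):

compute_resource_info = {}
fake_allocations = {"DUT1": ("1", "2,3", ""), "DUT2": ("1", "2,3", "")}
for node_name, (cpu_main, cpu_dp, cpu_fp) in fake_allocations.items():
    # Mirror only the per-DUT key naming used by get_affinity_vswitch().
    compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
    compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
    compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
    compute_resource_info[f"{node_name}_cpu_wt"] = ",".join(
        filter(None, [cpu_dp, cpu_fp]))
    compute_resource_info[f"{node_name}_cpu_alloc_str"] = ",".join(
        filter(None, [cpu_main, cpu_dp, cpu_fp]))
print(sorted(compute_resource_info))

The newly imported choice() only comes into play when phy_cores is 0: in that case cpu_main is narrowed to a single randomly picked core instead of the fixed CPU_CNT_MAIN slice.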