fix(etl): Typo
[csit.git] / resources/libraries/python/CpuUtils.py
index 842c16d..518469b 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -13,11 +13,13 @@
 
 """CPU utilities library."""
 
+from random import choice
+
 from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
 
 __all__ = [u"CpuUtils"]
 
@@ -141,7 +143,7 @@ class CpuUtils:
     @staticmethod
     def cpu_slice_of_list_per_node(
             node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False):
-        """Return string of node related list of CPU numbers.
+        """Return node related subset of list of CPU numbers.
 
         :param node: Node dictionary with cpuinfo.
         :param cpu_node: Numa node number.
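
For illustration, a minimal call sketch of the method documented above (the
node dict and its u"cpuinfo" layout are assumed, not shown in this diff):

    # Hypothetical example: take 4 CPUs from NUMA 0, skipping the first 2.
    cpus = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node=0, skip_cnt=2, cpu_cnt=4, smt_used=False
    )
    # -> e.g. [2, 3, 4, 5] when NUMA 0 lists CPUs starting at 0
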
@@ -232,7 +234,7 @@ class CpuUtils:
             cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
             cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
             cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
-                f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
+                        f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
         else:
             cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"
 
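
For illustration (values assumed): with SMT the slice carries both sibling
halves, so the two-part range built above renders like:

    # Hypothetical SMT slice [2, 3, 4, 5, 58, 59, 60, 61] with sep="-":
    #   cpu_range == "2-5,58-61"
    # Without SMT, a slice [2, 3, 4, 5] renders as "2-5".
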
@@ -245,6 +247,9 @@ class CpuUtils:
         """Return list of DUT node related list of CPU numbers. The main
         computing unit is physical core count.
 
         """Return list of DUT node related list of CPU numbers. The main
         computing unit is physical core count.
 
+        On SMT-enabled DUTs, both sibling logical cores are used,
+        unless Robot variable \${smt_used} is set to False.
+
         :param node: DUT node.
         :param cpu_node: Numa node number.
         :param nf_chains: Number of NF chains.
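
The override described in the new docstring lines can be set from a suite; a
hedged sketch using the standard Robot BuiltIn API (only the variable name
comes from this diff):

    from robot.libraries.BuiltIn import BuiltIn
    # Force allocation on physical cores only for the current test.
    BuiltIn().set_test_variable("\${smt_used}", False)
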
@@ -268,7 +273,7 @@ class CpuUtils:
         :returns: List of CPUs allocated to NF.
         :rtype: list
         :raises RuntimeError: If we require more cpus than available or if
-        placement is not possible due to wrong parameters.
+            placement is not possible due to wrong parameters.
         """
         if not 1 <= nf_chain <= nf_chains:
             raise RuntimeError(u"ChainID is out of range!")
         """
         if not 1 <= nf_chain <= nf_chains:
             raise RuntimeError(u"ChainID is out of range!")
@@ -276,6 +281,7 @@ class CpuUtils:
             raise RuntimeError(u"NodeID is out of range!")
 
         smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        smt_used = BuiltIn().get_variable_value("\${smt_used}", smt_used)
         cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
         # CPU thread sibling offset.
         sib = len(cpu_list) // CpuUtils.NR_OF_THREADS
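
The added get_variable_value call is the usual Robot fallback idiom: the
second argument is returned when \${smt_used} is not defined, so the
autodetected value survives outside an override. A standalone sketch:

    detected = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
    # Robot variable wins when set; otherwise the detected default is kept.
    smt_used = BuiltIn().get_variable_value("\${smt_used}", detected)
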
@@ -283,12 +289,6 @@ class CpuUtils:
         dtc_is_integer = isinstance(nf_dtc, int)
         if not smt_used and not dtc_is_integer:
             raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
-        # TODO: Please reword the following todo if it is still relevant
-        # TODO: Workaround as we are using physical core as main unit, we must
-        # adjust number of physical dataplane cores in case of float for further
-        # array referencing. As rounding method in Py2.7 and Py3.x differs, we
-        # are using static mapping. This can be rewritten using flat arrays and
-        # different logic (from Physical core unit to Logical core unit).
         if not dtc_is_integer:
             nf_dtc = 1
 
@@ -316,6 +316,36 @@ class CpuUtils:
         result[0:0] = cpu_list[mt_skip:mt_skip + 1]
         return result
 
+    @staticmethod
+    def get_affinity_af_xdp(
+            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+        """Get affinity for AF_XDP interface. Result will be used to pin IRQs.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface.
+        :param cpu_skip_cnt: Number of CPU cores to skip.
+        :param cpu_cnt: Number of CPU threads to allocate.
+        :type node: dict
+        :type pf_key: str
+        :type cpu_skip_cnt: int
+        :type cpu_cnt: int
+        :returns: List of CPUs allocated to AF_XDP interface.
+        :rtype: list
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        if smt_used:
+            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+        return CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=smt_used
+        )
+
     @staticmethod
     def get_affinity_nf(
             nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
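
A usage sketch for the new AF_XDP helper (interface key and counts are
hypothetical); the returned list is what the caller pins IRQs to:

    irq_cpus = CpuUtils.get_affinity_af_xdp(
        node, pf_key=u"port1", cpu_skip_cnt=2, cpu_cnt=4
    )
    # With SMT, cpu_cnt is halved to 2 physical cores and the slice then
    # includes both of their siblings, yielding 4 logical CPUs again.
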
@@ -361,3 +391,214 @@ class CpuUtils:
             nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
             nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
         )
+
+    @staticmethod
+    def get_affinity_trex(
+            node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
+        """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
+
+        :param node: TG node.
+        :param if_key: TG first interface.
+        :param tg_mtc: TG main thread count.
+        :param tg_dtc: TG dataplane thread count.
+        :param tg_ltc: TG latency thread count.
+        :param tg_dtc_offset: TG dataplane thread offset.
+        :type node: dict
+        :type if_key: str
+        :type tg_mtc: int
+        :type tg_dtc: int
+        :type tg_ltc: int
+        :type tg_dtc_offset: int
+        :returns: Master thread ID, latency thread ID, NUMA node, worker CPUs.
+        :rtype: tuple(int, int, int, list)
+        """
+        interface_list = [if_key]
+        cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
+
+        master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
+            smt_used=False)
+
+        threads = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
+            cpu_cnt=tg_dtc, smt_used=False)
+
+        latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
+
+        return master_thread_id[0], latency_thread_id[0], cpu_node, threads
+
+    @staticmethod
+    def get_affinity_iperf(
+            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+        """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface.
+        :param cpu_skip_cnt: Number of CPU cores to skip.
+        :param cpu_cnt: Number of CPU threads to allocate.
+        :type node: dict
+        :type pf_key: str
+        :type cpu_skip_cnt: int
+        :type cpu_cnt: int
+        :returns: Range of CPUs allocated to iPerf3, as a string.
+        :rtype: str
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        return CpuUtils.cpu_range_per_node_str(
+            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=False)
+
+    @staticmethod
+    def get_affinity_vhost(
+            node, pf_key, skip_cnt=0, cpu_cnt=1):
+        """Get affinity for vhost. Result will be used to pin vhost threads.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface.
+        :param skip_cnt: Number of CPU cores to skip.
+        :param cpu_cnt: Number of CPU threads to allocate.
+        :type node: dict
+        :type pf_key: str
+        :type skip_cnt: int
+        :type cpu_cnt: int
+        :returns: List of CPUs allocated to vhost process.
+        :rtype: list
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        if smt_used:
+            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+        return CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=False)
+
+    @staticmethod
+    def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
+        """Get idle CPU List.
+
+        :param node: Node dictionary with cpuinfo.
+        :param cpu_node: Numa node number.
+        :param smt_used: True - we want to use SMT, otherwise false.
+        :param cpu_alloc_str: vpp used cores.
+        :param sep: Separator, default: ",".
+        :type node: dict
+        :type cpu_node: int
+        :type smt_used: bool
+        :type cpu_alloc_str: str
+        :type smt_used: bool
+        :type sep: str
+        :rtype: list
+        """
+        cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+        cpu_idle_list = [i for i in cpu_list
+                         if str(i) not in cpu_alloc_str.split(sep)]
+        return cpu_idle_list
+
+    @staticmethod
+    def get_affinity_vswitch(
+            nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+        """Get affinity for vswitch on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param phy_cores: Number of physical cores to allocate.
+        :param rx_queues: Number of RX queues. (Optional, Default: None)
+        :param rxd: Number of RX descriptors. (Optional, Default: None)
+        :param txd: Number of TX descriptors. (Optional, Default: None)
+        :type nodes: dict
+        :type phy_cores: int
+        :type rx_queues: int
+        :type rxd: int
+        :type txd: int
+        :returns: Compute resource information dictionary.
+        :rtype: dict
+        """
+        compute_resource_info = dict()
+        for node_name, node in nodes.items():
+            if node["type"] != NodeType.DUT:
+                continue
+            # Number of Data Plane physical cores.
+            dp_cores_count = BuiltIn().get_variable_value(
+                "${dp_cores_count}", phy_cores
+            )
+            # Number of Feature Plane physical cores.
+            fp_cores_count = BuiltIn().get_variable_value(
+                "${fp_cores_count}", phy_cores - dp_cores_count
+            )
+            # Ratio between RX queues and data plane threads.
+            rxq_ratio = BuiltIn().get_variable_value(
+                "${rxq_ratio}", 1
+            )
+
+            dut_pf_keys = BuiltIn().get_variable_value(
+                f"${{{node_name}_pf_keys}}"
+            )
+            # SMT override in case of non-standard test cases.
+            smt_used = BuiltIn().get_variable_value(
+                "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+            )
+
+            cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+            skip_cnt = Constants.CPU_CNT_SYSTEM
+            cpu_main = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
+                smt_used=False
+            )
+            cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
+            skip_cnt += Constants.CPU_CNT_MAIN
+            cpu_dp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(dp_cores_count),
+                smt_used=smt_used
+            ) if int(dp_cores_count) else ""
+            skip_cnt = skip_cnt + int(dp_cores_count)
+            cpu_fp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(fp_cores_count),
+                smt_used=smt_used
+            ) if int(fp_cores_count) else ""
+
+            fp_count_int = \
+                int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(fp_cores_count)
+            dp_count_int = \
+                int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(dp_cores_count)
+
+            rxq_count_int = \
+                int(rx_queues) if rx_queues \
+                else int(dp_count_int/rxq_ratio)
+            rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+            compute_resource_info["buffers_numa"] = \
+                215040 if smt_used else 107520
+            compute_resource_info["smt_used"] = smt_used
+            compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+            compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+            compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+            compute_resource_info[f"{node_name}_cpu_wt"] = \
+                ",".join(filter(None, [cpu_dp, cpu_fp]))
+            compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+                ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+            compute_resource_info["cpu_count_int"] = \
+                int(dp_cores_count) + int(fp_cores_count)
+            compute_resource_info["rxd_count_int"] = rxd
+            compute_resource_info["txd_count_int"] = txd
+            compute_resource_info["rxq_count_int"] = rxq_count_int
+            compute_resource_info["fp_count_int"] = fp_count_int
+            compute_resource_info["dp_count_int"] = dp_count_int
+
+        return compute_resource_info
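
A sketch of unpacking the multi-value return of get_affinity_trex (the
interface key and counts are assumed):

    master, latency, numa, workers = CpuUtils.get_affinity_trex(
        node, u"port1", tg_mtc=1, tg_dtc=8, tg_ltc=1
    )
    # master and latency are single CPU ids carved out ahead of the
    # tg_dtc dataplane CPUs in workers; numa is the interface's node.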
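
The iPerf3 and vhost helpers follow the same skip/count pattern; a short
sketch of both (interface key assumed, core ids shown for a non-SMT node):

    # cpu_range_per_node_str output, e.g. u"9-10".
    iperf_range = CpuUtils.get_affinity_iperf(
        node, u"port1", cpu_skip_cnt=9, cpu_cnt=2
    )
    # List of physical cores for vhost, e.g. [2, 3].
    vhost_cpus = CpuUtils.get_affinity_vhost(
        node, u"port1", skip_cnt=2, cpu_cnt=2
    )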
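
get_cpu_idle_list is the complement of an allocation string; a minimal
sketch (values assumed):

    # CPUs of NUMA 0 whose ids do not appear in the allocation "2,3,4".
    idle = CpuUtils.get_cpu_idle_list(node, 0, False, u"2,3,4")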
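
Finally, a sketch of consuming the mapping built by get_affinity_vswitch
(the node name u"DUT1" is assumed). Per-DUT entries are prefixed with the
node name; shared entries are not. With phy_cores=0, cpu_main falls back to
a random core picked by the newly imported choice():

    info = CpuUtils.get_affinity_vswitch(nodes, phy_cores=2, rx_queues=4)
    main_cpu = info[u"DUT1_cpu_main"]    # main/management core(s)
    worker_cpus = info[u"DUT1_cpu_wt"]   # dataplane + feature-plane cores
    rxq_count = info[u"rxq_count_int"]   # number of RX queues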