docstring: Docstring warnings fixed.
[csit.git] / resources / libraries / python / CpuUtils.py
index 70177f5..5805ba7 100644 (file)
@@ -232,7 +232,7 @@ class CpuUtils:
             cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
             cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
             cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
-                f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
+                        f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
         else:
             cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"
 
@@ -268,7 +268,7 @@ class CpuUtils:
         :returns: List of CPUs allocated to NF.
         :rtype: list
         :raises RuntimeError: If we require more cpus than available or if
-        placement is not possible due to wrong parameters.
+            placement is not possible due to wrong parameters.
         """
         if not 1 <= nf_chain <= nf_chains:
             raise RuntimeError(u"ChainID is out of range!")
@@ -310,6 +310,36 @@ class CpuUtils:
         result[0:0] = cpu_list[mt_skip:mt_skip + 1]
         return result
 
+    @staticmethod
+    def get_affinity_af_xdp(
+            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+        """Get affinity for AF_XDP interface. Result will be used to pin IRQs.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface.
+        :param cpu_skip_cnt: Number of CPU cores to skip.
+        :param cpu_cnt: CPU threads count.
+        :type node: dict
+        :type pf_key: str
+        :type cpu_skip_cnt: int
+        :type cpu_cnt: int
+        :returns: List of CPUs allocated to AF_XDP interface.
+        :rtype: list
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        if smt_used:
+            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+        return CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=smt_used
+        )
+
     @staticmethod
     def get_affinity_nf(
             nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
@@ -446,3 +476,119 @@ class CpuUtils:
         return CpuUtils.cpu_slice_of_list_per_node(
             node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
             smt_used=False)
+
+    @staticmethod
+    def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
+        """Get idle CPU List.
+
+        :param node: Node dictionary with cpuinfo.
+        :param cpu_node: Numa node number.
+        :param smt_used: True - we want to use SMT, otherwise false.
+        :param cpu_alloc_str: VPP used cores.
+        :param sep: Separator, default: ",".
+        :type node: dict
+        :type cpu_node: int
+        :type smt_used: bool
+        :type cpu_alloc_str: str
+        :type sep: str
+        :returns: List of idle CPUs.
+        :rtype: list
+        """
+        cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+        cpu_idle_list = [i for i in cpu_list
+                         if str(i) not in cpu_alloc_str.split(sep)]
+        return cpu_idle_list
+
+    @staticmethod
+    def get_affinity_vswitch(
+            nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
+        """Get affinity for vswitch.
+
+        :param nodes: Topology nodes.
+        :param node: Topology node string.
+        :param phy_cores: Number of physical cores to allocate.
+        :param rx_queues: Number of RX queues. (Optional, Default: None)
+        :param rxd: Number of RX descriptors. (Optional, Default: None)
+        :param txd: Number of TX descriptors. (Optional, Default: None)
+        :type nodes: dict
+        :type node: str
+        :type phy_cores: int
+        :type rx_queues: int
+        :type rxd: int
+        :type txd: int
+        :returns: Compute resource information dictionary.
+        :rtype: dict
+        """
+        # Number of Data Plane physical cores.
+        dp_cores_count = BuiltIn().get_variable_value(
+            f"${{dp_cores_count}}", phy_cores
+        )
+        # Number of Feature Plane physical cores.
+        fp_cores_count = BuiltIn().get_variable_value(
+            f"${{fp_cores_count}}", phy_cores - dp_cores_count
+        )
+        # Ratio between RX queues and data plane threads.
+        rxq_ratio = BuiltIn().get_variable_value(
+            f"${{rxq_ratio}}", 1
+        )
+
+        dut_pf_keys = BuiltIn().get_variable_value(
+            f"${{{node}_pf_keys}}"
+        )
+        # SMT override in case of non standard test cases.
+        smt_used = BuiltIn().get_variable_value(
+            f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
+        )
+
+        cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
+        skip_cnt = Constants.CPU_CNT_SYSTEM
+        cpu_main = CpuUtils.cpu_list_per_node_str(
+            nodes[node], cpu_node,
+            skip_cnt=skip_cnt,
+            cpu_cnt=Constants.CPU_CNT_MAIN,
+            smt_used=False
+        )
+        skip_cnt += Constants.CPU_CNT_MAIN
+        cpu_dp = CpuUtils.cpu_list_per_node_str(
+            nodes[node], cpu_node,
+            skip_cnt=skip_cnt,
+            cpu_cnt=int(dp_cores_count),
+            smt_used=smt_used
+        ) if int(dp_cores_count) else u""
+        skip_cnt = skip_cnt + int(dp_cores_count)
+        cpu_fp = CpuUtils.cpu_list_per_node_str(
+            nodes[node], cpu_node,
+            skip_cnt=skip_cnt,
+            cpu_cnt=int(fp_cores_count),
+            smt_used=smt_used
+        ) if int(fp_cores_count) else u""
+
+        fp_count_int = \
+            int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+            else int(fp_cores_count)
+        dp_count_int = \
+            int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+            else int(dp_cores_count)
+
+        rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
+        rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+        compute_resource_info = dict()
+        compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
+        compute_resource_info[u"smt_used"] = smt_used
+        compute_resource_info[u"cpu_main"] = cpu_main
+        compute_resource_info[u"cpu_dp"] = cpu_dp
+        compute_resource_info[u"cpu_fp"] = cpu_fp
+        compute_resource_info[u"cpu_wt"] = \
+            u",".join(filter(None, [cpu_dp, cpu_fp]))
+        compute_resource_info[u"cpu_alloc_str"] = \
+            u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+        compute_resource_info[u"cpu_count_int"] = \
+            int(dp_cores_count) + int(fp_cores_count)
+        compute_resource_info[u"rxd_count_int"] = rxd
+        compute_resource_info[u"txd_count_int"] = txd
+        compute_resource_info[u"rxq_count_int"] = rxq_count_int
+        compute_resource_info[u"fp_count_int"] = fp_count_int
+        compute_resource_info[u"dp_count_int"] = dp_count_int
+
+        return compute_resource_info