X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FCpuUtils.py;h=e4fff010f1ac79fa70fdc9de254488b5ad5f85bc;hp=f556c518144c3dbfdbc4ddf07961b1b298ab48a4;hb=HEAD;hpb=a275fa0062158d712152f542b7bc9ec40b5c5f31

diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index f556c51814..c77d0f83b1 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -13,11 +13,13 @@
 
 """CPU utilities library."""
 
+from random import choice
+
 from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
 
 __all__ = [u"CpuUtils"]
 
@@ -268,7 +270,7 @@ class CpuUtils:
         :returns: List of CPUs allocated to NF.
         :rtype: list
         :raises RuntimeError: If we require more cpus than available or if
-        placement is not possible due to wrong parameters.
+            placement is not possible due to wrong parameters.
         """
         if not 1 <= nf_chain <= nf_chains:
             raise RuntimeError(u"ChainID is out of range!")
@@ -388,25 +390,25 @@ class CpuUtils:
 
     @staticmethod
     def get_affinity_trex(
-            node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+            node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
         """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
 
         :param node: TG node.
-        :param if1_pci: TG first interface.
-        :param if2_pci: TG second interface.
+        :param if_key: TG first interface.
         :param tg_mtc: TG main thread count.
         :param tg_dtc: TG dataplane thread count.
         :param tg_ltc: TG latency thread count.
+        :param tg_dtc_offset: TG dataplane thread offset.
         :type node: dict
-        :type if1_pci: str
-        :type if2_pci: str
+        :type if_key: str
         :type tg_mtc: int
         :type tg_dtc: int
         :type tg_ltc: int
+        :type tg_dtc_offset: int
         :returns: List of CPUs allocated to T-Rex including numa node.
         :rtype: int, int, int, list
         """
-        interface_list = [if1_pci, if2_pci]
+        interface_list = [if_key]
         cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
 
         master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
@@ -414,12 +416,11 @@ class CpuUtils:
             smt_used=False)
 
         threads = CpuUtils.cpu_slice_of_list_per_node(
-            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
-            smt_used=False)
+            node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
+            cpu_cnt=tg_dtc, smt_used=False)
 
         latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
-            node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
-            smt_used=False)
+            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
 
         return master_thread_id[0], latency_thread_id[0], cpu_node, threads
 
@@ -500,18 +501,16 @@ class CpuUtils:
         return cpu_idle_list
 
     @staticmethod
-    def get_affinity_vpp_vswitch(
-            nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
-        """Get affinity or VPP switch.
+    def get_affinity_vswitch(
+            nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+        """Get affinity for vswitch on all DUTs.
 
         :param nodes: Topology nodes.
-        :param node: Topology node string.
         :param phy_cores: Number of physical cores to allocate.
         :param rx_queues: Number of RX queues. (Optional, Default: None)
         :param rxd: Number of RX descriptors. (Optional, Default: None)
         :param txd: Number of TX descriptors. (Optional, Default: None)
         :type nodes: dict
-        :type node: str
         :type phy_cores: int
         :type rx_queues: int
         :type rxd: int
@@ -519,76 +518,83 @@ class CpuUtils:
         :returns: Compute resource information dictionary.
         :rtype: dict
         """
-        # Number of Data Plane physical cores.
-        dp_cores_count = BuiltIn().get_variable_value(
-            f"${{dp_cores_count}}", phy_cores
-        )
-        # Number of Feature Plane physical cores.
-        fp_cores_count = BuiltIn().get_variable_value(
-            f"${{fp_cores_count}}", phy_cores - dp_cores_count
-        )
-        # Ratio between RX queues and data plane threads.
-        rxq_ratio = BuiltIn().get_variable_value(
-            f"${{rxq_ratio}}", 1
-        )
-
-        dut_pf_keys = BuiltIn().get_variable_value(
-            f"${{{node}_pf_keys}}"
-        )
-        # SMT override in case of non standard test cases.
-        smt_used = BuiltIn().get_variable_value(
-            f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
-        )
-
-        cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
-        skip_cnt = Constants.CPU_CNT_SYSTEM
-        cpu_main = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=Constants.CPU_CNT_MAIN,
-            smt_used=False
-        )
-        skip_cnt += Constants.CPU_CNT_MAIN
-        cpu_dp = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=int(dp_cores_count),
-            smt_used=smt_used
-        ) if int(dp_cores_count) else u""
-        skip_cnt = skip_cnt + int(dp_cores_count)
-        cpu_fp = CpuUtils.cpu_list_per_node_str(
-            nodes[node], cpu_node,
-            skip_cnt=skip_cnt,
-            cpu_cnt=int(fp_cores_count),
-            smt_used=smt_used
-        ) if int(fp_cores_count) else u""
-
-        fp_count_int = \
-            int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
-            else int(fp_cores_count)
-        dp_count_int = \
-            int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
-            else int(dp_cores_count)
-
-        rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
-        rxq_count_int = 1 if not rxq_count_int else rxq_count_int
-
         compute_resource_info = dict()
-        compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
-        compute_resource_info[u"smt_used"] = smt_used
-        compute_resource_info[u"cpu_main"] = cpu_main
-        compute_resource_info[u"cpu_dp"] = cpu_dp
-        compute_resource_info[u"cpu_fp"] = cpu_fp
-        compute_resource_info[u"cpu_wt"] = \
-            u",".join(filter(None, [cpu_dp, cpu_fp]))
-        compute_resource_info[u"cpu_alloc_str"] = \
-            u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
-        compute_resource_info[u"cpu_count_int"] = \
-            int(dp_cores_count) + int(fp_cores_count)
-        compute_resource_info[u"rxd_count_int"] = rxd
-        compute_resource_info[u"txd_count_int"] = txd
-        compute_resource_info[u"rxq_count_int"] = rxq_count_int
-        compute_resource_info[u"fp_count_int"] = fp_count_int
-        compute_resource_info[u"dp_count_int"] = dp_count_int
+        for node_name, node in nodes.items():
+            if node["type"] != NodeType.DUT:
+                continue
+            # Number of Data Plane physical cores.
+            dp_cores_count = BuiltIn().get_variable_value(
+                "${dp_cores_count}", phy_cores
+            )
+            # Number of Feature Plane physical cores.
+            fp_cores_count = BuiltIn().get_variable_value(
+                "${fp_cores_count}", phy_cores - dp_cores_count
+            )
+            # Ratio between RX queues and data plane threads.
+            rxq_ratio = BuiltIn().get_variable_value(
+                "${rxq_ratio}", 1
+            )
+
+            dut_pf_keys = BuiltIn().get_variable_value(
+                f"${{{node_name}_pf_keys}}"
+            )
+            # SMT override in case of non standard test cases.
+            smt_used = BuiltIn().get_variable_value(
+                "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+            )
+
+            cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+            skip_cnt = Constants.CPU_CNT_SYSTEM
+            cpu_main = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
+                smt_used=False
+            )
+            cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
+            skip_cnt += Constants.CPU_CNT_MAIN
+            cpu_dp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(dp_cores_count),
+                smt_used=smt_used
+            ) if int(dp_cores_count) else ""
+            skip_cnt = skip_cnt + int(dp_cores_count)
+            cpu_fp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(fp_cores_count),
+                smt_used=smt_used
+            ) if int(fp_cores_count) else ""
+
+            fp_count_int = \
+                int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(fp_cores_count)
+            dp_count_int = \
+                int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(dp_cores_count)
+
+            rxq_count_int = \
+                int(rx_queues) if rx_queues \
+                else int(dp_count_int/rxq_ratio)
+            rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+            compute_resource_info["buffers_numa"] = \
+                215040 if smt_used else 107520
+            compute_resource_info["smt_used"] = smt_used
+            compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+            compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+            compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+            compute_resource_info[f"{node_name}_cpu_wt"] = \
+                ",".join(filter(None, [cpu_dp, cpu_fp]))
+            compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+                ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+            compute_resource_info["cpu_count_int"] = \
+                int(dp_cores_count) + int(fp_cores_count)
+            compute_resource_info["rxd_count_int"] = rxd
+            compute_resource_info["txd_count_int"] = txd
+            compute_resource_info["rxq_count_int"] = rxq_count_int
+            compute_resource_info["fp_count_int"] = fp_count_int
+            compute_resource_info["dp_count_int"] = dp_count_int
 
         return compute_resource_info
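Usage sketch (not part of the diff above): a minimal illustration of how the two refactored methods might be driven after this change. The `tg_node` and `nodes` topology dictionaries, the interface key `"port1"`, and the node name `"DUT1"` are placeholders, not values taken from the diff. Note also that `get_affinity_vswitch` reads Robot Framework variables through `BuiltIn().get_variable_value`, so it is only expected to work inside a running Robot test context.

    from resources.libraries.python.CpuUtils import CpuUtils

    # get_affinity_trex now takes a single interface key instead of two PCI
    # addresses. CPUs on that interface's NUMA node are handed out in the
    # order: main thread(s) first, latency thread(s) directly after them,
    # and dataplane threads after main + latency + tg_dtc_offset.
    master, latency, numa_node, dp_threads = CpuUtils.get_affinity_trex(
        tg_node, "port1", tg_mtc=1, tg_dtc=4, tg_ltc=1, tg_dtc_offset=0
    )

    # get_affinity_vswitch now iterates over every DUT in the topology by
    # itself (the per-node "node" argument is gone) and returns a single
    # dict whose per-node entries are name-prefixed, e.g. "DUT1_cpu_main",
    # "DUT1_cpu_wt", "DUT1_cpu_alloc_str".
    info = CpuUtils.get_affinity_vswitch(nodes, phy_cores=2, rx_queues=2)
    print(info["DUT1_cpu_wt"], info["rxq_count_int"])

The name-prefixed keys are what let one dictionary carry the compute-resource layout for all DUTs at once, which is the point of dropping the per-node argument.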