diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index 91db83eb5c..c77d0f83b1 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -13,16 +13,18 @@
 
 """CPU utilities library."""
 
+from random import choice
+
 from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
 
-__all__ = ["CpuUtils"]
+__all__ = [u"CpuUtils"]
 
 
-class CpuUtils(object):
+class CpuUtils:
     """CPU utilities"""
 
     # Number of threads per core.
@@ -54,7 +56,7 @@ class CpuUtils(object):
         :rtype: bool
         """
         cpu_mems = [item[-4:] for item in cpu_info]
-        cpu_mems_len = len(cpu_mems) / CpuUtils.NR_OF_THREADS
+        cpu_mems_len = len(cpu_mems) // CpuUtils.NR_OF_THREADS
        count = 0
         for cpu_mem in cpu_mems[:cpu_mems_len]:
             if cpu_mem in cpu_mems[cpu_mems_len:]:
@@ -71,17 +73,18 @@ class CpuUtils(object):
         :param nodes: DICT__nodes from Topology.DICT__nodes.
         :type nodes: dict
         :raises RuntimeError: If an ssh command retrieving cpu information
-        fails.
+            fails.
         """
         for node in nodes.values():
-            stdout, _ = exec_cmd_no_error(node, 'uname -m')
-            node['arch'] = stdout.strip()
-            stdout, _ = exec_cmd_no_error(node, 'lscpu -p')
-            node['cpuinfo'] = list()
-            for line in stdout.split("\n"):
-                if line and line[0] != "#":
-                    node['cpuinfo'].append([CpuUtils.__str2int(x) for x in
-                                            line.split(",")])
+            stdout, _ = exec_cmd_no_error(node, u"uname -m")
+            node[u"arch"] = stdout.strip()
+            stdout, _ = exec_cmd_no_error(node, u"lscpu -p")
+            node[u"cpuinfo"] = list()
+            for line in stdout.split(u"\n"):
+                if line and line[0] != u"#":
+                    node[u"cpuinfo"].append(
+                        [CpuUtils.__str2int(x) for x in line.split(u",")]
+                    )
 
     @staticmethod
     def cpu_node_count(node):
@@ -93,11 +96,11 @@ class CpuUtils(object):
         :rtype: int
         :raises RuntimeError: If node cpuinfo is not available.
         """
-        cpu_info = node.get("cpuinfo")
+        cpu_info = node.get(u"cpuinfo")
         if cpu_info is not None:
-            return node["cpuinfo"][-1][3] + 1
-        else:
-            raise RuntimeError("Node cpuinfo not available.")
+            return node[u"cpuinfo"][-1][3] + 1
+
+        raise RuntimeError(u"Node cpuinfo not available.")
 
     @staticmethod
     def cpu_list_per_node(node, cpu_node, smt_used=False):
@@ -115,13 +118,13 @@ class CpuUtils(object):
         :raises RuntimeError: If node cpuinfo is not available
             or if SMT is not enabled.
""" cpu_node = int(cpu_node) - cpu_info = node.get("cpuinfo") + cpu_info = node.get(u"cpuinfo") if cpu_info is None: - raise RuntimeError("Node cpuinfo not available.") + raise RuntimeError(u"Node cpuinfo not available.") smt_enabled = CpuUtils.is_smt_enabled(cpu_info) if not smt_enabled and smt_used: - raise RuntimeError("SMT is not enabled.") + raise RuntimeError(u"SMT is not enabled.") cpu_list = [] for cpu in cpu_info: @@ -133,14 +136,14 @@ class CpuUtils(object): if smt_enabled and not smt_used: cpu_list_len = len(cpu_list) - cpu_list = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS] + cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS] return cpu_list @staticmethod - def cpu_slice_of_list_per_node(node, cpu_node, skip_cnt=0, cpu_cnt=0, - smt_used=False): - """Return string of node related list of CPU numbers. + def cpu_slice_of_list_per_node( + node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False): + """Return node related subset of list of CPU numbers. :param node: Node dictionary with cpuinfo. :param cpu_node: Numa node number. @@ -160,26 +163,25 @@ class CpuUtils(object): cpu_list_len = len(cpu_list) if cpu_cnt + skip_cnt > cpu_list_len: - raise RuntimeError("cpu_cnt + skip_cnt > length(cpu list).") + raise RuntimeError(u"cpu_cnt + skip_cnt > length(cpu list).") if cpu_cnt == 0: cpu_cnt = cpu_list_len - skip_cnt if smt_used: - cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS] - cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:] - cpu_list = [cpu for cpu in cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]] - cpu_list_ex = [cpu for cpu in - cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]] + cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS] + cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:] + cpu_list = cpu_list_0[skip_cnt:skip_cnt + cpu_cnt] + cpu_list_ex = cpu_list_1[skip_cnt:skip_cnt + cpu_cnt] cpu_list.extend(cpu_list_ex) else: - cpu_list = [cpu for cpu in cpu_list[skip_cnt:skip_cnt + cpu_cnt]] + cpu_list = cpu_list[skip_cnt:skip_cnt + cpu_cnt] return cpu_list @staticmethod - def cpu_list_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",", - smt_used=False): + def cpu_list_per_node_str( + node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u",", smt_used=False): """Return string of node related list of CPU numbers. :param node: Node dictionary with cpuinfo. @@ -197,15 +199,15 @@ class CpuUtils(object): :returns: Cpu numbers related to numa from argument. :rtype: str """ - cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node, - skip_cnt=skip_cnt, - cpu_cnt=cpu_cnt, - smt_used=smt_used) + cpu_list = CpuUtils.cpu_slice_of_list_per_node( + node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt, + smt_used=smt_used + ) return sep.join(str(cpu) for cpu in cpu_list) @staticmethod - def cpu_range_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-", - smt_used=False): + def cpu_range_per_node_str( + node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u"-", smt_used=False): """Return string of node related range of CPU numbers, e.g. 0-4. :param node: Node dictionary with cpuinfo. @@ -223,27 +225,25 @@ class CpuUtils(object): :returns: String of node related range of CPU numbers. 
         :rtype: str
         """
-        cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
-                                                       skip_cnt=skip_cnt,
-                                                       cpu_cnt=cpu_cnt,
-                                                       smt_used=smt_used)
+        cpu_list = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=smt_used
+        )
         if smt_used:
             cpu_list_len = len(cpu_list)
-            cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS]
-            cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:]
-            cpu_range = "{}{}{},{}{}{}".format(cpu_list_0[0], sep,
-                                               cpu_list_0[-1],
-                                               cpu_list_1[0], sep,
-                                               cpu_list_1[-1])
+            cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
+            cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
+            cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
+                f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
         else:
-            cpu_range = "{}{}{}".format(cpu_list[0], sep, cpu_list[-1])
+            cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"
 
         return cpu_range
 
     @staticmethod
-    def cpu_slice_of_list_for_nf(node, cpu_node, nf_chains=1, nf_nodes=1,
-                                 nf_chain=1, nf_node=1, nf_dtc=1, nf_mtcr=2,
-                                 nf_dtcr=1, skip_cnt=0):
+    def cpu_slice_of_list_for_nf(
+            node, cpu_node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
+            nf_dtc=1, nf_mtcr=2, nf_dtcr=1, skip_cnt=0):
         """Return list of DUT node related list of CPU numbers. The main
         computing unit is physical core count.
 
@@ -253,9 +253,9 @@ class CpuUtils(object):
         :param nf_nodes: Number of NF nodes in chain.
         :param nf_chain: Chain number indexed from 1.
         :param nf_node: Node number indexed from 1.
-        :param nf_dtc: Amount of physical cores for NF dataplane.
+        :param nf_dtc: Amount of physical cores for NF data plane.
         :param nf_mtcr: NF main thread per core ratio.
-        :param nf_dtcr: NF dataplane thread per core ratio.
+        :param nf_dtcr: NF data plane thread per core ratio.
         :param skip_cnt: Skip first "skip_cnt" CPUs.
         :type node: dict
         :type cpu_node: int
@@ -270,27 +270,21 @@ class CpuUtils(object):
         :returns: List of CPUs allocated to NF.
         :rtype: list
         :raises RuntimeError: If we require more cpus than available or if
-        placement is not possible due to wrong parameters.
+            placement is not possible due to wrong parameters.
         """
         if not 1 <= nf_chain <= nf_chains:
-            raise RuntimeError("ChainID is out of range!")
+            raise RuntimeError(u"ChainID is out of range!")
         if not 1 <= nf_node <= nf_nodes:
-            raise RuntimeError("NodeID is out of range!")
+            raise RuntimeError(u"NodeID is out of range!")
 
-        smt_used = CpuUtils.is_smt_enabled(node['cpuinfo'])
+        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
         cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
         # CPU thread sibling offset.
-        sib = len(cpu_list) / CpuUtils.NR_OF_THREADS
+        sib = len(cpu_list) // CpuUtils.NR_OF_THREADS
 
         dtc_is_integer = isinstance(nf_dtc, int)
         if not smt_used and not dtc_is_integer:
-            raise RuntimeError("Cannot allocate if SMT is not enabled!")
-        # TODO: Please reword the following todo if it is still relevant
-        # TODO: Workaround as we are using physical core as main unit, we must
-        # adjust number of physical dataplane cores in case of float for further
-        # array referencing. As rounding method in Py2.7 and Py3.x differs, we
-        # are using static mapping. This can be rewritten using flat arrays and
-        # different logic (from Physical core unit to Logical core unit).
+            raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
 
         if not dtc_is_integer:
             nf_dtc = 1
@@ -298,7 +292,7 @@ class CpuUtils(object):
         dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr
 
         if (skip_cnt + mt_req + dt_req) > (sib if smt_used else len(cpu_list)):
-            raise RuntimeError("Not enough CPU cores available for placement!")
+            raise RuntimeError(u"Not enough CPU cores available for placement!")
 
         offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
         mt_skip = skip_cnt + (offset % mt_req)
@@ -319,8 +313,39 @@ class CpuUtils(object):
         return result
 
     @staticmethod
-    def get_affinity_nf(nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1,
-                        nf_node=1, vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
+    def get_affinity_af_xdp(
+            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+        """Get affinity for AF_XDP interface. Result will be used to pin IRQs.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface.
+        :param cpu_skip_cnt: Amount of CPU cores to skip.
+        :param cpu_cnt: CPU threads count.
+        :type node: dict
+        :type pf_key: str
+        :type cpu_skip_cnt: int
+        :type cpu_cnt: int
+        :returns: List of CPUs allocated to AF_XDP interface.
+        :rtype: list
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        if smt_used:
+            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+        return CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=smt_used
+        )
+
+    @staticmethod
+    def get_affinity_nf(
+            nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
+            vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
         """Get affinity of NF (network function). Result will be used to compute
         the amount of CPUs and also affinity.
 
@@ -331,10 +356,10 @@ class CpuUtils(object):
         :param nf_nodes: Number of NF nodes in chain.
         :param nf_chain: Chain number indexed from 1.
         :param nf_node: Node number indexed from 1.
-        :param vs_dtc: Amount of physical cores for vswitch dataplane.
-        :param nf_dtc: Amount of physical cores for NF dataplane.
+        :param vs_dtc: Amount of physical cores for vswitch data plane.
+        :param nf_dtc: Amount of physical cores for NF data plane.
         :param nf_mtcr: NF main thread per core ratio.
-        :param nf_dtcr: NF dataplane thread per core ratio.
+        :param nf_dtcr: NF data plane thread per core ratio.
         :type nodes: dict
         :type node: dict
         :type nf_chains: int
@@ -350,11 +375,9 @@ class CpuUtils(object):
         """
         skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc
 
-        interface_list = []
-        interface_list.append(
-            BuiltIn().get_variable_value('${{{node}_if1}}'.format(node=node)))
-        interface_list.append(
-            BuiltIn().get_variable_value('${{{node}_if2}}'.format(node=node)))
+        interface_list = list()
+        interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if1}}"))
+        interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if2}}"))
 
         cpu_node = Topology.get_interfaces_numa_node(
             nodes[node], *interface_list)
@@ -362,5 +385,216 @@ class CpuUtils(object):
         return CpuUtils.cpu_slice_of_list_for_nf(
             node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
             nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
-            nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt)
+            nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
+        )
+
+    @staticmethod
+    def get_affinity_trex(
+            node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
+        """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
+
+        :param node: TG node.
+        :param if_key: TG first interface.
+        :param tg_mtc: TG main thread count.
+        :param tg_dtc: TG data plane thread count.
+        :param tg_ltc: TG latency thread count.
+        :param tg_dtc_offset: TG data plane thread offset.
+        :type node: dict
+        :type if_key: str
+        :type tg_mtc: int
+        :type tg_dtc: int
+        :type tg_ltc: int
+        :type tg_dtc_offset: int
+        :returns: Master thread ID, latency thread ID, NUMA node and list of
+            data plane thread IDs allocated to T-Rex.
+        :rtype: int, int, int, list
+        """
+        interface_list = [if_key]
+        cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
+
+        master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
+            smt_used=False)
+
+        threads = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
+            cpu_cnt=tg_dtc, smt_used=False)
+
+        latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
+
+        return master_thread_id[0], latency_thread_id[0], cpu_node, threads
+
+    @staticmethod
+    def get_affinity_iperf(
+            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+        """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface.
+        :param cpu_skip_cnt: Amount of CPU cores to skip.
+        :param cpu_cnt: CPU threads count.
+        :type node: dict
+        :type pf_key: str
+        :type cpu_skip_cnt: int
+        :type cpu_cnt: int
+        :returns: List of CPUs allocated to iPerf3.
+        :rtype: str
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+        return CpuUtils.cpu_range_per_node_str(
+            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=False)
+
+    @staticmethod
+    def get_affinity_vhost(
+            node, pf_key, skip_cnt=0, cpu_cnt=1):
+        """Get affinity for vhost. Result will be used to pin vhost threads.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface.
+        :param skip_cnt: Amount of CPU cores to skip.
+        :param cpu_cnt: CPU threads count.
+        :type node: dict
+        :type pf_key: str
+        :type skip_cnt: int
+        :type cpu_cnt: int
+        :returns: List of CPUs allocated to vhost process.
+        :rtype: str
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        if smt_used:
+            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+        return CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=False)
+
+    @staticmethod
+    def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
+        """Get idle CPU list.
+
+        :param node: Node dictionary with cpuinfo.
+        :param cpu_node: Numa node number.
+        :param smt_used: True - we want to use SMT, otherwise false.
+        :param cpu_alloc_str: VPP used cores.
+        :param sep: Separator, default: ",".
+        :type node: dict
+        :type cpu_node: int
+        :type smt_used: bool
+        :type cpu_alloc_str: str
+        :type sep: str
+        :returns: List of idle CPUs.
+        :rtype: list
+        """
+        cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+        cpu_idle_list = [i for i in cpu_list
+                         if str(i) not in cpu_alloc_str.split(sep)]
+        return cpu_idle_list
+
+    @staticmethod
+    def get_affinity_vswitch(
+            nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+        """Get affinity for vswitch on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param phy_cores: Number of physical cores to allocate.
+        :param rx_queues: Number of RX queues. (Optional, Default: None)
+        :param rxd: Number of RX descriptors. (Optional, Default: None)
+        :param txd: Number of TX descriptors. (Optional, Default: None)
+        :type nodes: dict
+        :type phy_cores: int
+        :type rx_queues: int
+        :type rxd: int
+        :type txd: int
+        :returns: Compute resource information dictionary.
+        :rtype: dict
+        """
+        compute_resource_info = dict()
+        for node_name, node in nodes.items():
+            if node["type"] != NodeType.DUT:
+                continue
+            # Number of Data Plane physical cores.
+            dp_cores_count = BuiltIn().get_variable_value(
+                "${dp_cores_count}", phy_cores
+            )
+            # Number of Feature Plane physical cores.
+            fp_cores_count = BuiltIn().get_variable_value(
+                "${fp_cores_count}", phy_cores - dp_cores_count
+            )
+            # Ratio between RX queues and data plane threads.
+            rxq_ratio = BuiltIn().get_variable_value(
+                "${rxq_ratio}", 1
+            )
+
+            dut_pf_keys = BuiltIn().get_variable_value(
+                f"${{{node_name}_pf_keys}}"
+            )
+            # SMT override in case of non standard test cases.
+            smt_used = BuiltIn().get_variable_value(
+                "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+            )
+
+            cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+            skip_cnt = Constants.CPU_CNT_SYSTEM
+            cpu_main = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
+                smt_used=False
+            )
+            cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
+            skip_cnt += Constants.CPU_CNT_MAIN
+            cpu_dp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(dp_cores_count),
+                smt_used=smt_used
+            ) if int(dp_cores_count) else ""
+            skip_cnt = skip_cnt + int(dp_cores_count)
+            cpu_fp = CpuUtils.cpu_list_per_node_str(
+                node, cpu_node,
+                skip_cnt=skip_cnt,
+                cpu_cnt=int(fp_cores_count),
+                smt_used=smt_used
+            ) if int(fp_cores_count) else ""
+
+            fp_count_int = \
+                int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(fp_cores_count)
+            dp_count_int = \
+                int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+                else int(dp_cores_count)
+
+            rxq_count_int = \
+                int(rx_queues) if rx_queues \
+                else int(dp_count_int/rxq_ratio)
+            rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+            compute_resource_info["buffers_numa"] = \
+                215040 if smt_used else 107520
+            compute_resource_info["smt_used"] = smt_used
+            compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+            compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+            compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+            compute_resource_info[f"{node_name}_cpu_wt"] = \
+                ",".join(filter(None, [cpu_dp, cpu_fp]))
+            compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+                ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+            compute_resource_info["cpu_count_int"] = \
+                int(dp_cores_count) + int(fp_cores_count)
+            compute_resource_info["rxd_count_int"] = rxd
+            compute_resource_info["txd_count_int"] = txd
+            compute_resource_info["rxq_count_int"] = rxq_count_int
+            compute_resource_info["fp_count_int"] = fp_count_int
+            compute_resource_info["dp_count_int"] = dp_count_int
+
+        return compute_resource_info
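
Usage note: the sibling-aware slicing used throughout this patch is easiest to see on a concrete cpuinfo table. The sketch below is illustrative only and not part of the patch. The node dictionary is hypothetical, but follows the shape get_cpu_info() builds from "lscpu -p": one NUMA node, four physical cores, SMT enabled, with CPU n and CPU n + 4 being hyper-thread siblings that share the same cache IDs. The import assumes the CSIT repository root is on PYTHONPATH (which also pulls in Robot Framework).

    # Hypothetical node, shaped like CpuUtils.get_cpu_info() output.
    # Row format mirrors "lscpu -p": CPU,Core,Socket,Node,Book,L1d,L1i,L2,L3.
    from resources.libraries.python.CpuUtils import CpuUtils

    node = {
        u"arch": u"x86_64",
        u"cpuinfo": [
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 1, 0, 0, 0, 1, 1, 1, 0],
            [2, 2, 0, 0, 0, 2, 2, 2, 0],
            [3, 3, 0, 0, 0, 3, 3, 3, 0],
            [4, 0, 0, 0, 0, 0, 0, 0, 0],  # SMT sibling of CPU 0
            [5, 1, 0, 0, 0, 1, 1, 1, 0],  # SMT sibling of CPU 1
            [6, 2, 0, 0, 0, 2, 2, 2, 0],  # SMT sibling of CPU 2
            [7, 3, 0, 0, 0, 3, 3, 3, 0],  # SMT sibling of CPU 3
        ],
    }

    # Sibling detection: the last four (cache ID) columns of the first half
    # of the table repeat in the second half.
    assert CpuUtils.is_smt_enabled(node[u"cpuinfo"])

    # Whole NUMA node 0: physical threads first, SMT siblings after.
    print(CpuUtils.cpu_list_per_node(node, 0, smt_used=True))
    # [0, 1, 2, 3, 4, 5, 6, 7]

    # Skip one physical core, take the next two; with smt_used=True the
    # slice is taken from both halves, so each core arrives with its sibling.
    print(CpuUtils.cpu_slice_of_list_per_node(
        node, 0, skip_cnt=1, cpu_cnt=2, smt_used=True
    ))
    # [1, 2, 5, 6]

    # The same physical-core slice rendered as a string, e.g. for a VPP
    # "corelist-workers" startup.conf entry (smt_used defaults to False).
    print(CpuUtils.cpu_list_per_node_str(node, 0, skip_cnt=1, cpu_cnt=2))
    # 1,2

This is also why the patch replaces every "/" by the floor-division operator "//" when dividing by NR_OF_THREADS: under Python 3 the sibling offset must remain an integer index into the two halves of the CPU list.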