X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FCpuUtils.py;h=293d6b691351f726c2d396000de907f216e9cf5d;hb=82863d5b8422b1b817d86bd6b1829a06a49feb02;hp=842c16d7ef06658e6e4d242c48b0b0efc72f4399;hpb=d68951ac245150eeefa6e0f4156e4c1b5c9e9325;p=csit.git

diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index 842c16d7ef..293d6b6913 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -84,6 +84,29 @@ class CpuUtils:
                 [CpuUtils.__str2int(x) for x in line.split(u",")]
             )
 
+    @staticmethod
+    def worker_count_from_cores_and_smt(phy_cores, smt_used):
+        """Simple conversion utility; needs smt_used from the caller.
+
+        The implementation assumes we pack 1 or 2 workers per core,
+        depending on hyperthreading.
+
+        Some keywords use None to indicate no core/worker limit,
+        so None is passed through unchanged.
+
+        :param phy_cores: How many physical cores to use for workers.
+        :param smt_used: Whether symmetric multithreading is used.
+        :type phy_cores: Optional[int]
+        :type smt_used: bool
+        :returns: How many VPP workers fit into the given number of cores.
+        :rtype: Optional[int]
+        """
+        if phy_cores is None:
+            return None
+        workers_per_core = CpuUtils.NR_OF_THREADS if smt_used else 1
+        workers = phy_cores * workers_per_core
+        return workers
+
     @staticmethod
     def cpu_node_count(node):
         """Return count of numa nodes.
@@ -141,7 +164,7 @@ class CpuUtils:
     @staticmethod
     def cpu_slice_of_list_per_node(
             node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False):
-        """Return string of node related list of CPU numbers.
+        """Return node-related subset of the list of CPU numbers.
 
         :param node: Node dictionary with cpuinfo.
         :param cpu_node: Numa node number.
@@ -232,7 +255,7 @@ class CpuUtils:
             cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
             cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
             cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
-                f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
+                        f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
         else:
             cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"
 
@@ -283,12 +306,6 @@ class CpuUtils:
         dtc_is_integer = isinstance(nf_dtc, int)
         if not smt_used and not dtc_is_integer:
             raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
-        # TODO: Please reword the following todo if it is still relevant
-        # TODO: Workaround as we are using physical core as main unit, we must
-        # adjust number of physical dataplane cores in case of float for further
-        # array referencing. As rounding method in Py2.7 and Py3.x differs, we
-        # are using static mapping. This can be rewritten using flat arrays and
-        # different logic (from Physical core unit to Logical core unit).
         if not dtc_is_integer:
             nf_dtc = 1
 
@@ -361,3 +378,116 @@ class CpuUtils:
             nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
             nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
         )
+
+    @staticmethod
+    def get_affinity_trex(
+            node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+        """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
+
+        :param node: TG node.
+        :param if1_pci: TG first interface.
+        :param if2_pci: TG second interface.
+        :param tg_mtc: TG main thread count.
+        :param tg_dtc: TG dataplane thread count.
+        :param tg_ltc: TG latency thread count.
+        :type node: dict
+        :type if1_pci: str
+        :type if2_pci: str
+        :type tg_mtc: int
+        :type tg_dtc: int
+        :type tg_ltc: int
+        :returns: Master thread ID, latency thread ID, NUMA node and list of
+            dataplane CPUs allocated to T-Rex.
+        :rtype: int, int, int, list
+        """
+        interface_list = [if1_pci, if2_pci]
+        cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
+
+        master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
+            smt_used=False)
+
+        threads = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
+            smt_used=False)
+
+        latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
+            smt_used=False)
+
+        return master_thread_id[0], latency_thread_id[0], cpu_node, threads
+
+    @staticmethod
+    def get_affinity_iperf(
+            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+        """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface.
+        :param cpu_skip_cnt: Amount of CPU cores to skip.
+        :param cpu_cnt: CPU threads count.
+        :type node: dict
+        :type pf_key: str
+        :type cpu_skip_cnt: int
+        :type cpu_cnt: int
+        :returns: CPU range (as string) allocated to iPerf3.
+        :rtype: str
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        return CpuUtils.cpu_range_per_node_str(
+            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=False)
+
+    @staticmethod
+    def get_affinity_vhost(
+            node, pf_key, skip_cnt=0, cpu_cnt=1):
+        """Get affinity for vhost. Result will be used to pin vhost threads.
+
+        :param node: Topology node.
+        :param pf_key: Topology interface.
+        :param skip_cnt: Amount of CPU cores to skip.
+        :param cpu_cnt: CPU threads count.
+        :type node: dict
+        :type pf_key: str
+        :type skip_cnt: int
+        :type cpu_cnt: int
+        :returns: List of CPUs allocated to vhost process.
+        :rtype: list
+        """
+        if pf_key:
+            cpu_node = Topology.get_interface_numa_node(node, pf_key)
+        else:
+            cpu_node = 0
+
+        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+        if smt_used:
+            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+        return CpuUtils.cpu_slice_of_list_per_node(
+            node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+            smt_used=False)
+
+    @staticmethod
+    def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
+        """Get idle CPU list.
+
+        :param node: Node dictionary with cpuinfo.
+        :param cpu_node: Numa node number.
+        :param smt_used: True if SMT is used, False otherwise.
+        :param cpu_alloc_str: CPU cores already allocated to VPP.
+        :param sep: Separator, default: ",".
+        :type node: dict
+        :type cpu_node: int
+        :type smt_used: bool
+        :type cpu_alloc_str: str
+        :type sep: str
+        :returns: List of idle CPUs.
+        :rtype: list
+        """
+        cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+        cpu_idle_list = [i for i in cpu_list
+                         if str(i) not in cpu_alloc_str.split(sep)]
+        return cpu_idle_list
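
A minimal usage sketch of the new helpers, assuming a CSIT topology node dictionary (with a u"cpuinfo" entry) is already available; the import path mirrors the file location, and the core counts and allocation string are illustrative values, not taken from the change itself:

    from resources.libraries.python.CpuUtils import CpuUtils

    # node: topology dict containing u"cpuinfo" (assumed to be available).
    smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])

    # Two physical cores yield 2 or 4 VPP workers depending on SMT;
    # None would be passed through unchanged.
    workers = CpuUtils.worker_count_from_cores_and_smt(
        phy_cores=2, smt_used=smt_used)

    # CPUs on NUMA node 0 not present in the already-allocated set u"2,3,4".
    idle_cpus = CpuUtils.get_cpu_idle_list(node, 0, smt_used, u"2,3,4")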