+
+ @staticmethod
+ def cpu_slice_of_list_for_nf(
+ node, cpu_node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
+ nf_dtc=1, nf_mtcr=2, nf_dtcr=1, skip_cnt=0):
+ """Return list of DUT node related list of CPU numbers. The main
+ computing unit is physical core count.
+
+ :param node: DUT node.
+ :param cpu_node: NUMA node number.
+ :param nf_chains: Number of NF chains.
+ :param nf_nodes: Number of NF nodes in chain.
+ :param nf_chain: Chain number indexed from 1.
+ :param nf_node: Node number indexed from 1.
+ :param nf_dtc: Number of physical cores for NF data plane.
+ :param nf_mtcr: NF main thread per core ratio.
+ :param nf_dtcr: NF data plane thread per core ratio.
+ :param skip_cnt: Skip first "skip_cnt" CPUs.
+ :type node: dict
+ :type cpu_node: int
+ :type nf_chains: int
+ :type nf_nodes: int
+ :type nf_chain: int
+ :type nf_node: int
+ :type nf_dtc: int or float
+ :type nf_mtcr: int
+ :type nf_dtcr: int
+ :type skip_cnt: int
+ :returns: List of CPUs allocated to NF.
+ :rtype: list
+ :raises RuntimeError: If more CPUs are required than are available,
+ or if the placement is not possible due to invalid parameters.
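+
+ Example (illustrative values only; the `nodes` dict and the
+ u"DUT1" key are assumptions, and the returned CPU numbers depend
+ on the DUT topology):
+
+ cpus = CpuUtils.cpu_slice_of_list_for_nf(
+ node=nodes[u"DUT1"], cpu_node=0, nf_chains=2, nf_nodes=2,
+ nf_chain=1, nf_node=2, nf_dtc=1, nf_mtcr=2, nf_dtcr=1,
+ skip_cnt=2)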
+ """
+ if not 1 <= nf_chain <= nf_chains:
+ raise RuntimeError(u"ChainID is out of range!")
+ if not 1 <= nf_node <= nf_nodes:
+ raise RuntimeError(u"NodeID is out of range!")
+
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+ cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+ # CPU thread sibling offset.
+ sib = len(cpu_list) // CpuUtils.NR_OF_THREADS
+
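+ # A fractional nf_dtc (e.g. 0.5) means the NF data plane shares a
+ # physical core with another NF via SMT siblings, so it is only
+ # valid when SMT is enabled; it still consumes one core slot here.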
+ dtc_is_integer = isinstance(nf_dtc, int)
+ if not smt_used and not dtc_is_integer:
+ raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
+ if not dtc_is_integer:
+ nf_dtc = 1
+
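+ # Total main thread and data plane core slots needed for all NF
+ # instances, rounded up (ceiling division by the thread-per-core
+ # ratios).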
+ mt_req = ((nf_chains * nf_nodes) + nf_mtcr - 1) // nf_mtcr
+ dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr
+
+ if (skip_cnt + mt_req + dt_req) > (sib if smt_used else len(cpu_list)):
+ raise RuntimeError(u"Not enough CPU cores available for placement!")
+
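+ # Zero-based index of this NF instance across all chains; used to
+ # rotate main thread and data plane placement over the reserved
+ # core slots.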
+ offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
+ mt_skip = skip_cnt + (offset % mt_req)
+ dt_skip = skip_cnt + mt_req + (offset % dt_req) * nf_dtc
+
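+ # Data plane CPUs for this NF; with SMT the sibling threads
+ # (offset by `sib`) are appended or substituted below.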
+ result = cpu_list[dt_skip:dt_skip + nf_dtc]
+ if smt_used:
+ if (offset // mt_req) & 1:  # odd slot: use the SMT sibling thread
+ mt_skip += sib
+
+ dt_skip += sib
+ if dtc_is_integer:
+ result.extend(cpu_list[dt_skip:dt_skip + nf_dtc])
+ elif (offset // dt_req) & 1:  # odd slot: take the sibling slice instead
+ result = cpu_list[dt_skip:dt_skip + nf_dtc]
+
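+ # Prepend the CPU for the NF main (control) thread to the result.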
+ result[0:0] = cpu_list[mt_skip:mt_skip + 1]
+ return result
+
+ @staticmethod
+ def get_affinity_nf(
+ nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
+ vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
+ """Get affinity of NF (network function). Result will be used to compute
+ the amount of CPUs and also affinity.
+
+ :param nodes: Physical topology nodes.
+ :param node: SUT node name.
+ :param nf_chains: Number of NF chains.
+ :param nf_nodes: Number of NF nodes in chain.
+ :param nf_chain: Chain number indexed from 1.
+ :param nf_node: Node number indexed from 1.
+ :param vs_dtc: Number of physical cores for vswitch data plane.
+ :param nf_dtc: Number of physical cores for NF data plane.
+ :param nf_mtcr: NF main thread per core ratio.
+ :param nf_dtcr: NF data plane thread per core ratio.
+ :type nodes: dict
+ :type node: str
+ :type nf_chains: int
+ :type nf_nodes: int
+ :type nf_chain: int
+ :type nf_node: int
+ :type vs_dtc: int
+ :type nf_dtc: int or float
+ :type nf_mtcr: int
+ :type nf_dtcr: int
+ :returns: List of CPUs allocated to NF.
+ :rtype: list
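+
+ Example (illustrative; assumes a Robot Framework topology with a
+ u"DUT1" node and ${DUT1_if1}/${DUT1_if2} test variables set):
+
+ cpus = CpuUtils.get_affinity_nf(
+ nodes, u"DUT1", nf_chains=2, nf_nodes=2, nf_chain=1,
+ nf_node=2, vs_dtc=1, nf_dtc=1)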
+ """
+ skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc
+
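+ # Interface names are read from Robot Framework test variables,
+ # e.g. ${DUT1_if1} and ${DUT1_if2} when node is u"DUT1".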
+ interface_list = list()
+ interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if1}}"))
+ interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if2}}"))
+
+ cpu_node = Topology.get_interfaces_numa_node(
+ nodes[node], *interface_list)
+
+ return CpuUtils.cpu_slice_of_list_for_nf(
+ node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
+ nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
+ nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
+ )
+
+ @staticmethod
+ def get_affinity_trex(
+ node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+ """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
+
+ :param node: TG node.
+ :param if1_pci: PCI address of the TG first interface.
+ :param if2_pci: PCI address of the TG second interface.
+ :param tg_mtc: TG main thread count.
+ :param tg_dtc: TG data plane thread count.
+ :param tg_ltc: TG latency thread count.
+ :type node: dict
+ :type if1_pci: str
+ :type if2_pci: str
+ :type tg_mtc: int
+ :type tg_dtc: int
+ :type tg_ltc: int
+ :returns: Master thread ID, latency thread ID, NUMA node and list of
+ data plane thread IDs allocated to T-Rex.
+ :rtype: int, int, int, list
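+
+ Example (the PCI addresses and `nodes` dict are illustrative
+ assumptions):
+
+ master, latency, numa, workers = CpuUtils.get_affinity_trex(
+ nodes[u"TG"], u"0000:18:00.0", u"0000:18:00.1", tg_dtc=8)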
+ """
+ interface_list = [if1_pci, if2_pci]
+ cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
+
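+ # CPU layout on the interfaces' NUMA node: master thread(s) first,
+ # then data plane threads, then the latency thread.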
+ master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
+ smt_used=False)
+
+ threads = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
+ smt_used=False)
+
+ latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
+ smt_used=False)
+
+ return master_thread_id[0], latency_thread_id[0], cpu_node, threads