-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
[CpuUtils.__str2int(x) for x in line.split(u",")]
)
+ @staticmethod
+ def worker_count_from_cores_and_smt(phy_cores, smt_used):
+ """Simple conversion utility, needs smt from caller.
+
+ The implementation assumes we pack 1 or 2 workers per core,
+ depending on hyperthreading.
+
+ Some keywords use None to indicate no core/worker limit,
+ so this maps a None input to a None output.
+
+ :param phy_cores: How many physical cores to use for workers.
+ :param smt_used: Whether symmetric multithreading is used.
+ :type phy_cores: Optional[int]
+ :type smt_used: bool
+ :returns: How many VPP workers fit into the given number of cores.
+ :rtype: Optional[int]
+ """
+ if phy_cores is None:
+ return None
+ workers_per_core = CpuUtils.NR_OF_THREADS if smt_used else 1
+ workers = phy_cores * workers_per_core
+ return workers
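+
+ # A minimal sketch of the conversion above, assuming NR_OF_THREADS == 2
+ # (illustrative values, not read from a real topology):
+ #   CpuUtils.worker_count_from_cores_and_smt(2, smt_used=False)  # -> 2
+ #   CpuUtils.worker_count_from_cores_and_smt(2, smt_used=True)   # -> 4
+ #   CpuUtils.worker_count_from_cores_and_smt(None, smt_used=True)  # -> None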
+
@staticmethod
def cpu_node_count(node):
"""Return count of numa nodes.
@staticmethod
def cpu_slice_of_list_per_node(
node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False):
- """Return string of node related list of CPU numbers.
+ """Return node related subset of list of CPU numbers.
:param node: Node dictionary with cpuinfo.
:param cpu_node: Numa node number.
dtc_is_integer = isinstance(nf_dtc, int)
if not smt_used and not dtc_is_integer:
raise RuntimeError(u"Cannot allocate a fractional core count if SMT is not enabled!")
- # TODO: Please reword the following todo if it is still relevant
- # TODO: Workaround as we are using physical core as main unit, we must
- # adjust number of physical dataplane cores in case of float for further
- # array referencing. As rounding method in Py2.7 and Py3.x differs, we
- # are using static mapping. This can be rewritten using flat arrays and
- # different logic (from Physical core unit to Logical core unit).
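+ # Workaround: the physical core is the allocation unit here, so a
+ # fractional dataplane core count is served by one full physical core,
+ # keeping the array referencing below integral.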
if not dtc_is_integer:
nf_dtc = 1
nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
)
+
+ @staticmethod
+ def get_affinity_trex(
+ node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+ """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
+
+ :param node: TG node.
+ :param if1_pci: TG first interface.
+ :param if2_pci: TG second interface.
+ :param tg_mtc: TG main thread count.
+ :param tg_dtc: TG dataplane thread count.
+ :param tg_ltc: TG latency thread count.
+ :type node: dict
+ :type if1_pci: str
+ :type if2_pci: str
+ :type tg_mtc: int
+ :type tg_dtc: int
+ :type tg_ltc: int
+ :returns: Master thread ID, latency thread ID, numa node and list
+ of dataplane thread IDs allocated to T-Rex.
+ :rtype: int, int, int, list
+ """
+ interface_list = [if1_pci, if2_pci]
+ cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
+
+ master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
+ smt_used=False)
+
+ threads = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
+ smt_used=False)
+
+ latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
+ smt_used=False)
+
+ return master_thread_id[0], latency_thread_id[0], cpu_node, threads
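+
+ # Illustration of the slicing above (hypothetical node whose numa node
+ # CPU list for this allocation starts at CPU 2): with tg_mtc=1,
+ # tg_dtc=4 and tg_ltc=1 the three consecutive slices give master
+ # CPU 2, dataplane CPUs 3-6 and latency CPU 7, so the call returns
+ # (2, 7, cpu_node, [3, 4, 5, 6]).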
+
+ @staticmethod
+ def get_affinity_iperf(
+ node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+ """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface.
+ :param cpu_skip_cnt: Number of CPU cores to skip.
+ :param cpu_cnt: Number of CPU threads to allocate.
+ :type node: dict
+ :type pf_key: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+ :returns: String of CPU range allocated to iPerf3.
+ :rtype: str
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+
+ return CpuUtils.cpu_range_per_node_str(
+ node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=False)
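+
+ # For example (hypothetical node whose numa node CPU list is 0-7),
+ # cpu_skip_cnt=2 with cpu_cnt=3 would yield a range string such as
+ # u"2-4", which is then used to pin the iPerf3 process.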
+
+ @staticmethod
+ def get_affinity_vhost(
+ node, pf_key, skip_cnt=0, cpu_cnt=1):
+ """Get affinity for vhost. Result will be used to pin vhost threads.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface.
+ :param skip_cnt: Number of CPU cores to skip.
+ :param cpu_cnt: Number of CPU threads to allocate.
+ :type node: dict
+ :type pf_key: str
+ :type skip_cnt: int
+ :type cpu_cnt: int
+ :returns: List of CPUs allocated to vhost process.
+ :rtype: list
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+
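+ # With SMT, each physical core exposes NR_OF_THREADS logical CPUs,
+ # so convert the requested thread count to physical cores first.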
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+ if smt_used:
+ cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+ return CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=False)
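+
+ # Sketch of the SMT adjustment above (illustrative numbers): with SMT
+ # enabled and NR_OF_THREADS == 2, a request for cpu_cnt=4 vhost
+ # threads is first converted to 4 // 2 = 2 physical cores, and the
+ # slice then yields two CPU IDs from the interface's numa node.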