-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""CPU utilities library."""
-from resources.libraries.python.ssh import SSH
+from robot.libraries.BuiltIn import BuiltIn
-__all__ = ["CpuUtils"]
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.topology import Topology
+__all__ = [u"CpuUtils"]
-class CpuUtils(object):
+
+class CpuUtils:
"""CPU utilities"""
# Number of threads per core.
:rtype: bool
"""
cpu_mems = [item[-4:] for item in cpu_info]
- cpu_mems_len = len(cpu_mems) / CpuUtils.NR_OF_THREADS
+ cpu_mems_len = len(cpu_mems) // CpuUtils.NR_OF_THREADS
count = 0
for cpu_mem in cpu_mems[:cpu_mems_len]:
if cpu_mem in cpu_mems[cpu_mems_len:]:
return bool(count == cpu_mems_len)
@staticmethod
- def get_cpu_layout_from_all_nodes(nodes):
- """Retrieve cpu layout from all nodes, assuming all nodes
- are Linux nodes.
+ def get_cpu_info_from_all_nodes(nodes):
+ """Assuming all nodes are Linux nodes, retrieve the following
+ cpu information from all nodes:
+ - cpu architecture
+ - cpu layout
:param nodes: DICT__nodes from Topology.DICT__nodes.
:type nodes: dict
- :raises RuntimeError: If the ssh command "lscpu -p" fails.
+ :raises RuntimeError: If an ssh command retrieving cpu information
+ fails.
"""
- ssh = SSH()
for node in nodes.values():
- ssh.connect(node)
- cmd = "lscpu -p"
- ret, stdout, stderr = ssh.exec_command(cmd)
-# parsing of "lscpu -p" output:
-# # CPU,Core,Socket,Node,,L1d,L1i,L2,L3
-# 0,0,0,0,,0,0,0,0
-# 1,1,0,0,,1,1,1,0
- if ret != 0:
- raise RuntimeError(
- "Failed to execute ssh command, ret: {} err: {}".format(
- ret, stderr))
- node['cpuinfo'] = list()
- for line in stdout.split("\n"):
- if line and line[0] != "#":
- node['cpuinfo'].append([CpuUtils.__str2int(x) for x in
- line.split(",")])
+ stdout, _ = exec_cmd_no_error(node, u"uname -m")
+ node[u"arch"] = stdout.strip()
+ stdout, _ = exec_cmd_no_error(node, u"lscpu -p")
+ node[u"cpuinfo"] = list()
+ for line in stdout.split(u"\n"):
+ if line and line[0] != u"#":
+ node[u"cpuinfo"].append(
+ [CpuUtils.__str2int(x) for x in line.split(u",")]
+ )
@staticmethod
def cpu_node_count(node):
:rtype: int
:raises RuntimeError: If node cpuinfo is not available.
"""
- cpu_info = node.get("cpuinfo")
+ cpu_info = node.get(u"cpuinfo")
if cpu_info is not None:
- return node["cpuinfo"][-1][3] + 1
- else:
- raise RuntimeError("Node cpuinfo not available.")
+ return node[u"cpuinfo"][-1][3] + 1
+
+ raise RuntimeError(u"Node cpuinfo not available.")
@staticmethod
def cpu_list_per_node(node, cpu_node, smt_used=False):
or if SMT is not enabled.
"""
cpu_node = int(cpu_node)
- cpu_info = node.get("cpuinfo")
+ cpu_info = node.get(u"cpuinfo")
if cpu_info is None:
- raise RuntimeError("Node cpuinfo not available.")
+ raise RuntimeError(u"Node cpuinfo not available.")
smt_enabled = CpuUtils.is_smt_enabled(cpu_info)
if not smt_enabled and smt_used:
- raise RuntimeError("SMT is not enabled.")
+ raise RuntimeError(u"SMT is not enabled.")
cpu_list = []
for cpu in cpu_info:
if smt_enabled and not smt_used:
cpu_list_len = len(cpu_list)
- cpu_list = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS]
+ cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
return cpu_list
@staticmethod
- def cpu_slice_of_list_per_node(node, cpu_node, skip_cnt=0, cpu_cnt=0,
- smt_used=False):
- """Return string of node related list of CPU numbers.
+ def cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False):
+ """Return node related subset of list of CPU numbers.
:param node: Node dictionary with cpuinfo.
:param cpu_node: Numa node number.
cpu_list_len = len(cpu_list)
if cpu_cnt + skip_cnt > cpu_list_len:
- raise RuntimeError("cpu_cnt + skip_cnt > length(cpu list).")
+ raise RuntimeError(u"cpu_cnt + skip_cnt > length(cpu list).")
if cpu_cnt == 0:
cpu_cnt = cpu_list_len - skip_cnt
if smt_used:
- cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS]
- cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:]
- cpu_list = [cpu for cpu in cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]]
- cpu_list_ex = [cpu for cpu in
- cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]]
+ cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
+ cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
+ cpu_list = cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]
+ cpu_list_ex = cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]
cpu_list.extend(cpu_list_ex)
else:
- cpu_list = [cpu for cpu in cpu_list[skip_cnt:skip_cnt + cpu_cnt]]
+ cpu_list = cpu_list[skip_cnt:skip_cnt + cpu_cnt]
return cpu_list
@staticmethod
- def cpu_list_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",",
- smt_used=False):
+ def cpu_list_per_node_str(
+ node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u",", smt_used=False):
"""Return string of node related list of CPU numbers.
:param node: Node dictionary with cpuinfo.
:returns: Cpu numbers related to numa from argument.
:rtype: str
"""
- cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=cpu_cnt,
- smt_used=smt_used)
+ cpu_list = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=smt_used
+ )
return sep.join(str(cpu) for cpu in cpu_list)
@staticmethod
- def cpu_range_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-",
- smt_used=False):
+ def cpu_range_per_node_str(
+ node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u"-", smt_used=False):
"""Return string of node related range of CPU numbers, e.g. 0-4.
:param node: Node dictionary with cpuinfo.
:returns: String of node related range of CPU numbers.
:rtype: str
"""
- cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=cpu_cnt,
- smt_used=smt_used)
+ cpu_list = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=smt_used
+ )
if smt_used:
cpu_list_len = len(cpu_list)
- cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS]
- cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:]
- cpu_range = "{}{}{},{}{}{}".format(cpu_list_0[0], sep,
- cpu_list_0[-1],
- cpu_list_1[0], sep,
- cpu_list_1[-1])
+ cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
+ cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
+ cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
+ f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
else:
- cpu_range = "{}{}{}".format(cpu_list[0], sep, cpu_list[-1])
+ cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"
return cpu_range
@staticmethod
- def cpu_slice_of_list_for_nf(**kwargs):
- """Return list of node related list of CPU numbers.
+ def cpu_slice_of_list_for_nf(
+ node, cpu_node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
+ nf_dtc=1, nf_mtcr=2, nf_dtcr=1, skip_cnt=0):
+ """Return list of DUT node related list of CPU numbers. The main
+ computing unit is physical core count.
- :param kwargs: Key-value pairs used to compute placement.
- :type kwargs: dict
- :returns: Cpu numbers related to numa from argument.
+ :param node: DUT node.
+ :param cpu_node: Numa node number.
+ :param nf_chains: Number of NF chains.
+ :param nf_nodes: Number of NF nodes in chain.
+ :param nf_chain: Chain number indexed from 1.
+ :param nf_node: Node number indexed from 1.
+ :param nf_dtc: Amount of physical cores for NF data plane.
+ :param nf_mtcr: NF main thread per core ratio.
+ :param nf_dtcr: NF data plane thread per core ratio.
+ :param skip_cnt: Skip first "skip_cnt" CPUs.
+ :type node: dict
+        :type cpu_node: int
+ :type nf_chains: int
+ :type nf_nodes: int
+ :type nf_chain: int
+ :type nf_node: int
+ :type nf_dtc: int or float
+ :type nf_mtcr: int
+ :type nf_dtcr: int
+ :type skip_cnt: int
+ :returns: List of CPUs allocated to NF.
:rtype: list
:raises RuntimeError: If we require more cpus than available or if
- placement is not possible due to wrong parameters.
+ placement is not possible due to wrong parameters.
"""
- if kwargs['chain_id'] - 1 >= kwargs['chains']:
- raise RuntimeError("ChainID is higher than total number of chains!")
- if kwargs['node_id'] - 1 >= kwargs['nodeness']:
- raise RuntimeError("NodeID is higher than chain nodeness!")
-
- smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
- cpu_list = CpuUtils.cpu_list_per_node(kwargs['node'],
- kwargs['cpu_node'], smt_used)
- cpu_list_len = len(cpu_list)
+ if not 1 <= nf_chain <= nf_chains:
+ raise RuntimeError(u"ChainID is out of range!")
+ if not 1 <= nf_node <= nf_nodes:
+ raise RuntimeError(u"NodeID is out of range!")
- mt_req = ((kwargs['chains'] * kwargs['nodeness']) + kwargs['mtcr'] - 1)\
- / kwargs['mtcr']
- dt_req = ((kwargs['chains'] * kwargs['nodeness']) + kwargs['dtcr'] - 1)\
- / kwargs['dtcr']
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+ cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+ # CPU thread sibling offset.
+ sib = len(cpu_list) // CpuUtils.NR_OF_THREADS
- if kwargs['skip_cnt'] + mt_req + dt_req > cpu_list_len:
- raise RuntimeError("Not enough CPU cores available for placement!")
+ dtc_is_integer = isinstance(nf_dtc, int)
+ if not smt_used and not dtc_is_integer:
+ raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
+ if not dtc_is_integer:
+ nf_dtc = 1
- offset = (kwargs['node_id'] - 1) + (kwargs['chain_id'] - 1)\
- * kwargs['nodeness']
- dtc = kwargs['dtc']
- try:
- mt_odd = (offset / mt_req) & 1
- mt_skip = kwargs['skip_cnt'] + (offset % mt_req)
- dt_skip = kwargs['skip_cnt'] + mt_req + (offset % dt_req) * dtc
- except ZeroDivisionError:
- raise RuntimeError("Invalid placement combination!")
+ mt_req = ((nf_chains * nf_nodes) + nf_mtcr - 1) // nf_mtcr
+ dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr
+
+ if (skip_cnt + mt_req + dt_req) > (sib if smt_used else len(cpu_list)):
+ raise RuntimeError(u"Not enough CPU cores available for placement!")
+
+ offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
+ mt_skip = skip_cnt + (offset % mt_req)
+ dt_skip = skip_cnt + mt_req + (offset % dt_req) * nf_dtc
+
+ result = cpu_list[dt_skip:dt_skip + nf_dtc]
+ if smt_used:
+ if (offset // mt_req) & 1: # check oddness
+ mt_skip += sib
+
+ dt_skip += sib
+ if dtc_is_integer:
+ result.extend(cpu_list[dt_skip:dt_skip + nf_dtc])
+ elif (offset // dt_req) & 1: # check oddness
+ result = cpu_list[dt_skip:dt_skip + nf_dtc]
+
+ result[0:0] = cpu_list[mt_skip:mt_skip + 1]
+ return result
+
+ @staticmethod
+ def get_affinity_af_xdp(
+ node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+ """Get affinity for AF_XDP interface. Result will be used to pin IRQs.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface.
+ :param cpu_skip_cnt: Amount of CPU cores to skip.
+ :param cpu_cnt: CPU threads count.
+ :type node: dict
+ :type pf_key: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+ :returns: List of CPUs allocated to AF_XDP interface.
+ :rtype: list
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
if smt_used:
- cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS]
- cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:]
+ cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+ return CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=smt_used
+ )
+
+ @staticmethod
+ def get_affinity_nf(
+ nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
+ vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
+
+ """Get affinity of NF (network function). Result will be used to compute
+ the amount of CPUs and also affinity.
+
+ :param nodes: Physical topology nodes.
+ :param node: SUT node.
+ :param nf_chains: Number of NF chains.
+ :param nf_nodes: Number of NF nodes in chain.
+ :param nf_chain: Chain number indexed from 1.
+ :param nf_node: Node number indexed from 1.
+ :param vs_dtc: Amount of physical cores for vswitch data plane.
+ :param nf_dtc: Amount of physical cores for NF data plane.
+ :param nf_mtcr: NF main thread per core ratio.
+ :param nf_dtcr: NF data plane thread per core ratio.
+ :type nodes: dict
+ :type node: dict
+ :type nf_chains: int
+ :type nf_nodes: int
+ :type nf_chain: int
+ :type nf_node: int
+ :type vs_dtc: int
+ :type nf_dtc: int or float
+ :type nf_mtcr: int
+ :type nf_dtcr: int
+ :returns: List of CPUs allocated to NF.
+ :rtype: list
+ """
+ skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc
+
+ interface_list = list()
+ interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if1}}"))
+ interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if2}}"))
+
+ cpu_node = Topology.get_interfaces_numa_node(
+ nodes[node], *interface_list)
+
+ return CpuUtils.cpu_slice_of_list_for_nf(
+ node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
+ nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
+ nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
+ )
+
+ @staticmethod
+ def get_affinity_trex(
+ node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+ """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
+
+ :param node: TG node.
+ :param if1_pci: TG first interface.
+ :param if2_pci: TG second interface.
+ :param tg_mtc: TG main thread count.
+ :param tg_dtc: TG dataplane thread count.
+ :param tg_ltc: TG latency thread count.
+ :type node: dict
+ :type if1_pci: str
+ :type if2_pci: str
+ :type tg_mtc: int
+ :type tg_dtc: int
+ :type tg_ltc: int
+ :returns: List of CPUs allocated to T-Rex including numa node.
+ :rtype: int, int, int, list
+ """
+ interface_list = [if1_pci, if2_pci]
+ cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
+
+ master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
+ smt_used=False)
+
+ threads = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
+ smt_used=False)
- mt_cpu_list = [cpu for cpu in cpu_list_1[mt_skip:mt_skip + 1]] \
- if mt_odd else [cpu for cpu in cpu_list_0[mt_skip:mt_skip + 1]]
+ latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
+ smt_used=False)
- dt_cpu_list = [cpu for cpu in cpu_list_0[dt_skip:dt_skip + dtc]]
- dt_cpu_list += [cpu for cpu in cpu_list_1[dt_skip:dt_skip + dtc]]
+ return master_thread_id[0], latency_thread_id[0], cpu_node, threads
+
+ @staticmethod
+ def get_affinity_iperf(
+ node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+ """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface.
+ :param cpu_skip_cnt: Amount of CPU cores to skip.
+ :param cpu_cnt: CPU threads count.
+ :type node: dict
+ :type pf_key: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+        :returns: Range string of CPUs allocated to iPerf3.
+        :rtype: str
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
else:
- mt_cpu_list = [cpu for cpu in cpu_list[mt_skip:mt_skip + 1]]
- dt_cpu_list = [cpu for cpu in cpu_list[dt_skip:dt_skip + dtc]]
+ cpu_node = 0
+
+ return CpuUtils.cpu_range_per_node_str(
+ node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=False)
+
+ @staticmethod
+ def get_affinity_vhost(
+ node, pf_key, skip_cnt=0, cpu_cnt=1):
+ """Get affinity for vhost. Result will be used to pin vhost threads.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface.
+ :param skip_cnt: Amount of CPU cores to skip.
+ :param cpu_cnt: CPU threads count.
+ :type node: dict
+ :type pf_key: str
+ :type skip_cnt: int
+ :type cpu_cnt: int
+        :returns: List of CPUs allocated to vhost process.
+        :rtype: list
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+ if smt_used:
+ cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+ return CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=False)
- return mt_cpu_list + dt_cpu_list
+ @staticmethod
+ def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
+ """Get idle CPU List.
+
+ :param node: Node dictionary with cpuinfo.
+ :param cpu_node: Numa node number.
+ :param smt_used: True - we want to use SMT, otherwise false.
+ :param cpu_alloc_str: vpp used cores.
+ :param sep: Separator, default: ",".
+ :type node: dict
+ :type cpu_node: int
+ :type smt_used: bool
+ :type cpu_alloc_str: str
+        :returns: List of idle CPUs.
+ :type sep: str
+ :rtype: list
+ """
+ cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+ cpu_idle_list = [i for i in cpu_list
+ if str(i) not in cpu_alloc_str.split(sep)]
+ return cpu_idle_list
+
+ @staticmethod
+ def get_affinity_vswitch(
+ nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
+ """Get affinity for vswitch.
+
+ :param nodes: Topology nodes.
+ :param node: Topology node string.
+ :param phy_cores: Number of physical cores to allocate.
+ :param rx_queues: Number of RX queues. (Optional, Default: None)
+ :param rxd: Number of RX descriptors. (Optional, Default: None)
+ :param txd: Number of TX descriptors. (Optional, Default: None)
+ :type nodes: dict
+ :type node: str
+ :type phy_cores: int
+ :type rx_queues: int
+ :type rxd: int
+ :type txd: int
+ :returns: Compute resource information dictionary.
+ :rtype: dict
+ """
+ # Number of Data Plane physical cores.
+ dp_cores_count = BuiltIn().get_variable_value(
+ f"${{dp_cores_count}}", phy_cores
+ )
+ # Number of Feature Plane physical cores.
+ fp_cores_count = BuiltIn().get_variable_value(
+ f"${{fp_cores_count}}", phy_cores - dp_cores_count
+ )
+ # Ratio between RX queues and data plane threads.
+ rxq_ratio = BuiltIn().get_variable_value(
+ f"${{rxq_ratio}}", 1
+ )
+
+ dut_pf_keys = BuiltIn().get_variable_value(
+ f"${{{node}_pf_keys}}"
+ )
+ # SMT override in case of non standard test cases.
+ smt_used = BuiltIn().get_variable_value(
+ f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
+ )
+
+ cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
+ skip_cnt = Constants.CPU_CNT_SYSTEM
+ cpu_main = CpuUtils.cpu_list_per_node_str(
+ nodes[node], cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=Constants.CPU_CNT_MAIN,
+ smt_used=False
+ )
+ skip_cnt += Constants.CPU_CNT_MAIN
+ cpu_dp = CpuUtils.cpu_list_per_node_str(
+ nodes[node], cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(dp_cores_count),
+ smt_used=smt_used
+ ) if int(dp_cores_count) else u""
+ skip_cnt = skip_cnt + int(dp_cores_count)
+ cpu_fp = CpuUtils.cpu_list_per_node_str(
+ nodes[node], cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(fp_cores_count),
+ smt_used=smt_used
+ ) if int(fp_cores_count) else u""
+
+ fp_count_int = \
+ int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(fp_cores_count)
+ dp_count_int = \
+ int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(dp_cores_count)
+
+ rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
+ rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+ compute_resource_info = dict()
+ compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
+ compute_resource_info[u"smt_used"] = smt_used
+ compute_resource_info[u"cpu_main"] = cpu_main
+ compute_resource_info[u"cpu_dp"] = cpu_dp
+ compute_resource_info[u"cpu_fp"] = cpu_fp
+ compute_resource_info[u"cpu_wt"] = \
+ u",".join(filter(None, [cpu_dp, cpu_fp]))
+ compute_resource_info[u"cpu_alloc_str"] = \
+ u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+ compute_resource_info[u"cpu_count_int"] = \
+ int(dp_cores_count) + int(fp_cores_count)
+ compute_resource_info[u"rxd_count_int"] = rxd
+ compute_resource_info[u"txd_count_int"] = txd
+ compute_resource_info[u"rxq_count_int"] = rxq_count_int
+ compute_resource_info[u"fp_count_int"] = fp_count_int
+ compute_resource_info[u"dp_count_int"] = dp_count_int
+
+ return compute_resource_info