1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """CPU utilities library."""
16 from robot.libraries.BuiltIn import BuiltIn
18 from resources.libraries.python.Constants import Constants
19 from resources.libraries.python.ssh import exec_cmd_no_error
20 from resources.libraries.python.topology import Topology
# Public API of this module: only the CpuUtils container is exported.
__all__ = [u"CpuUtils"]

# Number of threads per core.
# NOTE(review): the constant this comment describes (used throughout as
# ``CpuUtils.NR_OF_THREADS``, value presumably 2) and the enclosing
# ``class CpuUtils:`` header are not visible in this chunk — they appear
# to have been lost in extraction; confirm against the full file.
def __str2int(string):
    """Conversion from string to integer, 0 in case of empty string.

    :param string: Input string.
    :type string: str
    :returns: Integer converted from string, 0 in case of ValueError.
    :rtype: int
    """
    # Empty string (and any non-numeric text) raises ValueError in int().
    try:
        return int(string)
    except ValueError:
        return 0
def is_smt_enabled(cpu_info):
    """Uses CPU mapping to find out if SMT is enabled or not. If SMT is
    enabled, the L1d,L1i,L2,L3 setting is the same for two processors. These
    two processors are two threads of one core.

    :param cpu_info: CPU info, the output of "lscpu -p".
    :type cpu_info: list
    :returns: True if SMT is enabled, False if SMT is disabled.
    :rtype: bool
    """
    # The last 4 columns of each "lscpu -p" row form a cache/memory
    # signature; with SMT every signature appears in both halves of the
    # CPU list (two threads per core).
    cpu_mems = [item[-4:] for item in cpu_info]
    cpu_mems_len = len(cpu_mems) // CpuUtils.NR_OF_THREADS
    count = 0
    for cpu_mem in cpu_mems[:cpu_mems_len]:
        if cpu_mem in cpu_mems[cpu_mems_len:]:
            count += 1
    return bool(count == cpu_mems_len)
def get_cpu_info_from_all_nodes(nodes):
    """Assuming all nodes are Linux nodes, retrieve the following
    cpu information from all nodes:
        - cpu architecture
        - cpu layout

    :param nodes: DICT__nodes from Topology.DICT__nodes.
    :type nodes: dict
    :raises RuntimeError: If an ssh command retrieving cpu information
        fails.
    """
    for node in nodes.values():
        stdout, _ = exec_cmd_no_error(node, u"uname -m")
        node[u"arch"] = stdout.strip()
        stdout, _ = exec_cmd_no_error(node, u"lscpu -p")
        node[u"cpuinfo"] = list()
        for line in stdout.split(u"\n"):
            # "lscpu -p" header lines start with "#"; data lines are
            # comma-separated integer fields.
            if line and line[0] != u"#":
                node[u"cpuinfo"].append(
                    [CpuUtils.__str2int(x) for x in line.split(u",")]
                )
def worker_count_from_cores_and_smt(phy_cores, smt_used):
    """Simple conversion utility, needs smt from caller.

    The implementation assumes we pack 1 or 2 workers per core,
    depending on hyperthreading.

    Some keywords use None to indicate no core/worker limit,
    so this converts None to None.

    :param phy_cores: How many physical cores to use for workers.
    :param smt_used: Whether symmetric multithreading is used.
    :type phy_cores: Optional[int]
    :type smt_used: bool
    :returns: How many VPP workers fit into the given number of cores.
    :rtype: Optional[int]
    """
    if phy_cores is None:
        # None means "no limit"; propagate it unchanged.
        return None
    workers_per_core = CpuUtils.NR_OF_THREADS if smt_used else 1
    workers = phy_cores * workers_per_core
    return workers
def cpu_node_count(node):
    """Return count of numa nodes.

    :param node: Targeted node.
    :type node: dict
    :returns: Count of numa nodes.
    :rtype: int
    :raises RuntimeError: If node cpuinfo is not available.
    """
    cpu_info = node.get(u"cpuinfo")
    if cpu_info is not None:
        # Column 3 of "lscpu -p" output is the numa node id; lscpu lists
        # CPUs in ascending order, so the last row holds the highest id.
        return node[u"cpuinfo"][-1][3] + 1
    raise RuntimeError(u"Node cpuinfo not available.")
def cpu_list_per_node(node, cpu_node, smt_used=False):
    """Return node related list of CPU numbers.

    :param node: Node dictionary with cpuinfo.
    :param cpu_node: Numa node number.
    :param smt_used: True - we want to use SMT, otherwise false.
    :type node: dict
    :type cpu_node: int
    :type smt_used: bool
    :returns: List of cpu numbers related to numa from argument.
    :rtype: list of int
    :raises RuntimeError: If node cpuinfo is not available
        or if SMT is not enabled.
    """
    cpu_node = int(cpu_node)
    cpu_info = node.get(u"cpuinfo")
    if cpu_info is None:
        raise RuntimeError(u"Node cpuinfo not available.")

    smt_enabled = CpuUtils.is_smt_enabled(cpu_info)
    if not smt_enabled and smt_used:
        raise RuntimeError(u"SMT is not enabled.")

    # Column 0 is the CPU id, column 3 is the numa node id.
    cpu_list = list()
    for cpu in cpu_info:
        if cpu[3] == cpu_node:
            cpu_list.append(cpu[0])

    if smt_enabled and not smt_used:
        # SMT present but not wanted: keep only the first thread of
        # each core (siblings occupy the second half of the list).
        cpu_list_len = len(cpu_list)
        cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]

    return cpu_list
def cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False):
    """Return node related subset of list of CPU numbers.

    :param node: Node dictionary with cpuinfo.
    :param cpu_node: Numa node number.
    :param skip_cnt: Skip first "skip_cnt" CPUs.
    :param cpu_cnt: Count of cpus to return, if 0 then return all.
    :param smt_used: True - we want to use SMT, otherwise false.
    :type node: dict
    :type cpu_node: int
    :type skip_cnt: int
    :type cpu_cnt: int
    :type smt_used: bool
    :returns: Cpu numbers related to numa from argument.
    :rtype: list
    :raises RuntimeError: If we require more cpus than available.
    """
    cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)

    cpu_list_len = len(cpu_list)
    if cpu_cnt + skip_cnt > cpu_list_len:
        raise RuntimeError(u"cpu_cnt + skip_cnt > length(cpu list).")

    if cpu_cnt == 0:
        # Zero means "take everything after the skipped prefix".
        cpu_cnt = cpu_list_len - skip_cnt

    if smt_used:
        # Slice both sibling halves so each selected physical core
        # contributes both of its hyperthreads.
        cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
        cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
        cpu_list = cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]
        cpu_list_ex = cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]
        cpu_list.extend(cpu_list_ex)
    else:
        cpu_list = cpu_list[skip_cnt:skip_cnt + cpu_cnt]

    return cpu_list
def cpu_list_per_node_str(
        node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u",", smt_used=False):
    """Return string of node related list of CPU numbers.

    :param node: Node dictionary with cpuinfo.
    :param cpu_node: Numa node number.
    :param skip_cnt: Skip first "skip_cnt" CPUs.
    :param cpu_cnt: Count of cpus to return, if 0 then return all.
    :param sep: Separator, default: 1,2,3,4,....
    :param smt_used: True - we want to use SMT, otherwise false.
    :type node: dict
    :type cpu_node: int
    :type skip_cnt: int
    :type cpu_cnt: int
    :type sep: str
    :type smt_used: bool
    :returns: Cpu numbers related to numa from argument.
    :rtype: str
    """
    cpu_list = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
        smt_used=smt_used
    )
    return sep.join(str(cpu) for cpu in cpu_list)
def cpu_range_per_node_str(
        node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u"-", smt_used=False):
    """Return string of node related range of CPU numbers, e.g. 0-4.

    :param node: Node dictionary with cpuinfo.
    :param cpu_node: Numa node number.
    :param skip_cnt: Skip first "skip_cnt" CPUs.
    :param cpu_cnt: Count of cpus to return, if 0 then return all.
    :param sep: Separator, default: "-".
    :param smt_used: True - we want to use SMT, otherwise false.
    :type node: dict
    :type cpu_node: int
    :type skip_cnt: int
    :type cpu_cnt: int
    :type sep: str
    :type smt_used: bool
    :returns: String of node related range of CPU numbers.
    :rtype: str
    """
    cpu_list = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
        smt_used=smt_used
    )
    if smt_used:
        # Sibling threads are not contiguous with their primaries, so
        # emit two ranges joined by ",".
        cpu_list_len = len(cpu_list)
        cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
        cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
        cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
            f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
    else:
        cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"

    return cpu_range
def cpu_slice_of_list_for_nf(
        node, cpu_node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
        nf_dtc=1, nf_mtcr=2, nf_dtcr=1, skip_cnt=0):
    """Return list of DUT node related list of CPU numbers. The main
    computing unit is physical core count.

    :param node: DUT node.
    :param cpu_node: Numa node number.
    :param nf_chains: Number of NF chains.
    :param nf_nodes: Number of NF nodes in chain.
    :param nf_chain: Chain number indexed from 1.
    :param nf_node: Node number indexed from 1.
    :param nf_dtc: Amount of physical cores for NF data plane.
    :param nf_mtcr: NF main thread per core ratio.
    :param nf_dtcr: NF data plane thread per core ratio.
    :param skip_cnt: Skip first "skip_cnt" CPUs.
    :type node: dict
    :type cpu_node: int
    :type nf_chains: int
    :type nf_nodes: int
    :type nf_chain: int
    :type nf_node: int
    :type nf_dtc: int or float
    :type nf_mtcr: int
    :type nf_dtcr: int
    :type skip_cnt: int
    :returns: List of CPUs allocated to NF.
    :rtype: list
    :raises RuntimeError: If we require more cpus than available or if
        placement is not possible due to wrong parameters.
    """
    if not 1 <= nf_chain <= nf_chains:
        raise RuntimeError(u"ChainID is out of range!")
    if not 1 <= nf_node <= nf_nodes:
        raise RuntimeError(u"NodeID is out of range!")

    smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
    cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
    # CPU thread sibling offset.
    sib = len(cpu_list) // CpuUtils.NR_OF_THREADS

    dtc_is_integer = isinstance(nf_dtc, int)
    if not smt_used and not dtc_is_integer:
        raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
    if not dtc_is_integer:
        # Fractional core counts are only meaningful with SMT; scale the
        # fraction to a whole number of hardware threads.
        nf_dtc = int(nf_dtc * CpuUtils.NR_OF_THREADS)

    # Ceiling divisions: cores required for main / data plane threads.
    mt_req = ((nf_chains * nf_nodes) + nf_mtcr - 1) // nf_mtcr
    dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr

    if (skip_cnt + mt_req + dt_req) > (sib if smt_used else len(cpu_list)):
        raise RuntimeError(u"Not enough CPU cores available for placement!")

    offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
    mt_skip = skip_cnt + (offset % mt_req)
    dt_skip = skip_cnt + mt_req + (offset % dt_req) * nf_dtc

    result = cpu_list[dt_skip:dt_skip + nf_dtc]
    # NOTE(review): several lines of the branch below were lost in this
    # extraction and have been reconstructed from upstream CSIT CpuUtils;
    # confirm against the full file.
    if smt_used:
        if (offset // mt_req) & 1:  # check oddness
            mt_skip += sib
        # Extend with the sibling hyperthreads, which occupy the second
        # half of cpu_list.
        dt_skip += sib
        result.extend(cpu_list[dt_skip:dt_skip + nf_dtc])
    elif (offset // dt_req) & 1:  # check oddness
        result = cpu_list[dt_skip:dt_skip + nf_dtc]

    # Prepend the CPU reserved for the NF main thread.
    result[0:0] = cpu_list[mt_skip:mt_skip + 1]
    return result
def get_affinity_nf(
        nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
        vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
    """Get affinity of NF (network function). Result will be used to compute
    the amount of CPUs and also affinity.

    :param nodes: Physical topology nodes.
    :param node: SUT node.
    :param nf_chains: Number of NF chains.
    :param nf_nodes: Number of NF nodes in chain.
    :param nf_chain: Chain number indexed from 1.
    :param nf_node: Node number indexed from 1.
    :param vs_dtc: Amount of physical cores for vswitch data plane.
    :param nf_dtc: Amount of physical cores for NF data plane.
    :param nf_mtcr: NF main thread per core ratio.
    :param nf_dtcr: NF data plane thread per core ratio.
    :type nodes: dict
    :type node: str
    :type nf_chains: int
    :type nf_nodes: int
    :type nf_chain: int
    :type nf_node: int
    :type vs_dtc: int
    :type nf_dtc: int or float
    :type nf_mtcr: int
    :type nf_dtcr: int
    :returns: List of CPUs allocated to NF.
    :rtype: list
    """
    # Reserve CPUs already taken by the system and the vswitch.
    skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc

    # Resolve the numa node from the SUT's two test interfaces, looked
    # up as Robot Framework variables ${<node>_if1} / ${<node>_if2}.
    interface_list = list()
    interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if1}}"))
    interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if2}}"))

    cpu_node = Topology.get_interfaces_numa_node(
        nodes[node], *interface_list)

    return CpuUtils.cpu_slice_of_list_for_nf(
        node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
        nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
        nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
    )
def get_affinity_trex(
        node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
    """Get affinity for T-Rex. Result will be used to pin T-Rex threads.

    :param node: TG node.
    :param if1_pci: TG first interface.
    :param if2_pci: TG second interface.
    :param tg_mtc: TG main thread count.
    :param tg_dtc: TG dataplane thread count.
    :param tg_ltc: TG latency thread count.
    :type node: dict
    :type if1_pci: str
    :type if2_pci: str
    :type tg_mtc: int
    :type tg_dtc: int
    :type tg_ltc: int
    :returns: List of CPUs allocated to T-Rex including numa node.
    :rtype: int, int, int, list
    """
    interface_list = [if1_pci, if2_pci]
    cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)

    # Allocate main, dataplane and latency threads from consecutive,
    # non-overlapping slices of the numa's CPU list.
    master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
        smt_used=False)

    threads = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
        smt_used=False)

    latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
        smt_used=False)

    return master_thread_id[0], latency_thread_id[0], cpu_node, threads
def get_affinity_iperf(
        node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
    """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.

    :param node: Topology node.
    :param pf_key: Topology interface.
    :param cpu_skip_cnt: Amount of CPU cores to skip.
    :param cpu_cnt: CPU threads count.
    :type node: dict
    :type pf_key: str
    :type cpu_skip_cnt: int
    :type cpu_cnt: int
    :returns: List of CPUs allocated to iPerf3.
    :rtype: str
    """
    if pf_key:
        cpu_node = Topology.get_interface_numa_node(node, pf_key)
    else:
        # No interface given: fall back to numa 0.
        cpu_node = 0

    return CpuUtils.cpu_range_per_node_str(
        node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
        smt_used=False)
def get_affinity_vhost(
        node, pf_key, skip_cnt=0, cpu_cnt=1):
    """Get affinity for vhost. Result will be used to pin vhost threads.

    :param node: Topology node.
    :param pf_key: Topology interface.
    :param skip_cnt: Amount of CPU cores to skip.
    :param cpu_cnt: CPU threads count.
    :type node: dict
    :type pf_key: str
    :type skip_cnt: int
    :type cpu_cnt: int
    :returns: List of CPUs allocated to vhost process.
    :rtype: list
    """
    if pf_key:
        cpu_node = Topology.get_interface_numa_node(node, pf_key)
    else:
        # No interface given: fall back to numa 0.
        cpu_node = 0

    smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
    if smt_used:
        # cpu_cnt is expressed in threads; convert to physical cores.
        cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS

    return CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
        smt_used=False)