1 # Copyright (c) 2024 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """CPU utilities library."""
16 from random import choice
18 from robot.libraries.BuiltIn import BuiltIn
20 from resources.libraries.python.Constants import Constants
21 from resources.libraries.python.ssh import exec_cmd_no_error
22 from resources.libraries.python.topology import Topology, NodeType
__all__ = [u"CpuUtils"]

# Number of threads per core.
# NOTE(review): the enclosing `class CpuUtils:` header and the
# `NR_OF_THREADS` constant this comment annotates are not visible in this
# chunk; methods below reference `CpuUtils.NR_OF_THREADS` — confirm it is
# defined nearby (presumably 2 threads per core on SMT systems).
def __str2int(string):
    """Conversion from string to integer, 0 in case of empty string.

    :param string: Input string.
    :type string: str
    :returns: Integer converted from string, 0 in case of ValueError.
    :rtype: int
    """
    # Empty string (or any non-numeric text) is treated as 0 rather than
    # propagating the error; "lscpu -p" emits empty fields for some columns.
    try:
        return int(string)
    except ValueError:
        return 0
def is_smt_enabled(cpu_info):
    """Uses CPU mapping to find out if SMT is enabled or not. If SMT is
    enabled, the L1d,L1i,L2,L3 setting is the same for two processors. These
    two processors are two threads of one core.

    :param cpu_info: CPU info, the output of "lscpu -p".
    :type cpu_info: list
    :returns: True if SMT is enabled, False if SMT is disabled.
    :rtype: bool
    """
    # Last four fields of each lscpu row are the cache topology
    # (L1d,L1i,L2,L3); sibling threads of one core share them.
    cpu_mems = [item[-4:] for item in cpu_info]
    cpu_mems_len = len(cpu_mems) // CpuUtils.NR_OF_THREADS
    count = 0
    # SMT is on only if every CPU in the first half has a cache-identical
    # sibling in the second half.
    for cpu_mem in cpu_mems[:cpu_mems_len]:
        if cpu_mem in cpu_mems[cpu_mems_len:]:
            count += 1
    return bool(count == cpu_mems_len)
def get_cpu_info_from_all_nodes(nodes):
    """Assuming all nodes are Linux nodes, retrieve the following
    cpu information from all nodes:
        - cpu architecture
        - cpu layout

    :param nodes: DICT__nodes from Topology.DICT__nodes.
    :type nodes: dict
    :raises RuntimeError: If an ssh command retrieving cpu information
        fails.
    """
    for node in nodes.values():
        stdout, _ = exec_cmd_no_error(node, u"uname -m")
        node[u"arch"] = stdout.strip()
        stdout, _ = exec_cmd_no_error(node, u"lscpu -p")
        node[u"cpuinfo"] = list()
        for line in stdout.split(u"\n"):
            # Skip the comment header lines lscpu prints before the data.
            if line and line[0] != u"#":
                node[u"cpuinfo"].append(
                    [CpuUtils.__str2int(x) for x in line.split(u",")]
                )
def cpu_node_count(node):
    """Return count of numa nodes.

    :param node: Targeted node.
    :type node: dict
    :returns: Count of numa nodes.
    :rtype: int
    :raises RuntimeError: If node cpuinfo is not available.
    """
    cpu_info = node.get(u"cpuinfo")
    if cpu_info is not None:
        # Column 3 of the last lscpu row is the highest numa node id;
        # ids are 0-based, hence +1 for the count.
        return node[u"cpuinfo"][-1][3] + 1
    raise RuntimeError(u"Node cpuinfo not available.")
def cpu_list_per_node(node, cpu_node, smt_used=False):
    """Return node related list of CPU numbers.

    :param node: Node dictionary with cpuinfo.
    :param cpu_node: Numa node number.
    :param smt_used: True - we want to use SMT, otherwise false.
    :type node: dict
    :type cpu_node: int
    :type smt_used: bool
    :returns: List of cpu numbers related to numa from argument.
    :rtype: list of int
    :raises RuntimeError: If node cpuinfo is not available
        or if SMT is not enabled.
    """
    cpu_node = int(cpu_node)
    cpu_info = node.get(u"cpuinfo")
    if cpu_info is None:
        raise RuntimeError(u"Node cpuinfo not available.")

    smt_enabled = CpuUtils.is_smt_enabled(cpu_info)
    if not smt_enabled and smt_used:
        raise RuntimeError(u"SMT is not enabled.")

    # Column 0 is the logical CPU id, column 3 its numa node.
    cpu_list = list()
    for cpu in cpu_info:
        if cpu[3] == cpu_node:
            cpu_list.append(cpu[0])

    # When SMT is disabled, or it is enabled and we want the siblings,
    # the full list is returned unchanged.
    if smt_enabled and not smt_used:
        # Drop the sibling-thread half of the list.
        cpu_list_len = len(cpu_list)
        cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]

    return cpu_list
def cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False):
    """Return node related subset of list of CPU numbers.

    :param node: Node dictionary with cpuinfo.
    :param cpu_node: Numa node number.
    :param skip_cnt: Skip first "skip_cnt" CPUs.
    :param cpu_cnt: Count of cpus to return, if 0 then return all.
    :param smt_used: True - we want to use SMT, otherwise false.
    :type node: dict
    :type cpu_node: int
    :type skip_cnt: int
    :type cpu_cnt: int
    :type smt_used: bool
    :returns: Cpu numbers related to numa from argument.
    :rtype: list
    :raises RuntimeError: If we require more cpus than available.
    """
    cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)

    cpu_list_len = len(cpu_list)
    if cpu_cnt + skip_cnt > cpu_list_len:
        raise RuntimeError(u"cpu_cnt + skip_cnt > length(cpu list).")

    # cpu_cnt == 0 means "everything after the skipped prefix".
    if cpu_cnt == 0:
        cpu_cnt = cpu_list_len - skip_cnt

    if smt_used:
        # Apply skip/count to each sibling-thread half separately so
        # both hyperthreads of every selected core are returned.
        cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
        cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
        cpu_list = cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]
        cpu_list_ex = cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]
        cpu_list.extend(cpu_list_ex)
    else:
        cpu_list = cpu_list[skip_cnt:skip_cnt + cpu_cnt]

    return cpu_list
def cpu_list_per_node_str(
        node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u",", smt_used=False):
    """Return string of node related list of CPU numbers.

    :param node: Node dictionary with cpuinfo.
    :param cpu_node: Numa node number.
    :param skip_cnt: Skip first "skip_cnt" CPUs.
    :param cpu_cnt: Count of cpus to return, if 0 then return all.
    :param sep: Separator, default: 1,2,3,4,....
    :param smt_used: True - we want to use SMT, otherwise false.
    :type node: dict
    :type cpu_node: int
    :type skip_cnt: int
    :type cpu_cnt: int
    :type sep: str
    :type smt_used: bool
    :returns: Cpu numbers related to numa from argument.
    :rtype: str
    """
    cpu_list = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
        smt_used=smt_used
    )
    return sep.join(str(cpu) for cpu in cpu_list)
def cpu_range_per_node_str(
        node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u"-", smt_used=False):
    """Return string of node related range of CPU numbers, e.g. 0-4.

    :param node: Node dictionary with cpuinfo.
    :param cpu_node: Numa node number.
    :param skip_cnt: Skip first "skip_cnt" CPUs.
    :param cpu_cnt: Count of cpus to return, if 0 then return all.
    :param sep: Separator, default: "-".
    :param smt_used: True - we want to use SMT, otherwise false.
    :type node: dict
    :type cpu_node: int
    :type skip_cnt: int
    :type cpu_cnt: int
    :type sep: str
    :type smt_used: bool
    :returns: String of node related range of CPU numbers.
    :rtype: str
    """
    cpu_list = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
        smt_used=smt_used
    )
    if smt_used:
        # Two ranges, one per sibling-thread half, joined with ",".
        cpu_list_len = len(cpu_list)
        cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
        cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
        cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
            f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
    else:
        cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"

    return cpu_range
def cpu_slice_of_list_for_nf(
        node, cpu_node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
        nf_dtc=1, nf_mtcr=2, nf_dtcr=1, skip_cnt=0):
    """Return list of DUT node related list of CPU numbers. The main
    computing unit is physical core count.

    On SMT enabled DUTs, both sibling logical cores are used,
    unless Robot variable \${smt_used} is set to False.

    :param node: DUT node.
    :param cpu_node: Numa node number.
    :param nf_chains: Number of NF chains.
    :param nf_nodes: Number of NF nodes in chain.
    :param nf_chain: Chain number indexed from 1.
    :param nf_node: Node number indexed from 1.
    :param nf_dtc: Amount of physical cores for NF data plane.
    :param nf_mtcr: NF main thread per core ratio.
    :param nf_dtcr: NF data plane thread per core ratio.
    :param skip_cnt: Skip first "skip_cnt" CPUs.
    :type node: dict
    :type cpu_node: int
    :type nf_chains: int
    :type nf_nodes: int
    :type nf_chain: int
    :type nf_node: int
    :type nf_dtc: int or float
    :type nf_mtcr: int
    :type nf_dtcr: int
    :type skip_cnt: int
    :returns: List of CPUs allocated to NF.
    :rtype: list
    :raises RuntimeError: If we require more cpus than available or if
        placement is not possible due to wrong parameters.
    """
    if not 1 <= nf_chain <= nf_chains:
        raise RuntimeError(u"ChainID is out of range!")
    if not 1 <= nf_node <= nf_nodes:
        raise RuntimeError(u"NodeID is out of range!")

    smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
    smt_used = BuiltIn().get_variable_value("\${smt_used}", smt_used)
    cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
    # CPU thread sibling offset.
    sib = len(cpu_list) // CpuUtils.NR_OF_THREADS

    dtc_is_integer = isinstance(nf_dtc, int)
    if not smt_used and not dtc_is_integer:
        raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
    if not dtc_is_integer:
        # Fractional core count maps to logical cores (two per physical).
        nf_dtc = int(nf_dtc * 2)

    # Physical cores needed for all main threads / all data plane threads,
    # rounded up per the configured thread-per-core ratios.
    mt_req = ((nf_chains * nf_nodes) + nf_mtcr - 1) // nf_mtcr
    dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr

    if (skip_cnt + mt_req + dt_req) > (sib if smt_used else len(cpu_list)):
        raise RuntimeError(u"Not enough CPU cores available for placement!")

    offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
    mt_skip = skip_cnt + (offset % mt_req)
    dt_skip = skip_cnt + mt_req + (offset % dt_req) * nf_dtc

    result = cpu_list[dt_skip:dt_skip + nf_dtc]
    if smt_used:
        # Odd-numbered NFs land on the sibling half of the core list.
        if (offset // mt_req) & 1:  # check oddness
            mt_skip += sib

        dt_skip += sib
        if dtc_is_integer:
            result.extend(cpu_list[dt_skip:dt_skip + nf_dtc])
        elif (offset // dt_req) & 1:  # check oddness
            result = cpu_list[dt_skip:dt_skip + nf_dtc]

    # Prepend the main-thread CPU.
    result[0:0] = cpu_list[mt_skip:mt_skip + 1]
    return result
def get_affinity_af_xdp(
        node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
    """Get affinity for AF_XDP interface. Result will be used to pin IRQs.

    :param node: Topology node.
    :param pf_key: Topology interface.
    :param cpu_skip_cnt: Amount of CPU cores to skip.
    :param cpu_cnt: CPU threads count.
    :type node: dict
    :type pf_key: str
    :type cpu_skip_cnt: int
    :type cpu_cnt: int
    :returns: List of CPUs allocated to AF_XDP interface.
    :rtype: list
    """
    # No interface key: fall back to numa node 0.
    if pf_key:
        cpu_node = Topology.get_interface_numa_node(node, pf_key)
    else:
        cpu_node = 0

    smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
    if smt_used:
        # Requested count is logical threads; halve to physical cores
        # since the slice below re-expands to both siblings.
        cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS

    return CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
        smt_used=smt_used
    )
def get_affinity_nf(
        nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
        vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
    """Get affinity of NF (network function). Result will be used to compute
    the amount of CPUs and also affinity.

    :param nodes: Physical topology nodes.
    :param node: SUT node.
    :param nf_chains: Number of NF chains.
    :param nf_nodes: Number of NF nodes in chain.
    :param nf_chain: Chain number indexed from 1.
    :param nf_node: Node number indexed from 1.
    :param vs_dtc: Amount of physical cores for vswitch data plane.
    :param nf_dtc: Amount of physical cores for NF data plane.
    :param nf_mtcr: NF main thread per core ratio.
    :param nf_dtcr: NF data plane thread per core ratio.
    :type nodes: dict
    :type node: str
    :type nf_chains: int
    :type nf_nodes: int
    :type nf_chain: int
    :type nf_node: int
    :type vs_dtc: int
    :type nf_dtc: int or float
    :type nf_mtcr: int
    :type nf_dtcr: int
    :returns: List of CPUs allocated to NF.
    :rtype: list
    """
    # NFs are placed after the system, vswitch-main and vswitch-dataplane
    # cores.
    skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc

    interface_list = list()
    interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if1}}"))
    interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if2}}"))

    cpu_node = Topology.get_interfaces_numa_node(
        nodes[node], *interface_list)

    return CpuUtils.cpu_slice_of_list_for_nf(
        node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
        nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
        nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
    )
def get_affinity_trex(
        node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
    """Get affinity for T-Rex. Result will be used to pin T-Rex threads.

    :param node: TG node.
    :param if_key: TG first interface.
    :param tg_mtc: TG main thread count.
    :param tg_dtc: TG dataplane thread count.
    :param tg_ltc: TG latency thread count.
    :param tg_dtc_offset: TG dataplane thread offset.
    :type node: dict
    :type if_key: str
    :type tg_mtc: int
    :type tg_dtc: int
    :type tg_ltc: int
    :type tg_dtc_offset: int
    :returns: List of CPUs allocated to T-Rex including numa node.
    :rtype: int, int, int, list
    """
    interface_list = [if_key]
    cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)

    # Layout on the numa node: [main][latency][offset][dataplane...];
    # SMT siblings are deliberately not used for any T-Rex thread.
    master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
        smt_used=False)

    threads = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
        cpu_cnt=tg_dtc, smt_used=False)

    latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)

    return master_thread_id[0], latency_thread_id[0], cpu_node, threads
def get_affinity_iperf(
        node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
    """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.

    :param node: Topology node.
    :param pf_key: Topology interface.
    :param cpu_skip_cnt: Amount of CPU cores to skip.
    :param cpu_cnt: CPU threads count.
    :type node: dict
    :type pf_key: str
    :type cpu_skip_cnt: int
    :type cpu_cnt: int
    :returns: List of CPUs allocated to iPerf3.
    :rtype: str
    """
    # No interface key: fall back to numa node 0.
    if pf_key:
        cpu_node = Topology.get_interface_numa_node(node, pf_key)
    else:
        cpu_node = 0

    smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])

    return CpuUtils.cpu_range_per_node_str(
        node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
        smt_used=smt_used
    )
def get_affinity_vhost(
        node, pf_key, skip_cnt=0, cpu_cnt=1):
    """Get affinity for vhost. Result will be used to pin vhost threads.

    :param node: Topology node.
    :param pf_key: Topology interface.
    :param skip_cnt: Amount of CPU cores to skip.
    :param cpu_cnt: CPU threads count.
    :type node: dict
    :type pf_key: str
    :type skip_cnt: int
    :type cpu_cnt: int
    :returns: List of CPUs allocated to vhost process.
    :rtype: list
    """
    # No interface key: fall back to numa node 0.
    if pf_key:
        cpu_node = Topology.get_interface_numa_node(node, pf_key)
    else:
        cpu_node = 0

    smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
    if smt_used:
        # Requested count is logical threads; halve to physical cores.
        cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS

    # NOTE(review): sibling threads are not requested in the final slice
    # (smt_used=False) — confirm this matches the intended vhost pinning.
    return CpuUtils.cpu_slice_of_list_per_node(
        node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
        smt_used=False
    )
def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
    """Get idle CPU List.

    :param node: Node dictionary with cpuinfo.
    :param cpu_node: Numa node number.
    :param smt_used: True - we want to use SMT, otherwise false.
    :param cpu_alloc_str: vpp used cores.
    :param sep: Separator, default: ",".
    :type node: dict
    :type cpu_node: int
    :type smt_used: bool
    :type cpu_alloc_str: str
    :type sep: str
    :returns: List of CPUs not in the allocated set.
    :rtype: list
    """
    cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
    # Keep only CPUs whose id does not appear in the allocation string.
    cpu_idle_list = [i for i in cpu_list
                     if str(i) not in cpu_alloc_str.split(sep)]
    return cpu_idle_list
def get_affinity_vswitch(
        nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
    """Get affinity for vswitch on all DUTs.

    :param nodes: Topology nodes.
    :param phy_cores: Number of physical cores to allocate.
    :param rx_queues: Number of RX queues. (Optional, Default: None)
    :param rxd: Number of RX descriptors. (Optional, Default: None)
    :param txd: Number of TX descriptors. (Optional, Default: None)
    :type nodes: dict
    :type phy_cores: int
    :type rx_queues: int
    :type rxd: int
    :type txd: int
    :returns: Compute resource information dictionary.
    :rtype: dict
    """
    compute_resource_info = dict()
    for node_name, node in nodes.items():
        if node["type"] != NodeType.DUT:
            continue
        # Number of Data Plane physical cores.
        dp_cores_count = BuiltIn().get_variable_value(
            "${dp_cores_count}", phy_cores
        )
        # Number of Feature Plane physical cores.
        fp_cores_count = BuiltIn().get_variable_value(
            "${fp_cores_count}", phy_cores - dp_cores_count
        )
        # Ratio between RX queues and data plane threads.
        # NOTE(review): default of 1 reconstructed — confirm against the
        # Robot variable ${rxq_ratio} convention in the test suites.
        rxq_ratio = BuiltIn().get_variable_value(
            "${rxq_ratio}", 1
        )

        dut_pf_keys = BuiltIn().get_variable_value(
            f"${{{node_name}_pf_keys}}"
        )
        # SMT override in case of non standard test cases.
        smt_used = BuiltIn().get_variable_value(
            "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
        )

        cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
        skip_cnt = Constants.CPU_CNT_SYSTEM
        cpu_main = CpuUtils.cpu_list_per_node_str(
            node, cpu_node,
            skip_cnt=skip_cnt,
            cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
            smt_used=False
        )
        # With zero phy_cores, pick a random core for main thread only.
        cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
        skip_cnt += Constants.CPU_CNT_MAIN
        cpu_dp = CpuUtils.cpu_list_per_node_str(
            node, cpu_node,
            skip_cnt=skip_cnt,
            cpu_cnt=int(dp_cores_count),
            smt_used=smt_used
        ) if int(dp_cores_count) else ""
        skip_cnt = skip_cnt + int(dp_cores_count)
        cpu_fp = CpuUtils.cpu_list_per_node_str(
            node, cpu_node,
            skip_cnt=skip_cnt,
            cpu_cnt=int(fp_cores_count),
            smt_used=smt_used
        ) if int(fp_cores_count) else ""

        # Logical thread counts (doubled when SMT siblings are in use).
        fp_count_int = \
            int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
            else int(fp_cores_count)
        dp_count_int = \
            int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
            else int(dp_cores_count)

        rxq_count_int = \
            int(rx_queues) if rx_queues \
            else int(dp_count_int/rxq_ratio)
        rxq_count_int = 1 if not rxq_count_int else rxq_count_int

        compute_resource_info["buffers_numa"] = \
            215040 if smt_used else 107520
        compute_resource_info["smt_used"] = smt_used
        compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
        compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
        compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
        compute_resource_info[f"{node_name}_cpu_wt"] = \
            ",".join(filter(None, [cpu_dp, cpu_fp]))
        compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
            ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
        compute_resource_info["cpu_count_int"] = \
            int(dp_cores_count) + int(fp_cores_count)
        compute_resource_info["rxd_count_int"] = rxd
        compute_resource_info["txd_count_int"] = txd
        compute_resource_info["rxq_count_int"] = rxq_count_int
        compute_resource_info["fp_count_int"] = fp_count_int
        compute_resource_info["dp_count_int"] = dp_count_int

    return compute_resource_info