# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""CPU utilities library."""
16 from random import choice
18 from robot.libraries.BuiltIn import BuiltIn
20 from resources.libraries.python.Constants import Constants
21 from resources.libraries.python.ssh import exec_cmd_no_error
22 from resources.libraries.python.topology import Topology, NodeType
__all__ = [u"CpuUtils"]


class CpuUtils:
    """CPU utilities.

    Collection of static helpers computing CPU/core allocations
    (per numa node, per NF, per traffic generator) from "lscpu -p" data
    stored in topology node dictionaries.
    """

    # Number of threads per core.
    # All sibling-half arithmetic below assumes at most 2-way SMT.
    NR_OF_THREADS = 2
34 def __str2int(string):
35 """Conversion from string to integer, 0 in case of empty string.
37 :param string: Input string.
39 :returns: Integer converted from string, 0 in case of ValueError.
48 def is_smt_enabled(cpu_info):
49 """Uses CPU mapping to find out if SMT is enabled or not. If SMT is
50 enabled, the L1d,L1i,L2,L3 setting is the same for two processors. These
51 two processors are two threads of one core.
53 :param cpu_info: CPU info, the output of "lscpu -p".
55 :returns: True if SMT is enabled, False if SMT is disabled.
58 cpu_mems = [item[-4:] for item in cpu_info]
59 cpu_mems_len = len(cpu_mems) // CpuUtils.NR_OF_THREADS
61 for cpu_mem in cpu_mems[:cpu_mems_len]:
62 if cpu_mem in cpu_mems[cpu_mems_len:]:
64 return bool(count == cpu_mems_len)
67 def get_cpu_info_from_all_nodes(nodes):
68 """Assuming all nodes are Linux nodes, retrieve the following
69 cpu information from all nodes:
73 :param nodes: DICT__nodes from Topology.DICT__nodes.
75 :raises RuntimeError: If an ssh command retrieving cpu information
78 for node in nodes.values():
79 stdout, _ = exec_cmd_no_error(node, u"uname -m")
80 node[u"arch"] = stdout.strip()
81 stdout, _ = exec_cmd_no_error(node, u"lscpu -p")
82 node[u"cpuinfo"] = list()
83 for line in stdout.split(u"\n"):
84 if line and line[0] != u"#":
85 node[u"cpuinfo"].append(
86 [CpuUtils.__str2int(x) for x in line.split(u",")]
90 def cpu_node_count(node):
91 """Return count of numa nodes.
93 :param node: Targeted node.
95 :returns: Count of numa nodes.
97 :raises RuntimeError: If node cpuinfo is not available.
99 cpu_info = node.get(u"cpuinfo")
100 if cpu_info is not None:
101 return node[u"cpuinfo"][-1][3] + 1
103 raise RuntimeError(u"Node cpuinfo not available.")
106 def cpu_list_per_node(node, cpu_node, smt_used=False):
107 """Return node related list of CPU numbers.
109 :param node: Node dictionary with cpuinfo.
110 :param cpu_node: Numa node number.
111 :param smt_used: True - we want to use SMT, otherwise false.
115 :returns: List of cpu numbers related to numa from argument.
117 :raises RuntimeError: If node cpuinfo is not available
118 or if SMT is not enabled.
120 cpu_node = int(cpu_node)
121 cpu_info = node.get(u"cpuinfo")
123 raise RuntimeError(u"Node cpuinfo not available.")
125 smt_enabled = CpuUtils.is_smt_enabled(cpu_info)
126 if not smt_enabled and smt_used:
127 raise RuntimeError(u"SMT is not enabled.")
131 if cpu[3] == cpu_node:
132 cpu_list.append(cpu[0])
134 if not smt_enabled or smt_enabled and smt_used:
137 if smt_enabled and not smt_used:
138 cpu_list_len = len(cpu_list)
139 cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
144 def cpu_slice_of_list_per_node(
145 node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False):
146 """Return node related subset of list of CPU numbers.
148 :param node: Node dictionary with cpuinfo.
149 :param cpu_node: Numa node number.
150 :param skip_cnt: Skip first "skip_cnt" CPUs.
151 :param cpu_cnt: Count of cpus to return, if 0 then return all.
152 :param smt_used: True - we want to use SMT, otherwise false.
158 :returns: Cpu numbers related to numa from argument.
160 :raises RuntimeError: If we require more cpus than available.
162 cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
164 cpu_list_len = len(cpu_list)
165 if cpu_cnt + skip_cnt > cpu_list_len:
166 raise RuntimeError(u"cpu_cnt + skip_cnt > length(cpu list).")
169 cpu_cnt = cpu_list_len - skip_cnt
172 cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
173 cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
174 cpu_list = cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]
175 cpu_list_ex = cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]
176 cpu_list.extend(cpu_list_ex)
178 cpu_list = cpu_list[skip_cnt:skip_cnt + cpu_cnt]
183 def cpu_list_per_node_str(
184 node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u",", smt_used=False):
185 """Return string of node related list of CPU numbers.
187 :param node: Node dictionary with cpuinfo.
188 :param cpu_node: Numa node number.
189 :param skip_cnt: Skip first "skip_cnt" CPUs.
190 :param cpu_cnt: Count of cpus to return, if 0 then return all.
191 :param sep: Separator, default: 1,2,3,4,....
192 :param smt_used: True - we want to use SMT, otherwise false.
199 :returns: Cpu numbers related to numa from argument.
202 cpu_list = CpuUtils.cpu_slice_of_list_per_node(
203 node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
206 return sep.join(str(cpu) for cpu in cpu_list)
209 def cpu_range_per_node_str(
210 node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u"-", smt_used=False):
211 """Return string of node related range of CPU numbers, e.g. 0-4.
213 :param node: Node dictionary with cpuinfo.
214 :param cpu_node: Numa node number.
215 :param skip_cnt: Skip first "skip_cnt" CPUs.
216 :param cpu_cnt: Count of cpus to return, if 0 then return all.
217 :param sep: Separator, default: "-".
218 :param smt_used: True - we want to use SMT, otherwise false.
225 :returns: String of node related range of CPU numbers.
228 cpu_list = CpuUtils.cpu_slice_of_list_per_node(
229 node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
233 cpu_list_len = len(cpu_list)
234 cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
235 cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
236 cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
237 f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
239 cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"
244 def cpu_slice_of_list_for_nf(
245 node, cpu_node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
246 nf_dtc=1, nf_mtcr=2, nf_dtcr=1, skip_cnt=0):
247 """Return list of DUT node related list of CPU numbers. The main
248 computing unit is physical core count.
250 :param node: DUT node.
251 :param cpu_node: Numa node number.
252 :param nf_chains: Number of NF chains.
253 :param nf_nodes: Number of NF nodes in chain.
254 :param nf_chain: Chain number indexed from 1.
255 :param nf_node: Node number indexed from 1.
256 :param nf_dtc: Amount of physical cores for NF data plane.
257 :param nf_mtcr: NF main thread per core ratio.
258 :param nf_dtcr: NF data plane thread per core ratio.
259 :param skip_cnt: Skip first "skip_cnt" CPUs.
261 :param cpu_node: int.
266 :type nf_dtc: int or float
270 :returns: List of CPUs allocated to NF.
272 :raises RuntimeError: If we require more cpus than available or if
273 placement is not possible due to wrong parameters.
275 if not 1 <= nf_chain <= nf_chains:
276 raise RuntimeError(u"ChainID is out of range!")
277 if not 1 <= nf_node <= nf_nodes:
278 raise RuntimeError(u"NodeID is out of range!")
280 smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
281 cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
282 # CPU thread sibling offset.
283 sib = len(cpu_list) // CpuUtils.NR_OF_THREADS
285 dtc_is_integer = isinstance(nf_dtc, int)
286 if not smt_used and not dtc_is_integer:
287 raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
288 if not dtc_is_integer:
291 mt_req = ((nf_chains * nf_nodes) + nf_mtcr - 1) // nf_mtcr
292 dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr
294 if (skip_cnt + mt_req + dt_req) > (sib if smt_used else len(cpu_list)):
295 raise RuntimeError(u"Not enough CPU cores available for placement!")
297 offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
298 mt_skip = skip_cnt + (offset % mt_req)
299 dt_skip = skip_cnt + mt_req + (offset % dt_req) * nf_dtc
301 result = cpu_list[dt_skip:dt_skip + nf_dtc]
303 if (offset // mt_req) & 1: # check oddness
308 result.extend(cpu_list[dt_skip:dt_skip + nf_dtc])
309 elif (offset // dt_req) & 1: # check oddness
310 result = cpu_list[dt_skip:dt_skip + nf_dtc]
312 result[0:0] = cpu_list[mt_skip:mt_skip + 1]
316 def get_affinity_af_xdp(
317 node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
318 """Get affinity for AF_XDP interface. Result will be used to pin IRQs.
320 :param node: Topology node.
321 :param pf_key: Topology interface.
322 :param cpu_skip_cnt: Amount of CPU cores to skip.
323 :param cpu_cnt: CPU threads count.
326 :type cpu_skip_cnt: int
328 :returns: List of CPUs allocated to AF_XDP interface.
332 cpu_node = Topology.get_interface_numa_node(node, pf_key)
336 smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
338 cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
340 return CpuUtils.cpu_slice_of_list_per_node(
341 node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
347 nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
348 vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
350 """Get affinity of NF (network function). Result will be used to compute
351 the amount of CPUs and also affinity.
353 :param nodes: Physical topology nodes.
354 :param node: SUT node.
355 :param nf_chains: Number of NF chains.
356 :param nf_nodes: Number of NF nodes in chain.
357 :param nf_chain: Chain number indexed from 1.
358 :param nf_node: Node number indexed from 1.
359 :param vs_dtc: Amount of physical cores for vswitch data plane.
360 :param nf_dtc: Amount of physical cores for NF data plane.
361 :param nf_mtcr: NF main thread per core ratio.
362 :param nf_dtcr: NF data plane thread per core ratio.
370 :type nf_dtc: int or float
373 :returns: List of CPUs allocated to NF.
376 skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc
378 interface_list = list()
379 interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if1}}"))
380 interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if2}}"))
382 cpu_node = Topology.get_interfaces_numa_node(
383 nodes[node], *interface_list)
385 return CpuUtils.cpu_slice_of_list_for_nf(
386 node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
387 nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
388 nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
392 def get_affinity_trex(
393 node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
394 """Get affinity for T-Rex. Result will be used to pin T-Rex threads.
396 :param node: TG node.
397 :param if_key: TG first interface.
398 :param tg_mtc: TG main thread count.
399 :param tg_dtc: TG dataplane thread count.
400 :param tg_ltc: TG latency thread count.
401 :param tg_dtc_offset: TG dataplane thread offset.
407 :type tg_dtc_offset: int
408 :returns: List of CPUs allocated to T-Rex including numa node.
409 :rtype: int, int, int, list
411 interface_list = [if_key]
412 cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
414 master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
415 node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
418 threads = CpuUtils.cpu_slice_of_list_per_node(
419 node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
420 cpu_cnt=tg_dtc, smt_used=False)
422 latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
423 node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
425 return master_thread_id[0], latency_thread_id[0], cpu_node, threads
428 def get_affinity_iperf(
429 node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
430 """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.
432 :param node: Topology node.
433 :param pf_key: Topology interface.
434 :param cpu_skip_cnt: Amount of CPU cores to skip.
435 :param cpu_cnt: CPU threads count.
438 :type cpu_skip_cnt: int
440 :returns: List of CPUs allocated to iPerf3.
444 cpu_node = Topology.get_interface_numa_node(node, pf_key)
448 return CpuUtils.cpu_range_per_node_str(
449 node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
453 def get_affinity_vhost(
454 node, pf_key, skip_cnt=0, cpu_cnt=1):
455 """Get affinity for vhost. Result will be used to pin vhost threads.
457 :param node: Topology node.
458 :param pf_key: Topology interface.
459 :param skip_cnt: Amount of CPU cores to skip.
460 :param cpu_cnt: CPU threads count.
465 :returns: List of CPUs allocated to vhost process.
469 cpu_node = Topology.get_interface_numa_node(node, pf_key)
473 smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
475 cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
477 return CpuUtils.cpu_slice_of_list_per_node(
478 node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
482 def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
483 """Get idle CPU List.
485 :param node: Node dictionary with cpuinfo.
486 :param cpu_node: Numa node number.
487 :param smt_used: True - we want to use SMT, otherwise false.
488 :param cpu_alloc_str: vpp used cores.
489 :param sep: Separator, default: ",".
493 :type cpu_alloc_str: str
498 cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
499 cpu_idle_list = [i for i in cpu_list
500 if str(i) not in cpu_alloc_str.split(sep)]
504 def get_affinity_vswitch(
505 nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
506 """Get affinity for vswitch on all DUTs.
508 :param nodes: Topology nodes.
509 :param phy_cores: Number of physical cores to allocate.
510 :param rx_queues: Number of RX queues. (Optional, Default: None)
511 :param rxd: Number of RX descriptors. (Optional, Default: None)
512 :param txd: Number of TX descriptors. (Optional, Default: None)
518 :returns: Compute resource information dictionary.
521 compute_resource_info = dict()
522 for node_name, node in nodes.items():
523 if node["type"] != NodeType.DUT:
525 # Number of Data Plane physical cores.
526 dp_cores_count = BuiltIn().get_variable_value(
527 "${dp_cores_count}", phy_cores
529 # Number of Feature Plane physical cores.
530 fp_cores_count = BuiltIn().get_variable_value(
531 "${fp_cores_count}", phy_cores - dp_cores_count
533 # Ratio between RX queues and data plane threads.
534 rxq_ratio = BuiltIn().get_variable_value(
538 dut_pf_keys = BuiltIn().get_variable_value(
539 f"${{{node_name}_pf_keys}}"
541 # SMT override in case of non standard test cases.
542 smt_used = BuiltIn().get_variable_value(
543 "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
546 cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
547 skip_cnt = Constants.CPU_CNT_SYSTEM
548 cpu_main = CpuUtils.cpu_list_per_node_str(
551 cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
554 cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
555 skip_cnt += Constants.CPU_CNT_MAIN
556 cpu_dp = CpuUtils.cpu_list_per_node_str(
559 cpu_cnt=int(dp_cores_count),
561 ) if int(dp_cores_count) else ""
562 skip_cnt = skip_cnt + int(dp_cores_count)
563 cpu_fp = CpuUtils.cpu_list_per_node_str(
566 cpu_cnt=int(fp_cores_count),
568 ) if int(fp_cores_count) else ""
571 int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
572 else int(fp_cores_count)
574 int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
575 else int(dp_cores_count)
578 int(rx_queues) if rx_queues \
579 else int(dp_count_int/rxq_ratio)
580 rxq_count_int = 1 if not rxq_count_int else rxq_count_int
582 compute_resource_info["buffers_numa"] = \
583 215040 if smt_used else 107520
584 compute_resource_info["smt_used"] = smt_used
585 compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
586 compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
587 compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
588 compute_resource_info[f"{node_name}_cpu_wt"] = \
589 ",".join(filter(None, [cpu_dp, cpu_fp]))
590 compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
591 ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
592 compute_resource_info["cpu_count_int"] = \
593 int(dp_cores_count) + int(fp_cores_count)
594 compute_resource_info["rxd_count_int"] = rxd
595 compute_resource_info["txd_count_int"] = txd
596 compute_resource_info["rxq_count_int"] = rxq_count_int
597 compute_resource_info["fp_count_int"] = fp_count_int
598 compute_resource_info["dp_count_int"] = dp_count_int
600 return compute_resource_info