# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """CPU utilities library."""
16 from robot.libraries.BuiltIn import BuiltIn
18 from resources.libraries.python.Constants import Constants
19 from resources.libraries.python.ssh import exec_cmd_no_error
20 from resources.libraries.python.topology import Topology
22 __all__ = [u"CpuUtils"]


class CpuUtils:
    """CPU utilities."""

    # Number of threads per core.
    NR_OF_THREADS = 2

    @staticmethod
    def __str2int(string):
        """Conversion from string to integer, 0 in case of empty string.

        :param string: Input string.
        :type string: str
        :returns: Integer converted from string, 0 in case of ValueError.
        :rtype: int
        """
        try:
            return int(string)
        except ValueError:
            return 0

    @staticmethod
    def is_smt_enabled(cpu_info):
        """Uses CPU mapping to find out if SMT is enabled or not. If SMT is
        enabled, the L1d,L1i,L2,L3 setting is the same for two processors.
        These two processors are two threads of one core.

        :param cpu_info: CPU info, the output of "lscpu -p".
        :type cpu_info: list
        :returns: True if SMT is enabled, False if SMT is disabled.
        :rtype: bool
        """
        cpu_mems = [item[-4:] for item in cpu_info]
        cpu_mems_len = len(cpu_mems) // CpuUtils.NR_OF_THREADS
        count = 0
        for cpu_mem in cpu_mems[:cpu_mems_len]:
            if cpu_mem in cpu_mems[cpu_mems_len:]:
                count += 1
        return bool(count == cpu_mems_len)
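
    # Illustrative sketch (not part of the library): "lscpu -p" rows are
    # typically CPU,Core,Socket,Node,,L1d,L1i,L2,L3, so the last four fields
    # are cache ids that repeat for thread siblings. On a hypothetical
    # 2-core SMT-2 box where CPUs 2 and 3 are siblings of CPUs 0 and 1:
    #   cpu_info = [[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1, 1, 1, 0],
    #               [2, 0, 0, 0, 0, 0, 0, 0, 0], [3, 1, 0, 0, 0, 1, 1, 1, 0]]
    #   CpuUtils.is_smt_enabled(cpu_info)  # -> True, halves share cache ids.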

    @staticmethod
    def get_cpu_info_from_all_nodes(nodes):
        """Assuming all nodes are Linux nodes, retrieve the following
        cpu information from all nodes:
            - cpu architecture
            - cpu layout

        :param nodes: DICT__nodes from Topology.DICT__nodes.
        :type nodes: dict
        :raises RuntimeError: If an ssh command retrieving cpu information
            fails.
        """
        for node in nodes.values():
            stdout, _ = exec_cmd_no_error(node, u"uname -m")
            node[u"arch"] = stdout.strip()
            stdout, _ = exec_cmd_no_error(node, u"lscpu -p")
            node[u"cpuinfo"] = list()
            for line in stdout.split(u"\n"):
                if line and line[0] != u"#":
                    node[u"cpuinfo"].append(
                        [CpuUtils.__str2int(x) for x in line.split(u",")]
                    )

    @staticmethod
    def cpu_node_count(node):
        """Return count of numa nodes.

        :param node: Targeted node.
        :type node: dict
        :returns: Count of numa nodes.
        :rtype: int
        :raises RuntimeError: If node cpuinfo is not available.
        """
        cpu_info = node.get(u"cpuinfo")
        if cpu_info is not None:
            return node[u"cpuinfo"][-1][3] + 1

        raise RuntimeError(u"Node cpuinfo not available.")

    @staticmethod
    def cpu_list_per_node(node, cpu_node, smt_used=False):
        """Return node related list of CPU numbers.

        :param node: Node dictionary with cpuinfo.
        :param cpu_node: Numa node number.
        :param smt_used: True - we want to use SMT, otherwise false.
        :type node: dict
        :type cpu_node: int
        :type smt_used: bool
        :returns: List of cpu numbers related to numa from argument.
        :rtype: list of int
        :raises RuntimeError: If node cpuinfo is not available
            or if SMT is not enabled.
        """
        cpu_node = int(cpu_node)
        cpu_info = node.get(u"cpuinfo")
        if cpu_info is None:
            raise RuntimeError(u"Node cpuinfo not available.")

        smt_enabled = CpuUtils.is_smt_enabled(cpu_info)
        if not smt_enabled and smt_used:
            raise RuntimeError(u"SMT is not enabled.")

        cpu_list = []
        for cpu in cpu_info:
            if cpu[3] == cpu_node:
                cpu_list.append(cpu[0])

        if not smt_enabled or smt_enabled and smt_used:
            pass

        if smt_enabled and not smt_used:
            cpu_list_len = len(cpu_list)
            cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]

        return cpu_list
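
    # Usage sketch (illustrative, assuming node[u"cpuinfo"] was filled by
    # get_cpu_info_from_all_nodes on a hypothetical box where numa 0 holds
    # cores 0-13 with thread siblings 28-41):
    #   CpuUtils.cpu_list_per_node(node, 0)                 # [0, 1, ..., 13]
    #   CpuUtils.cpu_list_per_node(node, 0, smt_used=True)  # [0..13, 28..41]
    # Without SMT requested, only the first thread of each core is returned.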

    @staticmethod
    def cpu_slice_of_list_per_node(
            node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False):
        """Return node related subset of list of CPU numbers.

        :param node: Node dictionary with cpuinfo.
        :param cpu_node: Numa node number.
        :param skip_cnt: Skip first "skip_cnt" CPUs.
        :param cpu_cnt: Count of cpus to return, if 0 then return all.
        :param smt_used: True - we want to use SMT, otherwise false.
        :type node: dict
        :type cpu_node: int
        :type skip_cnt: int
        :type cpu_cnt: int
        :type smt_used: bool
        :returns: Cpu numbers related to numa from argument.
        :rtype: list
        :raises RuntimeError: If we require more cpus than available.
        """
        cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)

        cpu_list_len = len(cpu_list)
        if cpu_cnt + skip_cnt > cpu_list_len:
            raise RuntimeError(u"cpu_cnt + skip_cnt > length(cpu list).")

        if cpu_cnt == 0:
            cpu_cnt = cpu_list_len - skip_cnt

        if smt_used:
            # Return the selected physical CPUs and their thread siblings.
            cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
            cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
            cpu_list = cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]
            cpu_list_ex = cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]
            cpu_list.extend(cpu_list_ex)
        else:
            cpu_list = cpu_list[skip_cnt:skip_cnt + cpu_cnt]

        return cpu_list
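
    # Usage sketch (illustrative, same hypothetical layout as above):
    #   CpuUtils.cpu_slice_of_list_per_node(node, 0, skip_cnt=1, cpu_cnt=2)
    #   # -> [1, 2]
    #   CpuUtils.cpu_slice_of_list_per_node(
    #       node, 0, skip_cnt=1, cpu_cnt=2, smt_used=True)
    #   # -> [1, 2, 29, 30], the same physical cores plus their siblings.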

    @staticmethod
    def cpu_list_per_node_str(
            node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u",", smt_used=False):
        """Return string of node related list of CPU numbers.

        :param node: Node dictionary with cpuinfo.
        :param cpu_node: Numa node number.
        :param skip_cnt: Skip first "skip_cnt" CPUs.
        :param cpu_cnt: Count of cpus to return, if 0 then return all.
        :param sep: Separator, default: ",".
        :param smt_used: True - we want to use SMT, otherwise false.
        :type node: dict
        :type cpu_node: int
        :type skip_cnt: int
        :type cpu_cnt: int
        :type sep: str
        :type smt_used: bool
        :returns: Cpu numbers related to numa from argument.
        :rtype: str
        """
        cpu_list = CpuUtils.cpu_slice_of_list_per_node(
            node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
            smt_used=smt_used
        )
        return sep.join(str(cpu) for cpu in cpu_list)

    @staticmethod
    def cpu_range_per_node_str(
            node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u"-", smt_used=False):
        """Return string of node related range of CPU numbers, e.g. 0-4.

        :param node: Node dictionary with cpuinfo.
        :param cpu_node: Numa node number.
        :param skip_cnt: Skip first "skip_cnt" CPUs.
        :param cpu_cnt: Count of cpus to return, if 0 then return all.
        :param sep: Separator, default: "-".
        :param smt_used: True - we want to use SMT, otherwise false.
        :type node: dict
        :type cpu_node: int
        :type skip_cnt: int
        :type cpu_cnt: int
        :type sep: str
        :type smt_used: bool
        :returns: String of node related range of CPU numbers.
        :rtype: str
        """
        cpu_list = CpuUtils.cpu_slice_of_list_per_node(
            node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
            smt_used=smt_used
        )
        if smt_used:
            cpu_list_len = len(cpu_list)
            cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
            cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
            cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
                f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
        else:
            cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"

        return cpu_range
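
    # Usage sketch (illustrative, same hypothetical layout):
    #   CpuUtils.cpu_range_per_node_str(node, 0, skip_cnt=1, cpu_cnt=3)
    #   # -> u"1-3"
    #   CpuUtils.cpu_range_per_node_str(
    #       node, 0, skip_cnt=1, cpu_cnt=3, smt_used=True)
    #   # -> u"1-3,29-31", the physical range plus the sibling range.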

    @staticmethod
    def cpu_slice_of_list_for_nf(
            node, cpu_node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
            nf_dtc=1, nf_mtcr=2, nf_dtcr=1, skip_cnt=0):
        """Return list of DUT node related list of CPU numbers. The main
        computing unit is physical core count.

        :param node: DUT node.
        :param cpu_node: Numa node number.
        :param nf_chains: Number of NF chains.
        :param nf_nodes: Number of NF nodes in chain.
        :param nf_chain: Chain number indexed from 1.
        :param nf_node: Node number indexed from 1.
        :param nf_dtc: Amount of physical cores for NF data plane.
        :param nf_mtcr: NF main thread per core ratio.
        :param nf_dtcr: NF data plane thread per core ratio.
        :param skip_cnt: Skip first "skip_cnt" CPUs.
        :type node: dict
        :type cpu_node: int
        :type nf_chains: int
        :type nf_nodes: int
        :type nf_chain: int
        :type nf_node: int
        :type nf_dtc: int or float
        :type nf_mtcr: int
        :type nf_dtcr: int
        :type skip_cnt: int
        :returns: List of CPUs allocated to NF.
        :rtype: list
        :raises RuntimeError: If we require more cpus than available or if
            placement is not possible due to wrong parameters.
        """
        if not 1 <= nf_chain <= nf_chains:
            raise RuntimeError(u"ChainID is out of range!")
        if not 1 <= nf_node <= nf_nodes:
            raise RuntimeError(u"NodeID is out of range!")

        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
        cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
        # CPU thread sibling offset.
        sib = len(cpu_list) // CpuUtils.NR_OF_THREADS

        dtc_is_integer = isinstance(nf_dtc, int)
        if not smt_used and not dtc_is_integer:
            raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
        if not dtc_is_integer:
            nf_dtc = int(nf_dtc * CpuUtils.NR_OF_THREADS)

        mt_req = ((nf_chains * nf_nodes) + nf_mtcr - 1) // nf_mtcr
        dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr

        if (skip_cnt + mt_req + dt_req) > (sib if smt_used else len(cpu_list)):
            raise RuntimeError(u"Not enough CPU cores available for placement!")

        offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
        mt_skip = skip_cnt + (offset % mt_req)
        dt_skip = skip_cnt + mt_req + (offset % dt_req) * nf_dtc

        result = cpu_list[dt_skip:dt_skip + nf_dtc]
        if smt_used:
            if (offset // mt_req) & 1:  # check oddness
                mt_skip += sib

            dt_skip += sib
            if dtc_is_integer:
                result.extend(cpu_list[dt_skip:dt_skip + nf_dtc])
            elif (offset // dt_req) & 1:  # check oddness
                result = cpu_list[dt_skip:dt_skip + nf_dtc]

        result[0:0] = cpu_list[mt_skip:mt_skip + 1]
        return result
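
    # Worked example (illustrative arithmetic, not a definitive allocation):
    # with 2 chains of 2 NFs, nf_mtcr=2, nf_dtcr=1, nf_dtc=1, skip_cnt=0,
    # SMT disabled and numa-0 CPUs [0..13]:
    #   mt_req = (4 + 2 - 1) // 2 = 2 cores shared by the 4 NF main threads,
    #   dt_req = (4 + 1 - 1) // 1 = 4 dedicated data-plane cores,
    # so NF (nf_chain=1, nf_node=2) has offset 1, gets its main thread on
    # CPU 0 + (1 % 2) = 1 and its data-plane thread on CPU 2 + (1 % 4) = 3,
    # i.e. the returned list is [1, 3].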

    @staticmethod
    def get_affinity_af_xdp(
            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
        """Get affinity for AF_XDP interface. Result will be used to pin IRQs.

        :param node: Topology node.
        :param pf_key: Topology interface.
        :param cpu_skip_cnt: Amount of CPU cores to skip.
        :param cpu_cnt: CPU threads count.
        :type node: dict
        :type pf_key: str
        :type cpu_skip_cnt: int
        :type cpu_cnt: int
        :returns: List of CPUs allocated to AF_XDP interface.
        :rtype: list
        """
        if pf_key:
            cpu_node = Topology.get_interface_numa_node(node, pf_key)
        else:
            cpu_node = 0

        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
        if smt_used:
            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS

        return CpuUtils.cpu_slice_of_list_per_node(
            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
            smt_used=smt_used
        )

    @staticmethod
    def get_affinity_nf(
            nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
            vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
        """Get affinity of NF (network function). Result will be used to
        compute the amount of CPUs and also affinity.

        :param nodes: Physical topology nodes.
        :param node: SUT node.
        :param nf_chains: Number of NF chains.
        :param nf_nodes: Number of NF nodes in chain.
        :param nf_chain: Chain number indexed from 1.
        :param nf_node: Node number indexed from 1.
        :param vs_dtc: Amount of physical cores for vswitch data plane.
        :param nf_dtc: Amount of physical cores for NF data plane.
        :param nf_mtcr: NF main thread per core ratio.
        :param nf_dtcr: NF data plane thread per core ratio.
        :type nodes: dict
        :type node: str
        :type nf_chains: int
        :type nf_nodes: int
        :type nf_chain: int
        :type nf_node: int
        :type vs_dtc: int
        :type nf_dtc: int or float
        :type nf_mtcr: int
        :type nf_dtcr: int
        :returns: List of CPUs allocated to NF.
        :rtype: list
        """
        skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc

        interface_list = list()
        interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if1}}"))
        interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if2}}"))

        cpu_node = Topology.get_interfaces_numa_node(
            nodes[node], *interface_list)

        return CpuUtils.cpu_slice_of_list_for_nf(
            node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
            nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
            nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
        )

    @staticmethod
    def get_affinity_trex(
            node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
        """Get affinity for T-Rex. Result will be used to pin T-Rex threads.

        :param node: TG node.
        :param if1_pci: TG first interface.
        :param if2_pci: TG second interface.
        :param tg_mtc: TG main thread count.
        :param tg_dtc: TG dataplane thread count.
        :param tg_ltc: TG latency thread count.
        :type node: dict
        :type if1_pci: str
        :type if2_pci: str
        :type tg_mtc: int
        :type tg_dtc: int
        :type tg_ltc: int
        :returns: List of CPUs allocated to T-Rex including numa node.
        :rtype: int, int, int, list
        """
        interface_list = [if1_pci, if2_pci]
        cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)

        master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
            node, cpu_node, skip_cnt=0, cpu_cnt=tg_mtc,
            smt_used=False
        )
        threads = CpuUtils.cpu_slice_of_list_per_node(
            node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
            smt_used=False
        )
        latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
            node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
            smt_used=False
        )
        return master_thread_id[0], latency_thread_id[0], cpu_node, threads
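
    # Usage sketch (illustrative): with tg_mtc=1, tg_dtc=8, tg_ltc=1 and the
    # hypothetical numa-0 CPU list [0..13], this would return something like
    # (0, 9, 0, [1, 2, ..., 8]): master thread CPU, latency thread CPU,
    # numa node and the list of data-plane thread CPUs, in that order.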

    @staticmethod
    def get_affinity_iperf(
            node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
        """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.

        :param node: Topology node.
        :param pf_key: Topology interface.
        :param cpu_skip_cnt: Amount of CPU cores to skip.
        :param cpu_cnt: CPU threads count.
        :type node: dict
        :type pf_key: str
        :type cpu_skip_cnt: int
        :type cpu_cnt: int
        :returns: Range string of CPUs allocated to iPerf3.
        :rtype: str
        """
        if pf_key:
            cpu_node = Topology.get_interface_numa_node(node, pf_key)
        else:
            cpu_node = 0

        return CpuUtils.cpu_range_per_node_str(
            node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
            smt_used=False
        )

    @staticmethod
    def get_affinity_vhost(
            node, pf_key, skip_cnt=0, cpu_cnt=1):
        """Get affinity for vhost. Result will be used to pin vhost threads.

        :param node: Topology node.
        :param pf_key: Topology interface.
        :param skip_cnt: Amount of CPU cores to skip.
        :param cpu_cnt: CPU threads count.
        :returns: List of CPUs allocated to vhost process.
        :rtype: list
        """
        if pf_key:
            cpu_node = Topology.get_interface_numa_node(node, pf_key)
        else:
            cpu_node = 0

        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
        if smt_used:
            cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS

        return CpuUtils.cpu_slice_of_list_per_node(
            node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
            smt_used=False
        )

    @staticmethod
    def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
        """Get idle CPU List.

        :param node: Node dictionary with cpuinfo.
        :param cpu_node: Numa node number.
        :param smt_used: True - we want to use SMT, otherwise false.
        :param cpu_alloc_str: vpp used cores.
        :param sep: Separator, default: ",".
        :type node: dict
        :type cpu_node: int
        :type smt_used: bool
        :type cpu_alloc_str: str
        :type sep: str
        :returns: List of idle CPUs.
        :rtype: list
        """
        cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
        cpu_idle_list = [i for i in cpu_list
                         if str(i) not in cpu_alloc_str.split(sep)]
        return cpu_idle_list
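
    # Usage sketch (illustrative): if numa 0 holds CPUs [0..13] and VPP was
    # given cpu_alloc_str=u"1,2,3", the returned idle list would be
    # [0, 4, 5, ..., 13].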

    @staticmethod
    def get_affinity_vswitch(
            nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
        """Get affinity for vswitch.

        :param nodes: Topology nodes.
        :param node: Topology node string.
        :param phy_cores: Number of physical cores to allocate.
        :param rx_queues: Number of RX queues. (Optional, Default: None)
        :param rxd: Number of RX descriptors. (Optional, Default: None)
        :param txd: Number of TX descriptors. (Optional, Default: None)
        :type nodes: dict
        :type node: str
        :type phy_cores: int
        :type rx_queues: int
        :type rxd: int
        :type txd: int
        :returns: Compute resource information dictionary.
        :rtype: dict
        """
        # Number of Data Plane physical cores.
        dp_cores_count = BuiltIn().get_variable_value(
            f"${{dp_cores_count}}", phy_cores
        )
        # Number of Feature Plane physical cores.
        fp_cores_count = BuiltIn().get_variable_value(
            f"${{fp_cores_count}}", phy_cores - dp_cores_count
        )
        # Ratio between RX queues and data plane threads.
        rxq_ratio = BuiltIn().get_variable_value(
            f"${{rxq_ratio}}", 1
        )

        dut_pf_keys = BuiltIn().get_variable_value(
            f"${{{node}_pf_keys}}"
        )
        # SMT override in case of non standard test cases.
        smt_used = BuiltIn().get_variable_value(
            f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
        )

        cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
        skip_cnt = Constants.CPU_CNT_SYSTEM
        cpu_main = CpuUtils.cpu_list_per_node_str(
            nodes[node], cpu_node,
            skip_cnt=skip_cnt,
            cpu_cnt=Constants.CPU_CNT_MAIN,
            smt_used=False
        )
        skip_cnt += Constants.CPU_CNT_MAIN
        cpu_dp = CpuUtils.cpu_list_per_node_str(
            nodes[node], cpu_node,
            skip_cnt=skip_cnt,
            cpu_cnt=int(dp_cores_count),
            smt_used=smt_used
        ) if int(dp_cores_count) else u""
        skip_cnt = skip_cnt + int(dp_cores_count)
        cpu_fp = CpuUtils.cpu_list_per_node_str(
            nodes[node], cpu_node,
            skip_cnt=skip_cnt,
            cpu_cnt=int(fp_cores_count),
            smt_used=smt_used
        ) if int(fp_cores_count) else u""

        fp_count_int = \
            int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
            else int(fp_cores_count)
        dp_count_int = \
            int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
            else int(dp_cores_count)

        rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
        rxq_count_int = 1 if not rxq_count_int else rxq_count_int

        compute_resource_info = dict()
        compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
        compute_resource_info[u"smt_used"] = smt_used
        compute_resource_info[u"cpu_main"] = cpu_main
        compute_resource_info[u"cpu_dp"] = cpu_dp
        compute_resource_info[u"cpu_fp"] = cpu_fp
        compute_resource_info[u"cpu_wt"] = \
            u",".join(filter(None, [cpu_dp, cpu_fp]))
        compute_resource_info[u"cpu_alloc_str"] = \
            u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
        compute_resource_info[u"cpu_count_int"] = \
            int(dp_cores_count) + int(fp_cores_count)
        compute_resource_info[u"rxd_count_int"] = rxd
        compute_resource_info[u"txd_count_int"] = txd
        compute_resource_info[u"rxq_count_int"] = rxq_count_int
        compute_resource_info[u"fp_count_int"] = fp_count_int
        compute_resource_info[u"dp_count_int"] = dp_count_int

        return compute_resource_info
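

# Note (illustrative, not part of the library): the returned dictionary feeds
# VPP startup configuration in the calling Robot keywords. A rough sketch of
# its shape for phy_cores=2, SMT disabled, numa-0 CPUs [0..13], assuming
# Constants.CPU_CNT_SYSTEM and Constants.CPU_CNT_MAIN are both 1:
#   {
#       u"cpu_main": u"1", u"cpu_dp": u"2,3", u"cpu_fp": u"",
#       u"cpu_wt": u"2,3", u"cpu_alloc_str": u"1,2,3",
#       u"cpu_count_int": 2, u"dp_count_int": 2, u"fp_count_int": 0,
#       u"rxq_count_int": 2, u"rxd_count_int": None, u"txd_count_int": None,
#       u"smt_used": False, u"buffers_numa": 107520,
#   }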