1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """CPU utilities library."""
16 from robot.libraries.BuiltIn import BuiltIn
18 from resources.libraries.python.Constants import Constants
19 from resources.libraries.python.ssh import exec_cmd_no_error
20 from resources.libraries.python.topology import Topology
# Explicit public API of this module: only the CpuUtils class is exported.
__all__ = ["CpuUtils"]
class CpuUtils(object):
    """CPU utilities.

    Works on top of the per-node ``cpuinfo`` structure, which is the parsed
    output of ``lscpu -p`` (one list of ints per logical CPU; field 0 is the
    CPU id, field 3 is the NUMA node, the last four fields are the
    L1d/L1i/L2/L3 cache ids).
    """

    # Number of hardware threads (SMT siblings) per physical core.
    NR_OF_THREADS = 2

    @staticmethod
    def __str2int(string):
        """Conversion from string to integer, 0 in case of empty string.

        :param string: Input string.
        :type string: str
        :returns: Integer converted from string, 0 in case of ValueError.
        :rtype: int
        """
        try:
            return int(string)
        except ValueError:
            return 0

    @staticmethod
    def is_smt_enabled(cpu_info):
        """Uses CPU mapping to find out if SMT is enabled or not. If SMT is
        enabled, the L1d,L1i,L2,L3 setting is the same for two processors.
        These two processors are two threads of one core.

        :param cpu_info: CPU info, the output of "lscpu -p".
        :type cpu_info: list
        :returns: True if SMT is enabled, False if SMT is disabled.
        :rtype: bool
        """
        # Last four fields of each "lscpu -p" row are the cache ids.
        cpu_mems = [item[-4:] for item in cpu_info]
        # Floor division so the result stays an int (usable as a slice
        # index) on both Python 2 and Python 3.
        cpu_mems_len = len(cpu_mems) // CpuUtils.NR_OF_THREADS
        count = 0
        # With SMT, every cache tuple in the first half of the list has a
        # sibling duplicate in the second half.
        for cpu_mem in cpu_mems[:cpu_mems_len]:
            if cpu_mem in cpu_mems[cpu_mems_len:]:
                count += 1
        return bool(count == cpu_mems_len)

    @staticmethod
    def get_cpu_info_from_all_nodes(nodes):
        """Assuming all nodes are Linux nodes, retrieve the following
        cpu information from all nodes:
            - cpu architecture
            - cpu layout

        :param nodes: DICT__nodes from Topology.DICT__nodes.
        :type nodes: dict
        :raises RuntimeError: If an ssh command retrieving cpu information
            fails.
        """
        for node in nodes.values():
            stdout, _ = exec_cmd_no_error(node, 'uname -m')
            node['arch'] = stdout.strip()
            stdout, _ = exec_cmd_no_error(node, 'lscpu -p')
            node['cpuinfo'] = list()
            for line in stdout.split("\n"):
                # Skip the "#" header lines emitted by "lscpu -p".
                if line and line[0] != "#":
                    node['cpuinfo'].append([CpuUtils.__str2int(x) for x in
                                            line.split(",")])

    @staticmethod
    def cpu_node_count(node):
        """Return count of numa nodes.

        :param node: Targeted node.
        :type node: dict
        :returns: Count of numa nodes.
        :rtype: int
        :raises RuntimeError: If node cpuinfo is not available.
        """
        cpu_info = node.get("cpuinfo")
        if cpu_info is None:
            raise RuntimeError("Node cpuinfo not available.")
        # Field 3 is the NUMA node id; rows are sorted, so the last row
        # holds the highest id.
        return node["cpuinfo"][-1][3] + 1

    @staticmethod
    def cpu_list_per_node(node, cpu_node, smt_used=False):
        """Return node related list of CPU numbers.

        :param node: Node dictionary with cpuinfo.
        :param cpu_node: Numa node number.
        :param smt_used: True - we want to use SMT, otherwise false.
        :type node: dict
        :type cpu_node: int
        :type smt_used: bool
        :returns: List of cpu numbers related to numa from argument.
        :rtype: list of int
        :raises RuntimeError: If node cpuinfo is not available
            or if SMT is not enabled.
        """
        cpu_node = int(cpu_node)
        cpu_info = node.get("cpuinfo")
        if cpu_info is None:
            raise RuntimeError("Node cpuinfo not available.")

        smt_enabled = CpuUtils.is_smt_enabled(cpu_info)
        if not smt_enabled and smt_used:
            raise RuntimeError("SMT is not enabled.")

        # Collect CPU ids (field 0) belonging to the requested NUMA node
        # (field 3).
        cpu_list = []
        for cpu in cpu_info:
            if cpu[3] == cpu_node:
                cpu_list.append(cpu[0])

        if smt_enabled and not smt_used:
            # SMT present but not wanted: keep only the first thread of
            # each physical core (the first half of the list).
            cpu_list_len = len(cpu_list)
            cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]

        return cpu_list

    @staticmethod
    def cpu_slice_of_list_per_node(node, cpu_node, skip_cnt=0, cpu_cnt=0,
                                   smt_used=False):
        """Return node related subset of the list of CPU numbers.

        :param node: Node dictionary with cpuinfo.
        :param cpu_node: Numa node number.
        :param skip_cnt: Skip first "skip_cnt" CPUs.
        :param cpu_cnt: Count of cpus to return, if 0 then return all.
        :param smt_used: True - we want to use SMT, otherwise false.
        :type node: dict
        :type cpu_node: int
        :type skip_cnt: int
        :type cpu_cnt: int
        :type smt_used: bool
        :returns: Cpu numbers related to numa from argument.
        :rtype: list
        :raises RuntimeError: If we require more cpus than available.
        """
        cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)

        cpu_list_len = len(cpu_list)
        if cpu_cnt + skip_cnt > cpu_list_len:
            raise RuntimeError("cpu_cnt + skip_cnt > length(cpu list).")

        if cpu_cnt == 0:
            cpu_cnt = cpu_list_len - skip_cnt

        if smt_used:
            # Take the same slice from both the first-thread half and the
            # sibling-thread half of the list.
            cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
            cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
            cpu_list = cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]
            cpu_list.extend(cpu_list_1[skip_cnt:skip_cnt + cpu_cnt])
        else:
            cpu_list = cpu_list[skip_cnt:skip_cnt + cpu_cnt]

        return cpu_list

    @staticmethod
    def cpu_list_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",",
                              smt_used=False):
        """Return string of node related list of CPU numbers.

        :param node: Node dictionary with cpuinfo.
        :param cpu_node: Numa node number.
        :param skip_cnt: Skip first "skip_cnt" CPUs.
        :param cpu_cnt: Count of cpus to return, if 0 then return all.
        :param sep: Separator, default: 1,2,3,4,....
        :param smt_used: True - we want to use SMT, otherwise false.
        :type node: dict
        :type cpu_node: int
        :type skip_cnt: int
        :type cpu_cnt: int
        :type sep: str
        :type smt_used: bool
        :returns: Cpu numbers related to numa from argument.
        :rtype: str
        """
        cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
                                                       skip_cnt=skip_cnt,
                                                       cpu_cnt=cpu_cnt,
                                                       smt_used=smt_used)
        return sep.join(str(cpu) for cpu in cpu_list)

    @staticmethod
    def cpu_range_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-",
                               smt_used=False):
        """Return string of node related range of CPU numbers, e.g. 0-4.

        :param node: Node dictionary with cpuinfo.
        :param cpu_node: Numa node number.
        :param skip_cnt: Skip first "skip_cnt" CPUs.
        :param cpu_cnt: Count of cpus to return, if 0 then return all.
        :param sep: Separator, default: "-".
        :param smt_used: True - we want to use SMT, otherwise false.
        :type node: dict
        :type cpu_node: int
        :type skip_cnt: int
        :type cpu_cnt: int
        :type sep: str
        :type smt_used: bool
        :returns: String of node related range of CPU numbers.
        :rtype: str
        """
        cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
                                                       skip_cnt=skip_cnt,
                                                       cpu_cnt=cpu_cnt,
                                                       smt_used=smt_used)
        if smt_used:
            # Emit two ranges: first threads and their SMT siblings.
            cpu_list_len = len(cpu_list)
            cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
            cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
            cpu_range = "{}{}{},{}{}{}".format(cpu_list_0[0], sep,
                                               cpu_list_0[-1],
                                               cpu_list_1[0], sep,
                                               cpu_list_1[-1])
        else:
            cpu_range = "{}{}{}".format(cpu_list[0], sep, cpu_list[-1])

        return cpu_range

    @staticmethod
    def cpu_slice_of_list_for_nf(node, cpu_node, nf_chains=1, nf_nodes=1,
                                 nf_chain=1, nf_node=1, nf_dtc=1, nf_mtcr=2,
                                 nf_dtcr=1, skip_cnt=0):
        """Return list of DUT node related list of CPU numbers. The main
        computing unit is physical core count.

        :param node: DUT node.
        :param cpu_node: Numa node number.
        :param nf_chains: Number of NF chains.
        :param nf_nodes: Number of NF nodes in chain.
        :param nf_chain: Chain number indexed from 1.
        :param nf_node: Node number indexed from 1.
        :param nf_dtc: Amount of physical cores for NF dataplane.
        :param nf_mtcr: NF main thread per core ratio.
        :param nf_dtcr: NF dataplane thread per core ratio.
        :param skip_cnt: Skip first "skip_cnt" CPUs.
        :type node: dict
        :type cpu_node: int
        :type nf_chains: int
        :type nf_nodes: int
        :type nf_chain: int
        :type nf_node: int
        :type nf_dtc: int or float
        :type nf_mtcr: int
        :type nf_dtcr: int
        :type skip_cnt: int
        :returns: List of CPUs allocated to NF.
        :rtype: list
        :raises RuntimeError: If we require more cpus than available or if
            placement is not possible due to wrong parameters.
        """
        if nf_chain - 1 >= nf_chains:
            raise RuntimeError("ChainID is higher than total number of chains!")
        if nf_node - 1 >= nf_nodes:
            raise RuntimeError("NodeID is higher than chain nodes!")

        smt_used = CpuUtils.is_smt_enabled(node['cpuinfo'])
        cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
        # CPU thread sibling offset: a CPU at index i has its SMT sibling
        # at index i + sib.
        sib = len(cpu_list) // CpuUtils.NR_OF_THREADS

        if not smt_used and not isinstance(nf_dtc, int):
            raise RuntimeError("Cannot allocate if SMT is not enabled!")
        # TODO: Workaround as we are using physical core as main unit, we must
        # adjust number of physical dataplane cores in case of float for
        # further array referencing. As rounding method in Py2.7 and Py3.x
        # differs, we are using static mapping. This can be rewritten using
        # flat arrays and different logic (from Physical core unit to Logical
        # core unit).
        dtc = 1 if not isinstance(nf_dtc, int) else nf_dtc

        # Ceiling-divide total NF count by the per-core ratios to get the
        # number of cores required for main resp. dataplane threads.
        mt_req = ((nf_chains * nf_nodes) + nf_mtcr - 1) // nf_mtcr
        dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr
        cpu_req = skip_cnt + mt_req + dt_req

        if smt_used and cpu_req > len(cpu_list) // CpuUtils.NR_OF_THREADS:
            raise RuntimeError("Not enough CPU cores available for placement!")
        elif not smt_used and cpu_req > len(cpu_list):
            raise RuntimeError("Not enough CPU cores available for placement!")

        # Linear index of this NF within the chain/node grid.
        offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
        try:
            # Odd wrap-arounds land on the sibling-thread half of the list.
            mt_odd = (offset // mt_req) & 1
            mt_skip = skip_cnt + (offset % mt_req)
            dt_odd = (offset // dt_req) & 1
            dt_skip = skip_cnt + mt_req + (offset % dt_req) * dtc
        except ZeroDivisionError:
            raise RuntimeError("Invalid placement combination!")

        if smt_used:
            mt_list = cpu_list[mt_skip + sib:mt_skip + sib + 1] \
                if mt_odd else cpu_list[mt_skip:mt_skip + 1]
            dt_list = cpu_list[dt_skip + sib:dt_skip + sib + dtc] \
                if dt_odd else cpu_list[dt_skip:dt_skip + dtc]
            if isinstance(nf_dtc, int):
                # Whole physical cores requested: give both SMT siblings
                # of each dataplane core.
                dt_list = cpu_list[dt_skip:dt_skip + dtc]
                dt_list += cpu_list[dt_skip + sib:dt_skip + sib + dtc]
        else:
            mt_list = cpu_list[mt_skip:mt_skip + 1]
            dt_list = cpu_list[dt_skip:dt_skip + dtc]

        return mt_list + dt_list

    @staticmethod
    def get_affinity_nf(nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1,
                        nf_node=1, vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
        """Get affinity of NF (network function). Result will be used to
        compute the amount of CPUs and also affinity.

        :param nodes: Physical topology nodes.
        :param node: SUT node.
        :param nf_chains: Number of NF chains.
        :param nf_nodes: Number of NF nodes in chain.
        :param nf_chain: Chain number indexed from 1.
        :param nf_node: Node number indexed from 1.
        :param vs_dtc: Amount of physical cores for vswitch dataplane.
        :param nf_dtc: Amount of physical cores for NF dataplane.
        :param nf_mtcr: NF main thread per core ratio.
        :param nf_dtcr: NF dataplane thread per core ratio.
        :type nodes: dict
        :type node: str
        :type nf_chains: int
        :type nf_nodes: int
        :type nf_chain: int
        :type nf_node: int
        :type vs_dtc: int
        :type nf_dtc: int or float
        :type nf_mtcr: int
        :type nf_dtcr: int
        :returns: List of CPUs allocated to NF.
        :rtype: list
        """
        # Reserve system, vswitch main and vswitch dataplane cores before
        # placing NF threads.
        skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc

        interface_list = list()
        interface_list.append(
            BuiltIn().get_variable_value('${{{node}_if1}}'.format(node=node)))
        interface_list.append(
            BuiltIn().get_variable_value('${{{node}_if2}}'.format(node=node)))

        # Place NF threads on the NUMA node of the test interfaces.
        cpu_node = Topology.get_interfaces_numa_node(
            nodes[node], *interface_list)

        return CpuUtils.cpu_slice_of_list_for_nf(
            node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
            nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
            nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt)