vpp_config: Updates for 20.05
1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Library that supports Auto Configuration."""
15 from __future__ import absolute_import, division, print_function
16
17 import logging
18 import os
19 import re
20 from ipaddress import ip_address, ip_network
21
22 import yaml
23
24 from vpplib.VPPUtil import VPPUtil
25 from vpplib.VppPCIUtil import VppPCIUtil
26 from vpplib.VppHugePageUtil import VppHugePageUtil
27 from vpplib.CpuUtils import CpuUtils
28 from vpplib.VppGrubUtil import VppGrubUtil
29 from vpplib.QemuUtils import QemuUtils
30
31 #  Python2/3 compatible
32 try:
33     input = raw_input  # noqa
34 except NameError:
35     pass
36
37 __all__ = ["AutoConfig"]
38
39 # Constants
40 MIN_SYSTEM_CPUS = 2
41 MIN_TOTAL_HUGE_PAGES = 1024
42 MAX_PERCENT_FOR_HUGE_PAGES = 70
43
44 IPERFVM_XML = 'configs/iperf-vm.xml'
45 IPERFVM_IMAGE = 'images/xenial-mod.img'
46 IPERFVM_ISO = 'configs/cloud-config.iso'
47
48
49 class AutoConfig(object):
50     """Auto Configuration Tools"""
51
52     def __init__(self, rootdir, filename, clean=False):
53         """
54         The Auto Configure class.
55
56         :param rootdir: The root directory for all the auto configuration files
57         :param filename: The autoconfiguration file
58         :param clean: When set initialize the nodes from the auto-config file
59         :type rootdir: str
60         :type filename: str
61         :type clean: bool
62         """
63         self._autoconfig_filename = rootdir + filename
64         self._rootdir = rootdir
65         self._metadata = {}
66         self._nodes = {}
67         self._vpp_devices_node = {}
68         self._hugepage_config = ""
69         self._clean = clean
70         self._loadconfig()
71         self._sockfilename = ""
72
73     def get_nodes(self):
74         """
75         Returns the nodes dictionary.
76
77         :returns: The nodes
78         :rtype: dictionary
79         """
80
81         return self._nodes
82
83     @staticmethod
84     def _autoconfig_backup_file(filename):
85         """
86         Create a backup file.
87
88         :param filename: The file to backup
89         :type filename: str
90         """
91
92         # If a backup copy (.orig) does not exist, create one
93         ofile = filename + '.orig'
94         (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
95         if ret != 0:
96             logging.debug(stderr)
97             if stdout.strip('\n') != ofile:
98                 cmd = 'sudo cp {} {}'.format(filename, ofile)
99                 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
100                 if ret != 0:
101                     logging.debug(stderr)
102
103     # noinspection PyBroadException
104     @staticmethod
105     def _ask_user_ipv4():
106         """
107         Asks the user for an IPv4 address and prefix length. If no prefix
108         length is given, a netmask is requested and converted to one.
109
110         :returns: IP address with cidr
111         :rtype: str
112         """
113
114         while True:
115             answer = input("Please enter the IPv4 Address [n.n.n.n/n]: ")
116             try:
117                 ipinput = answer.split('/')
118                 ipaddr = ip_address(ipinput[0])
119                 if len(ipinput) > 1:
120                     plen = answer.split('/')[1]
121                 else:
122                     answer = input("Please enter the netmask [n.n.n.n]: ")
123                     plen = ip_network(u'0.0.0.0/{}'.format(answer)).prefixlen
124                 return '{}/{}'.format(ipaddr, plen)
125             except ValueError:
126                 print("Please enter a valid IPv4 address.")
127
128     @staticmethod
129     def _ask_user_range(question, first, last, default):
130         """
131         Asks the user for a number within a range.
132         default is returned if return is entered.
133
134         :param question: Text of a question.
135         :param first: First number in the range
136         :param last: Last number in the range
137         :param default: The value returned when return is entered
138         :type question: string
139         :type first: int
140         :type last: int
141         :type default: int
142         :returns: The answer to the question
143         :rtype: int
144         """
145
146         while True:
147             answer = input(question)
148             if answer == '':
149                 answer = default
150                 break
151             if re.match(r'^[0-9]+$', answer):
152                 if int(answer) in range(first, last + 1):
153                     break
154                 else:
155                     print("Please enter a value between {} and {} or Return.".
156                           format(first, last))
157             else:
158                 print("Please enter a number between {} and {} or Return.".
159                       format(first, last))
160
161         return int(answer)
162
163     @staticmethod
164     def _ask_user_yn(question, default):
165         """
166         Asks the user for a yes or no question.
167
168         :param question: Text of a question.
169         :param default: The value returned when return is entered
170         :type question: string
171         :type default: string
172         :returns: The answer to the question
173         :rtype: string
174         """
175
176         input_valid = False
177         default = default.lower()
178         answer = ''
179         while not input_valid:
180             answer = input(question)
181             if answer == '':
182                 answer = default
183             if answer and answer[0].lower() in ('y', 'n'):
184                 input_valid = True
185                 answer = answer[0].lower()
186             else:
187                 print("Please answer Y, N or Return.")
188
189         return answer
190
191     def _loadconfig(self):
192         """
193         Load the testbed configuration, given the auto configuration file.
194
195         """
196
197         # Get the Topology, from the topology layout file
198         topo = {}
199         with open(self._autoconfig_filename, 'r') as stream:
200             try:
201                 topo = yaml.safe_load(stream)
202                 if 'metadata' in topo:
203                     self._metadata = topo['metadata']
204             except yaml.YAMLError as exc:
205                 raise RuntimeError(
206                     "Couldn't read the Auto config file {}: {}.".format(
207                         self._autoconfig_filename, exc))
208
209         systemfile = self._rootdir + self._metadata['system_config_file']
210         if self._clean is False and os.path.isfile(systemfile):
211             with open(systemfile, 'r') as sysstream:
212                 try:
213                     systopo = yaml.safe_load(sysstream)
214                     if 'nodes' in systopo:
215                         self._nodes = systopo['nodes']
216                 except yaml.YAMLError as sysexc:
217                     raise RuntimeError(
218                         "Couldn't read the System config file {}: {}.".format(
219                             systemfile, sysexc))
220         else:
221             # Get the nodes from Auto Config
222             if 'nodes' in topo:
223                 self._nodes = topo['nodes']
224
225         # Set the root directory in all the nodes
226         for i in self._nodes.items():
227             node = i[1]
228             node['rootdir'] = self._rootdir
229
230     def updateconfig(self):
231         """
232         Update the testbed configuration, given the auto configuration file.
233         We will write the system configuration file with the current node
234         information
235
236         """
237
238         # Initialize the yaml data
239         ydata = {'metadata': self._metadata, 'nodes': self._nodes}
240
241         # Write the system config file
242         filename = self._rootdir + self._metadata['system_config_file']
243         with open(filename, 'w') as yamlfile:
244             yaml.dump(ydata, yamlfile)
245
246     def _update_auto_config(self):
247         """
248         Write the auto configuration file with the new configuration data,
249         input from the user.
250
251         """
252
253         # Initialize the yaml data
254         nodes = {}
255         with open(self._autoconfig_filename, 'r') as stream:
256             try:
257                 ydata = yaml.safe_load(stream)
258                 if 'nodes' in ydata:
259                     nodes = ydata['nodes']
260             except yaml.YAMLError as exc:
261                 print(exc)
262                 return
263
264         for i in nodes.items():
265             key = i[0]
266             node = i[1]
267
268             # Interfaces
269             node['interfaces'] = {}
270             for item in self._nodes[key]['interfaces'].items():
271                 port = item[0]
272                 interface = item[1]
273
274                 node['interfaces'][port] = {}
275                 addr = '{}'.format(interface['pci_address'])
276                 node['interfaces'][port]['pci_address'] = addr
277                 if 'mac_address' in interface:
278                     node['interfaces'][port]['mac_address'] = \
279                         interface['mac_address']
280
281             if 'total_other_cpus' in self._nodes[key]['cpu']:
282                 node['cpu']['total_other_cpus'] = \
283                     self._nodes[key]['cpu']['total_other_cpus']
284             if 'total_vpp_cpus' in self._nodes[key]['cpu']:
285                 node['cpu']['total_vpp_cpus'] = \
286                     self._nodes[key]['cpu']['total_vpp_cpus']
287             if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
288                 node['cpu']['reserve_vpp_main_core'] = \
289                     self._nodes[key]['cpu']['reserve_vpp_main_core']
290
291             # TCP
292             if 'active_open_sessions' in self._nodes[key]['tcp']:
293                 node['tcp']['active_open_sessions'] = \
294                     self._nodes[key]['tcp']['active_open_sessions']
295             if 'passive_open_sessions' in self._nodes[key]['tcp']:
296                 node['tcp']['passive_open_sessions'] = \
297                     self._nodes[key]['tcp']['passive_open_sessions']
298
299             # Huge pages
300             node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
301
302         # Write the auto config config file
303         with open(self._autoconfig_filename, 'w') as yamlfile:
304             yaml.dump(ydata, yamlfile)
305
306     def apply_huge_pages(self):
307         """
308         Apply the huge page config
309
310         """
311
312         for i in self._nodes.items():
313             node = i[1]
314
315             hpg = VppHugePageUtil(node)
316             hpg.hugepages_dryrun_apply()
317
318     @staticmethod
319     def _apply_vpp_cpu(node):
320         """
321         Apply the VPP cpu config
322
323         :param node: Node dictionary with cpuinfo.
324         :type node: dict
325         """
326
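            # Illustrative example: with vpp_main_core == 1 and
            # vpp_workers == [(2, 3)], the returned string is
            #   "\n  main-core 1\n  corelist-workers 2-3\n"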
327         # Get main core
328         cpu = '\n'
329         if 'vpp_main_core' in node['cpu']:
330             vpp_main_core = node['cpu']['vpp_main_core']
331         else:
332             vpp_main_core = 0
333         if vpp_main_core != 0:
334             cpu += '  main-core {}\n'.format(vpp_main_core)
335
336         # Get workers
337         vpp_workers = node['cpu']['vpp_workers']
338         vpp_worker_len = len(vpp_workers)
339         if vpp_worker_len > 0:
340             vpp_worker_str = ''
341             for i, worker in enumerate(vpp_workers):
342                 if i > 0:
343                     vpp_worker_str += ','
344                 if worker[0] == worker[1]:
345                     vpp_worker_str += "{}".format(worker[0])
346                 else:
347                     vpp_worker_str += "{}-{}".format(worker[0], worker[1])
348
349             cpu += '  corelist-workers {}\n'.format(vpp_worker_str)
350
351         return cpu
352
353     @staticmethod
354     def _apply_vpp_devices(node):
355         """
356         Apply VPP PCI Device configuration to vpp startup.
357
358         :param node: Node dictionary with cpuinfo.
359         :type node: dict
360         """
361
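            # Illustrative example of one generated stanza (assuming a port at
            # 0000:04:00.0 with rx_queues == 2 and no descriptor overrides):
            #   dev 0000:04:00.0 {
            #     num-rx-queues 2
            #   }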
362         devices = ''
363         ports_per_numa = node['cpu']['ports_per_numa']
364
365         for item in ports_per_numa.items():
366             value = item[1]
367             interfaces = value['interfaces']
368
369             # if 0 was specified for the number of vpp workers, use 1 queue
370             num_rx_queues = None
371             num_tx_queues = None
372             if 'rx_queues' in value:
373                 num_rx_queues = value['rx_queues']
374             if 'tx_queues' in value:
375                 num_tx_queues = value['tx_queues']
376
377             num_rx_desc = None
378             num_tx_desc = None
379
380             # Create the devices string
381             for interface in interfaces:
382                 pci_address = interface['pci_address']
383                 pci_address = pci_address.lstrip("'").rstrip("'")
384                 devices += '\n'
385                 devices += '  dev {} {{ \n'.format(pci_address)
386                 if num_rx_queues:
387                     devices += '    num-rx-queues {}\n'.format(num_rx_queues)
388                 else:
389                     devices += '    num-rx-queues {}\n'.format(1)
390                 if num_tx_queues:
391                     devices += '    num-tx-queues {}\n'.format(num_tx_queues)
392                 if num_rx_desc:
393                     devices += '    num-rx-desc {}\n'.format(num_rx_desc)
394                 if num_tx_desc:
395                     devices += '    num-tx-desc {}\n'.format(num_tx_desc)
396                 devices += '  }'
397
398         return devices
399
400     @staticmethod
401     def _apply_buffers(node):
402         """
403         Apply the VPP buffer configuration to vpp startup.
404
405         :param node: Node dictionary with cpuinfo.
406         :type node: dict
407         """
408         buffers = ''
409         total_mbufs = node['cpu']['total_mbufs']
410
411         # Set buffers-per-numa only if total mbufs exceeds the default of 16384
412         logging.debug("Total mbufs: {}".format(total_mbufs))
413         if total_mbufs != 0 and total_mbufs > 16384:
414             buffers += '  buffers-per-numa {}'.format(total_mbufs)
415
416         return buffers
417
418     @staticmethod
419     def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end,
420                           total_vpp_workers,
421                           reserve_vpp_main_core):
422         """
423         Calculate the VPP worker information
424
425         :param node: Node dictionary
426         :param vpp_workers: List of VPP workers
427         :param numa_node: Numa node
428         :param other_cpus_end: The end of the cpus allocated for cores
429         other than vpp
430         :param total_vpp_workers: The number of vpp workers needed
431         :param reserve_vpp_main_core: Is there a core needed for
432         the vpp main core
433         :type node: dict
434         :type numa_node: int
435         :type other_cpus_end: int
436         :type total_vpp_workers: int
437         :type reserve_vpp_main_core: bool
438         :returns: Is a core still needed for the vpp main core
439         :rtype: bool
440         """
441
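            # Worked example: with one cpu slice of (0, 13), other_cpus_end == 2,
            # total_vpp_workers == 2 and reserve_vpp_main_core True, start is
            # bumped to 4, the main core becomes 3 and the workers get (4, 5).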
442         # Can we fit the workers in one of these slices
443         cpus = node['cpu']['cpus_per_node'][numa_node]
444         for cpu in cpus:
445             start = cpu[0]
446             end = cpu[1]
447             if start <= other_cpus_end:
448                 start = other_cpus_end + 1
449
450             if reserve_vpp_main_core:
451                 start += 1
452
453             workers_end = start + total_vpp_workers - 1
454
455             if workers_end <= end:
456                 if reserve_vpp_main_core:
457                     node['cpu']['vpp_main_core'] = start - 1
458                 reserve_vpp_main_core = False
459                 if total_vpp_workers:
460                     vpp_workers.append((start, workers_end))
461                 break
462
463         # We still need to reserve the main core
464         if reserve_vpp_main_core:
465             node['cpu']['vpp_main_core'] = other_cpus_end + 1
466
467         return reserve_vpp_main_core
468
469     @staticmethod
470     def _calc_desc_and_queues(total_numa_nodes,
471                               total_ports_per_numa,
472                               total_rx_queues,
473                               ports_per_numa_value):
474         """
475         Calculate the number of descriptors and queues
476
477         :param total_numa_nodes: The total number of numa nodes
478         :param total_ports_per_numa: The total number of ports for this
479         numa node
480         :param total_rx_queues: The total number of rx queues / port
481         :param ports_per_numa_value: The value from the ports_per_numa
482         dictionary
483         :type total_numa_nodes: int
484         :type total_ports_per_numa: int
485         :type total_rx_queues: int
486         :type ports_per_numa_value: dict
487         :returns: The total number of message buffers
488         :rtype: int
489         """
490
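            # Worked example: 2 numa nodes, 2 ports on this numa node and
            # 2 rx queues per port gives rx_queues = 2, tx_queues = 5 and
            # total_mbufs = ((2 * 1024) + (5 * 1024)) * 2 = 14336.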
491         # Get the number of rx queues
492         rx_queues = max(1, total_rx_queues)
493         tx_queues = rx_queues * total_numa_nodes + 1
494
495         # Get the descriptor entries
496         desc_entries = 1024
497         ports_per_numa_value['rx_queues'] = rx_queues
498         total_mbufs = ((rx_queues * desc_entries) + (tx_queues * desc_entries)) * total_ports_per_numa
499
500         return total_mbufs
501
502     @staticmethod
503     def _create_ports_per_numa(node, interfaces):
504         """
505         Create a dictionary of ports per numa node
506         :param node: Node dictionary
507         :param interfaces: All the interfaces to be used by vpp
508         :type node: dict
509         :type interfaces: dict
510         :returns: The ports per numa dictionary
511         :rtype: dict
512         """
513
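            # The result looks like, e.g.:
            #   {0: {'interfaces': [<intf>, <intf>]}, 1: {'interfaces': [<intf>]}}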
514         # Make a list of ports by numa node
515         ports_per_numa = {}
516         for item in interfaces.items():
517             i = item[1]
518             if i['numa_node'] not in ports_per_numa:
519                 ports_per_numa[i['numa_node']] = {'interfaces': []}
520             ports_per_numa[i['numa_node']]['interfaces'].append(i)
523         node['cpu']['ports_per_numa'] = ports_per_numa
524
525         return ports_per_numa
526
527     def calculate_cpu_parameters(self):
528         """
529         Calculate the cpu configuration.
530
531         """
532
533         # Calculate the cpu parameters, needed for the
534         # vpp_startup and grub configuration
535         for i in self._nodes.items():
536             node = i[1]
537
538             # get total number of nic ports
539             interfaces = node['interfaces']
540
541             # Make a list of ports by numa node
542             ports_per_numa = self._create_ports_per_numa(node, interfaces)
543
544             # Get the number of cpus to skip, we never use the first cpu
545             other_cpus_start = 1
546             other_cpus_end = other_cpus_start + node['cpu']['total_other_cpus'] - 1
547             other_workers = None
548             if other_cpus_end != 0:
549                 other_workers = (other_cpus_start, other_cpus_end)
550             node['cpu']['other_workers'] = other_workers
551
552             # Allocate the VPP main core and workers
553             vpp_workers = []
554             reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
555             total_vpp_cpus = node['cpu']['total_vpp_cpus']
556             total_rx_queues = node['cpu']['total_rx_queues']
557
558             # If total_vpp_cpus is 0, or less than the number of numa nodes
559             # with ports, no workers will be allocated
560             total_workers_node = 0
561             if len(ports_per_numa):
562                 total_workers_node = total_vpp_cpus // len(ports_per_numa)
563             total_main = 0
564             if reserve_vpp_main_core:
565                 total_main = 1
566             total_mbufs = 0
567             if total_main + total_workers_node != 0:
568                 for item in ports_per_numa.items():
569                     numa_node = item[0]
570                     value = item[1]
571
572                     # Get the number of descriptors and queues
573                     mbufs = self._calc_desc_and_queues(
574                         len(ports_per_numa),
575                         len(value['interfaces']), total_rx_queues, value)
576                     total_mbufs += mbufs
577
578                     # Get the VPP workers
579                     reserve_vpp_main_core = self._calc_vpp_workers(
580                         node, vpp_workers, numa_node,
581                         other_cpus_end, total_workers_node,
582                         reserve_vpp_main_core)
583
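                    # Pad the descriptor-based mbuf estimate by 2.5x before
                    # rounding it down to an integer below.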
584                 total_mbufs *= 2.5
585                 total_mbufs = int(total_mbufs)
586             else:
587                 total_mbufs = 0
588
589             # Save the info
590             node['cpu']['vpp_workers'] = vpp_workers
591             node['cpu']['total_mbufs'] = total_mbufs
592
593         # Write the config
594         self.updateconfig()
595
596     @staticmethod
597     def _apply_vpp_tcp(node):
598         """
599         Apply the tcp config
600
601         :param node: Node dictionary with cpuinfo.
602         :type node: dict
603         """
604
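            # With no expected sessions only a minimal api-segment block is
            # returned; otherwise heapsize, api-segment, session and tcp
            # sections are sized from the expected session counts.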
605         active_open_sessions = node['tcp']['active_open_sessions']
606         aos = int(active_open_sessions)
607
608         passive_open_sessions = node['tcp']['passive_open_sessions']
609         pos = int(passive_open_sessions)
610
611         # Generate the api-segment gid vpp section in any case
612         if (aos + pos) == 0:
613             tcp = '\n'.join([
614                 "api-segment {",
615                 "  gid vpp",
616                 "}"
617             ])
618             return tcp.rstrip('\n')
619
620         tcp = '\n'.join([
621             "# TCP stack-related configuration parameters",
622             "# expecting {:d} client sessions, {:d} server sessions\n".format(
623                 aos, pos),
624             "heapsize 4g\n",
625             "api-segment {",
626             "  global-size 2000M",
627             "  api-size 1G",
628             "}\n",
629
630             "session {",
631             "  event-queue-length {:d}".format(aos + pos),
632             "  preallocated-sessions {:d}".format(aos + pos),
633             "  v4-session-table-buckets {:d}".format((aos + pos) // 4),
634             "  v4-session-table-memory 3g\n"
635         ])
636         if aos > 0:
637             tcp = tcp + "  v4-halfopen-table-buckets {:d}".format(
638                 (aos + pos) // 4) + "\n"
639             tcp = tcp + "  v4-halfopen-table-memory 3g\n"
640             tcp = tcp + "  local-endpoints-table-buckets {:d}".format(
641                 (aos + pos) // 4) + "\n"
642             tcp = tcp + "  local-endpoints-table-memory 3g\n"
643         tcp = tcp + "}\n\n"
644
645         tcp = tcp + "tcp {\n"
646         tcp = tcp + "  preallocated-connections {:d}".format(aos + pos) + "\n"
647         if aos > 0:
648             tcp = tcp + "  preallocated-half-open-connections {:d}".format(
649                 aos) + "\n"
650         tcp = tcp + "}\n\n"
651
652         return tcp.rstrip('\n')
653
654     def apply_vpp_startup(self):
655         """
656         Apply the vpp startup configuration
657
658         """
659
660         # Apply the VPP startup configuration
661         for i in self._nodes.items():
662             node = i[1]
663
664             # Get the startup file
665             rootdir = node['rootdir']
666             sfile = rootdir + node['vpp']['startup_config_file']
667
668             # Get the buffers
669             devices = self._apply_vpp_devices(node)
670
671             # Get the CPU config
672             cpu = self._apply_vpp_cpu(node)
673
674             # Get the buffer configuration
675             buffers = self._apply_buffers(node)
676             # Get the TCP configuration, if any
677             tcp = self._apply_vpp_tcp(node)
678
679             # Make a backup if needed
680             self._autoconfig_backup_file(sfile)
681
682             # Get the template
683             tfile = sfile + '.template'
684             (ret, stdout, stderr) = \
685                 VPPUtil.exec_command('cat {}'.format(tfile))
686             if ret != 0:
687                 raise RuntimeError('Executing cat command failed on node {}'.
688                                    format(node['host']))
689             startup = stdout.format(cpu=cpu,
690                                     buffers=buffers,
691                                     devices=devices,
692                                     tcp=tcp)
693
694             (ret, stdout, stderr) = \
695                 VPPUtil.exec_command('rm {}'.format(sfile))
696             if ret != 0:
697                 logging.debug(stderr)
698
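                # Write the rendered startup config as root via a shell
                # here-document.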
699             cmd = "sudo cat > {0} << EOF\n{1}\nEOF\n".format(sfile, startup)
700             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
701             if ret != 0:
702                 raise RuntimeError('Writing config failed on node {}'.
703                                    format(node['host']))
704
705     def apply_grub_cmdline(self):
706         """
707         Apply the grub cmdline
708
709         """
710
711         for i in self._nodes.items():
712             node = i[1]
713
714             # Get the isolated CPUs
715             other_workers = node['cpu']['other_workers']
716             vpp_workers = node['cpu']['vpp_workers']
717             if 'vpp_main_core' in node['cpu']:
718                 vpp_main_core = node['cpu']['vpp_main_core']
719             else:
720                 vpp_main_core = 0
721             all_workers = []
722             if other_workers is not None:
723                 all_workers = [other_workers]
724             if vpp_main_core != 0:
725                 all_workers += [(vpp_main_core, vpp_main_core)]
726             all_workers += vpp_workers
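                # Build the isolcpus string, e.g. [(1, 4), (5, 5), (6, 9)]
                # becomes "1-4,5,6-9".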
727             isolated_cpus = ''
728             for idx, worker in enumerate(all_workers):
729                 if worker is None:
730                     continue
731                 if idx > 0:
732                     isolated_cpus += ','
733                 if worker[0] == worker[1]:
734                     isolated_cpus += "{}".format(worker[0])
735                 else:
736                     isolated_cpus += "{}-{}".format(worker[0], worker[1])
737
738             vppgrb = VppGrubUtil(node)
739             current_cmdline = vppgrb.get_current_cmdline()
740             if 'grub' not in node:
741                 node['grub'] = {}
742             node['grub']['current_cmdline'] = current_cmdline
743             node['grub']['default_cmdline'] = \
744                 vppgrb.apply_cmdline(node, isolated_cpus)
745
746         self.updateconfig()
747
748     def get_hugepages(self):
749         """
750         Get the hugepage configuration
751
752         """
753
754         for i in self._nodes.items():
755             node = i[1]
756
757             hpg = VppHugePageUtil(node)
758             max_map_count, shmmax = hpg.get_huge_page_config()
759             node['hugepages']['max_map_count'] = max_map_count
760             node['hugepages']['shmax'] = shmmax
761             total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
762             node['hugepages']['actual_total'] = total
763             node['hugepages']['free'] = free
764             node['hugepages']['size'] = size
765             node['hugepages']['memtotal'] = memtotal
766             node['hugepages']['memfree'] = memfree
767
768         self.updateconfig()
769
770     def get_grub(self):
771         """
772         Get the grub configuration
773
774         """
775
776         for i in self._nodes.items():
777             node = i[1]
778
779             vppgrb = VppGrubUtil(node)
780             current_cmdline = vppgrb.get_current_cmdline()
781             default_cmdline = vppgrb.get_default_cmdline()
782
783             # Get the total number of isolated CPUs
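                # e.g. "isolcpus=1,3" on the kernel cmdline counts as 2.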
784             current_iso_cpus = 0
785             iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
786             iso_cpurl = len(iso_cpur)
787             if iso_cpurl > 0:
788                 iso_cpu_str = iso_cpur[0]
789                 iso_cpu_str = iso_cpu_str.split('=')[1]
790                 iso_cpul = iso_cpu_str.split(',')
791                 for iso_cpu in iso_cpul:
792                     isocpuspl = iso_cpu.split('-')
793                     if len(isocpuspl) == 1:
794                         current_iso_cpus += 1
795                     else:
796                         first = int(isocpuspl[0])
797                         second = int(isocpuspl[1])
798                         if first == second:
799                             current_iso_cpus += 1
800                         else:
801                             current_iso_cpus += second - first + 1
802
803             if 'grub' not in node:
804                 node['grub'] = {}
805             node['grub']['current_cmdline'] = current_cmdline
806             node['grub']['default_cmdline'] = default_cmdline
807             node['grub']['current_iso_cpus'] = current_iso_cpus
808
809         self.updateconfig()
810
811     @staticmethod
812     def _get_device(node):
813         """
814         Get the device configuration for a single node
815
816         :param node: Node dictionary with cpuinfo.
817         :type node: dict
818
819         """
820
821         vpp = VppPCIUtil(node)
822         vpp.get_all_devices()
823
824         # Save the device information
825         node['devices'] = {}
826         node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
827         node['devices']['kernel_devices'] = vpp.get_kernel_devices()
828         node['devices']['other_devices'] = vpp.get_other_devices()
829         node['devices']['linkup_devices'] = vpp.get_link_up_devices()
830
831     def get_devices_per_node(self):
832         """
833         Get the device configuration for all the nodes
834
835         """
836
837         for i in self._nodes.items():
838             node = i[1]
839             # Update the interface data
840
841             self._get_device(node)
842
843         self.updateconfig()
844
845     @staticmethod
846     def get_cpu_layout(node):
847         """
848         Get the cpu layout
849
850         using lscpu -p get the cpu layout.
851         Returns a list with each item representing a single cpu.
852
853         :param node: Node dictionary.
854         :type node: dict
855         :returns: The cpu layout
856         :rtype: list
857         """
858
859         cmd = 'lscpu -p'
860         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
861         if ret != 0:
862             raise RuntimeError('{} failed on node {} {}'.
863                                format(cmd, node['host'], stderr))
864
865         pcpus = []
866         lines = stdout.split('\n')
867         for line in lines:
868             if line == '' or line[0] == '#':
869                 continue
870             linesplit = line.split(',')
871             layout = {'cpu': linesplit[0], 'core': linesplit[1],
872                       'socket': linesplit[2], 'node': linesplit[3]}
873
874             # cpu, core, socket, node
875             pcpus.append(layout)
876
877         return pcpus
878
879     def get_cpu(self):
880         """
881         Get the cpu configuration
882
883         """
884
885         # Get the CPU layout
886         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
887
888         for i in self._nodes.items():
889             node = i[1]
890
891             # Get the cpu layout
892             layout = self.get_cpu_layout(node)
893             node['cpu']['layout'] = layout
894
895             cpuinfo = node['cpuinfo']
896             smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
897             node['cpu']['smt_enabled'] = smt_enabled
898
899             # We don't want to write the cpuinfo
900             node['cpuinfo'] = ""
901
902         # Write the config
903         self.updateconfig()
904
905     def discover(self):
906         """
907         Get the current system configuration.
908
909         """
910
911         # Get the Huge Page configuration
912         self.get_hugepages()
913
914         # Get the device configuration
915         self.get_devices_per_node()
916
917         # Get the CPU configuration
918         self.get_cpu()
919
920         # Get the current grub cmdline
921         self.get_grub()
922
923     def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
924         """
925         Ask the user questions related to the cpu configuration.
926
927         :param node: Node dictionary
928         :param total_cpus: The total number of cpus in the system
929         :param numa_nodes: The list of numa nodes in the system
930         :type node: dict
931         :type total_cpus: int
932         :type numa_nodes: list
933         """
934
935         print("\nYour system has {} core(s) and {} Numa Nodes.".
936               format(total_cpus, len(numa_nodes)))
937         print("To begin, we suggest not reserving any cores for "
938               "VPP or other processes.")
939         print("Then to improve performance start reserving cores and "
940               "adding queues as needed.")
941
942         # Leave 1 for the general system
943         total_cpus -= 1
944         max_vpp_cpus = min(total_cpus, 4)
945         total_vpp_cpus = 0
946         if max_vpp_cpus > 0:
947             question = "\nHow many core(s) shall we reserve for " \
948                        "VPP [0-{}][0]? ".format(max_vpp_cpus)
949             total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
950             node['cpu']['total_vpp_cpus'] = total_vpp_cpus
951
952         total_other_cpus = 0
953         max_other_cores = total_cpus - total_vpp_cpus
954         if max_other_cores > 0:
955             question = 'How many core(s) do you want to reserve for ' \
956                        'processes other than VPP [0-{}][0]? '.format(max_other_cores)
957             total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
958             node['cpu']['total_other_cpus'] = total_other_cpus
959
960         max_main_cpus = total_cpus - total_vpp_cpus - total_other_cpus
961         reserve_vpp_main_core = False
962         if max_main_cpus > 0:
963             question = "Should we reserve 1 core for the VPP Main thread? "
964             question += "[y/N]? "
965             answer = self._ask_user_yn(question, 'n')
966             if answer == 'y':
967                 reserve_vpp_main_core = True
968             node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
969             node['cpu']['vpp_main_core'] = 0
970
971         question = "How many RX queues per port shall we use for " \
972                    "VPP [1-4][1]? "
973         total_rx_queues = self._ask_user_range(question, 1, 4, 1)
974         node['cpu']['total_rx_queues'] = total_rx_queues
975
976     def modify_cpu(self, ask_questions=True):
977         """
978         Modify the cpu configuration, asking the user for the values.
979
980         :param ask_questions: When true ask the user for config parameters
981
982         """
983
984         # Get the CPU layout
985         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
986
987         for i in self._nodes.items():
988             node = i[1]
989             total_cpus = 0
990             total_cpus_per_slice = 0
991             cpus_per_node = {}
992             numa_nodes = []
993             cores = []
994             cpu_layout = self.get_cpu_layout(node)
995
996             # Assume the number of cpus per slice is always the same as the
997             # first slice
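             # e.g. an 8 cpu box whose lscpu rows list node 0 for cpus 0-3 and
             # node 1 for cpus 4-7 yields total_cpus_per_slice == 4 and
             # cpus_per_node == {'0': [(0, 3)], '1': [(4, 7)]} from the loops below.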
998             first_node = '0'
999             for cpu in cpu_layout:
1000                 if cpu['node'] != first_node:
1001                     break
1002                 total_cpus_per_slice += 1
1003
1004             # Get the total number of cpus, cores, and numa nodes from the
1005             # cpu layout
1006             for cpul in cpu_layout:
1007                 numa_node = cpul['node']
1008                 core = cpul['core']
1009                 cpu = cpul['cpu']
1010                 total_cpus += 1
1011
1012                 if numa_node not in cpus_per_node:
1013                     cpus_per_node[numa_node] = []
1014                 cpuperslice = int(cpu) % total_cpus_per_slice
1015                 if cpuperslice == 0:
1016                     cpus_per_node[numa_node].append((int(cpu), int(cpu) +
1017                                                      total_cpus_per_slice - 1))
1018                 if numa_node not in numa_nodes:
1019                     numa_nodes.append(numa_node)
1020                 if core not in cores:
1021                     cores.append(core)
1022             node['cpu']['cpus_per_node'] = cpus_per_node
1023
1024             # Ask the user some questions
1025             if ask_questions and total_cpus >= 4:
1026                 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1027
1028             # Populate the interfaces with the numa node
1029             if 'interfaces' in node:
1030                 ikeys = node['interfaces'].keys()
1031                 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1032
1033             # We don't want to write the cpuinfo
1034             node['cpuinfo'] = ""
1035
1036         # Write the configs
1037         self._update_auto_config()
1038         self.updateconfig()
1039
1040     def _modify_other_devices(self, node,
1041                               other_devices, kernel_devices, dpdk_devices):
1042         """
1043         Modify the devices configuration, asking the user for the values.
1044
1045         """
1046
1047         odevices_len = len(other_devices)
1048         if odevices_len > 0:
1049             print("\nThese device(s) are currently NOT being used "
1050                   "by VPP or the OS.\n")
1051             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1052             question = "\nWould you like to give any of these devices"
1053             question += " back to the OS [Y/n]? "
1054             answer = self._ask_user_yn(question, 'Y')
1055             if answer == 'y':
1056                 vppd = {}
1057                 for dit in other_devices.items():
1058                     dvid = dit[0]
1059                     device = dit[1]
1060                     question = "Would you like to use device {} for". \
1061                         format(dvid)
1062                     question += " the OS [y/N]? "
1063                     answer = self._ask_user_yn(question, 'n')
1064                     if answer == 'y':
1065                         if 'unused' in device and len(
1066                                 device['unused']) != 0 and \
1067                                 device['unused'][0] != '':
1068                             driver = device['unused'][0]
1069                             ret = VppPCIUtil.bind_vpp_device(
1070                                 node, driver, dvid)
1071                             if ret:
1072                                 logging.debug(
1073                                     'Could not bind device {}'.format(dvid))
1074                             else:
1075                                 vppd[dvid] = device
1076                 for dit in vppd.items():
1077                     dvid = dit[0]
1078                     device = dit[1]
1079                     kernel_devices[dvid] = device
1080                     del other_devices[dvid]
1081
1082         odevices_len = len(other_devices)
1083         if odevices_len > 0:
1084             print("\nThese device(s) are still NOT being used "
1085                   "by VPP or the OS.\n")
1086             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1087             question = "\nWould you like to use any of these for VPP [y/N]? "
1088             answer = self._ask_user_yn(question, 'N')
1089             if answer == 'y':
1090                 vppd = {}
1091                 for dit in other_devices.items():
1092                     dvid = dit[0]
1093                     device = dit[1]
1094                     question = "Would you like to use device {} ".format(dvid)
1095                     question += "for VPP [y/N]? "
1096                     answer = self._ask_user_yn(question, 'n')
1097                     if answer == 'y':
1098                         vppd[dvid] = device
1099                 for dit in vppd.items():
1100                     dvid = dit[0]
1101                     device = dit[1]
1102                     if 'unused' in device and len(device['unused']) != 0 and \
1103                             device['unused'][0] != '':
1104                         driver = device['unused'][0]
1105                         logging.debug(
1106                             'Binding device {} to driver {}'.format(dvid,
1107                                                                     driver))
1108                         ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1109                         if ret:
1110                             logging.debug(
1111                                 'Could not bind device {}'.format(dvid))
1112                         else:
1113                             dpdk_devices[dvid] = device
1114                             del other_devices[dvid]
1115
1116     def update_interfaces_config(self):
1117         """
1118         Modify the interfaces directly from the config file.
1119
1120         """
1121
1122         for i in self._nodes.items():
1123             node = i[1]
1124             devices = node['devices']
1125             all_devices = devices['other_devices']
1126             all_devices.update(devices['dpdk_devices'])
1127             all_devices.update(devices['kernel_devices'])
1128
1129             current_ifcs = {}
1130             interfaces = {}
1131             if 'interfaces' in node:
1132                 current_ifcs = node['interfaces']
1133             if current_ifcs:
1134                 for ifc in current_ifcs.values():
1135                     dvid = ifc['pci_address']
1136                     if dvid in all_devices:
1137                         VppPCIUtil.vpp_create_interface(interfaces, dvid,
1138                                                         all_devices[dvid])
1139             node['interfaces'] = interfaces
1140
1141         self.updateconfig()
1142
1143     def modify_devices(self):
1144         """
1145         Modify the devices configuration, asking the user for the values.
1146
1147         """
1148
1149         for i in self._nodes.items():
1150             node = i[1]
1151             devices = node['devices']
1152             other_devices = devices['other_devices']
1153             kernel_devices = devices['kernel_devices']
1154             dpdk_devices = devices['dpdk_devices']
1155
1156             if other_devices:
1157                 self._modify_other_devices(node, other_devices,
1158                                            kernel_devices, dpdk_devices)
1159
1160                 # Get the devices again for this node
1161                 self._get_device(node)
1162                 devices = node['devices']
1163                 kernel_devices = devices['kernel_devices']
1164                 dpdk_devices = devices['dpdk_devices']
1165
1166             klen = len(kernel_devices)
1167             if klen > 0:
1168                 print("\nThese devices are safe to be used with VPP.\n")
1169                 VppPCIUtil.show_vpp_devices(kernel_devices)
1170                 question = "\nWould you like to use any of these " \
1171                            "device(s) for VPP [y/N]? "
1172                 answer = self._ask_user_yn(question, 'n')
1173                 if answer == 'y':
1174                     vppd = {}
1175                     for dit in kernel_devices.items():
1176                         dvid = dit[0]
1177                         device = dit[1]
1178                         question = "Would you like to use device {} ".format(dvid)
1179                         question += "for VPP [y/N]? "
1180                         answer = self._ask_user_yn(question, 'n')
1181                         if answer == 'y':
1182                             vppd[dvid] = device
1183                     for dit in vppd.items():
1184                         dvid = dit[0]
1185                         device = dit[1]
1186                         if 'unused' in device and len(
1187                                 device['unused']) != 0 and device['unused'][0] != '':
1188                             driver = device['unused'][0]
1189                             question = "Would you like to bind the driver {} for {} [y/N]? ".format(driver, dvid)
1190                             answer = self._ask_user_yn(question, 'n')
1191                             if answer == 'y':
1192                                 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1193                                 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1194                                 if ret:
1195                                     logging.debug('Could not bind device {}'.format(dvid))
1196                         dpdk_devices[dvid] = device
1197                         del kernel_devices[dvid]
1198
1199             dlen = len(dpdk_devices)
1200             if dlen > 0:
1201                 print("\nThese device(s) are already using DPDK.\n")
1202                 VppPCIUtil.show_vpp_devices(dpdk_devices,
1203                                             show_interfaces=False)
1204                 question = "\nWould you like to remove any of "
1205                 question += "these device(s) [y/N]? "
1206                 answer = self._ask_user_yn(question, 'n')
1207                 if answer == 'y':
1208                     vppdl = {}
1209                     for dit in dpdk_devices.items():
1210                         dvid = dit[0]
1211                         device = dit[1]
1212                         question = "Would you like to remove {} [y/N]? ". \
1213                             format(dvid)
1214                         answer = self._ask_user_yn(question, 'n')
1215                         if answer == 'y':
1216                             vppdl[dvid] = device
1217                     for dit in vppdl.items():
1218                         dvid = dit[0]
1219                         device = dit[1]
1220                         if 'unused' in device and len(
1221                                 device['unused']) != 0 and device['unused'][0] != '':
1222                             driver = device['unused'][0]
1223                             logging.debug(
1224                                 'Binding device {} to driver {}'.format(
1225                                     dvid, driver))
1226                             ret = VppPCIUtil.bind_vpp_device(node, driver,
1227                                                              dvid)
1228                             if ret:
1229                                 logging.debug(
1230                                     'Could not bind device {}'.format(dvid))
1231                             else:
1232                                 kernel_devices[dvid] = device
1233                                 del dpdk_devices[dvid]
1234
1235             interfaces = {}
1236             for dit in dpdk_devices.items():
1237                 dvid = dit[0]
1238                 device = dit[1]
1239                 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1240             node['interfaces'] = interfaces
1241
1242         self._update_auto_config()
1243         self.updateconfig()
1244
1245     def modify_huge_pages(self):
1246         """
1247         Modify the huge page configuration, asking the user for the values.
1248
1249         """
1250
1251         for i in self._nodes.items():
1252             node = i[1]
1253
1254             total = node['hugepages']['actual_total']
1255             free = node['hugepages']['free']
1256             size = node['hugepages']['size']
1257             memfree = node['hugepages']['memfree'].split(' ')[0]
1258             hugesize = int(size.split(' ')[0])
1259             # The max number of huge pages should be no more than
1260             # 70% of total free memory
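             # e.g. 16777216 kB (16 GB) free with 2048 kB pages caps the
             # number of pages at 5734.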
1261             maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // hugesize
1262             print("\nThere are currently {} {} huge pages free.".format(
1263                 free, size))
1264             question = "Do you want to reconfigure the number of " \
1265                        "huge pages [y/N]? "
1266             answer = self._ask_user_yn(question, 'n')
1267             if answer == 'n':
1268                 node['hugepages']['total'] = total
1269                 continue
1270
1271             print("\nThere is currently a total of {} huge pages.".
1272                   format(total))
1273             question = "How many huge pages do you want [{} - {}][{}]? ". \
1274                 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1275             answer = self._ask_user_range(question, 1024, maxpages, 1024)
1276             node['hugepages']['total'] = str(answer)
1277
1278         # Update auto-config.yaml
1279         self._update_auto_config()
1280
1281         # Rediscover just the hugepages
1282         self.get_hugepages()
1283
1284     def get_tcp_params(self):
1285         """
1286         Get the tcp configuration
1287
1288         """
1289         # maybe nothing to do here?
1290         self.updateconfig()
1291
1292     def acquire_tcp_params(self):
1293         """
1294         Ask the user for TCP stack configuration parameters
1295
1296         """
1297
1298         for i in self._nodes.items():
1299             node = i[1]
1300
1301             question = "\nHow many active-open / tcp client sessions are " \
1302                        "expected [0-10000000][0]? "
1303             answer = self._ask_user_range(question, 0, 10000000, 0)
1304             # Less than 10K is equivalent to 0
1305             if int(answer) < 10000:
1306                 answer = 0
1307             node['tcp']['active_open_sessions'] = answer
1308
1309             question = "How many passive-open / tcp server sessions are " \
1310                        "expected [0-10000000][0]? "
1311             answer = self._ask_user_range(question, 0, 10000000, 0)
1312             # Less than 10K is equivalent to 0
1313             if int(answer) < 10000:
1314                 answer = 0
1315             node['tcp']['passive_open_sessions'] = answer
1316
1317         # Update auto-config.yaml
1318         self._update_auto_config()
1319
1320         # Rediscover tcp parameters
1321         self.get_tcp_params()
1322
1323     @staticmethod
1324     def patch_qemu(node):
1325         """
1326         Patch qemu with the correct patches.
1327
1328         :param node: Node dictionary
1329         :type node: dict
1330         """
1331
1332         print('\nWe are patching the node "{}":\n'.format(node['host']))
1333         QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
1334
1335     @staticmethod
1336     def cpu_info(node):
1337         """
1338         print the CPU information
1339
1340         """
1341
1342         cpu = CpuUtils.get_cpu_info_per_node(node)
1343
1344         item = 'Model name'
1345         if item in cpu:
1346             print("{:>20}:    {}".format(item, cpu[item]))
1347         item = 'CPU(s)'
1348         if item in cpu:
1349             print("{:>20}:    {}".format(item, cpu[item]))
1350         item = 'Thread(s) per core'
1351         if item in cpu:
1352             print("{:>20}:    {}".format(item, cpu[item]))
1353         item = 'Core(s) per socket'
1354         if item in cpu:
1355             print("{:>20}:    {}".format(item, cpu[item]))
1356         item = 'Socket(s)'
1357         if item in cpu:
1358             print("{:>20}:    {}".format(item, cpu[item]))
1359         item = 'NUMA node(s)'
1360         numa_nodes = 0
1361         if item in cpu:
1362             numa_nodes = int(cpu[item])
1363         for i in range(0, numa_nodes):
1364             item = "NUMA node{} CPU(s)".format(i)
1365             print("{:>20}:    {}".format(item, cpu[item]))
1366         item = 'CPU max MHz'
1367         if item in cpu:
1368             print("{:>20}:    {}".format(item, cpu[item]))
1369         item = 'CPU min MHz'
1370         if item in cpu:
1371             print("{:>20}:    {}".format(item, cpu[item]))
1372
1373         if node['cpu']['smt_enabled']:
1374             smt = 'Enabled'
1375         else:
1376             smt = 'Disabled'
1377         print("{:>20}:    {}".format('SMT', smt))
1378
1379         # VPP Threads
1380         print("\nVPP Threads: (Name: Cpu Number)")
1381         vpp_processes = cpu['vpp_processes']
1382         for i in vpp_processes.items():
1383             print("  {:10}: {:4}".format(i[0], i[1]))
1384
1385     @staticmethod
1386     def device_info(node):
1387         """
1388         Show the device information.
1389
1390         """
1391
1392         if 'cpu' in node and 'total_mbufs' in node['cpu']:
1393             total_mbufs = node['cpu']['total_mbufs']
1394             if total_mbufs != 0:
1395                 print("Total Number of Buffers: {}".format(total_mbufs))
1396
1397         vpp = VppPCIUtil(node)
1398         vpp.get_all_devices()
1399         linkup_devs = vpp.get_link_up_devices()
1400         if len(linkup_devs):
1401             print("\nDevices with link up (cannot be used with VPP):")
1402             vpp.show_vpp_devices(linkup_devs, show_header=False)
1403             # for dev in linkup_devs:
1404             #    print ("    " + dev)
1405         kernel_devs = vpp.get_kernel_devices()
1406         if len(kernel_devs):
1407             print("\nDevices bound to kernel drivers:")
1408             vpp.show_vpp_devices(kernel_devs, show_header=False)
1409         else:
1410             print("\nNo devices bound to kernel drivers")
1411
1412         dpdk_devs = vpp.get_dpdk_devices()
1413         if len(dpdk_devs):
1414             print("\nDevices bound to DPDK drivers:")
1415             vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1416                                  show_header=False)
1417         else:
1418             print("\nNo devices bound to DPDK drivers")
1419
1420         other_devs = vpp.get_other_devices()
1421         if len(other_devs):
1422             print("\nDevices not bound to Kernel or DPDK drivers:")
1423             vpp.show_vpp_devices(other_devs, show_interfaces=True,
1424                                  show_header=False)
1425         else:
1426             print("\nNo devices not bound to Kernel or DPDK drivers")
1427
1428         vpputl = VPPUtil()
1429         interfaces = vpputl.get_hardware(node)
1430         if interfaces == {}:
1431             return
1432
1433         print("\nDevices in use by VPP:")
1434
1435         if len(interfaces.items()) < 2:
1436             print("None")
1437             return
1438
1439         print("{:30} {:4} {:4} {:7} {:4} {:7}".
1440               format('Name', 'Numa', 'RXQs',
1441                      'RXDescs', 'TXQs', 'TXDescs'))
1442         for intf in sorted(interfaces.items()):
1443             name = intf[0]
1444             value = intf[1]
1445             if name == 'local0':
1446                 continue
1447             numa = rx_qs = rx_ds = tx_qs = tx_ds = ''
1448             if 'numa' in value:
1449                 numa = int(value['numa'])
1450             if 'rx queues' in value:
1451                 rx_qs = int(value['rx queues'])
1452             if 'rx descs' in value:
1453                 rx_ds = int(value['rx descs'])
1454             if 'tx queues' in value:
1455                 tx_qs = int(value['tx queues'])
1456             if 'tx descs' in value:
1457                 tx_ds = int(value['tx descs'])
1458
1459             print("{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".
1460                   format(name, numa, rx_qs, rx_ds, tx_qs, tx_ds))
1461
1462     @staticmethod
1463     def hugepage_info(node):
1464         """
1465         Show the huge page information.
1466
1467         """
1468
1469         hpg = VppHugePageUtil(node)
1470         hpg.show_huge_pages()
1471
1472     @staticmethod
1473     def has_interfaces(node):
1474         """
1475         Check for interfaces; return True if there is at least one.
1476
1477         :returns: boolean
1478         """
1479         if 'interfaces' in node and len(node['interfaces']):
1480             return True
1481         else:
1482             return False
1483
1484     @staticmethod
1485     def min_system_resources(node):
1486         """
1487         Check the system for basic minimum resources, return true if
1488         there is enough.
1489
1490         :returns: boolean
1491         """
1492
1493         min_sys_res = True
1494
1495         # CPUs
1496         if 'layout' in node['cpu']:
1497             total_cpus = len(node['cpu']['layout'])
1498             if total_cpus < MIN_SYSTEM_CPUS:
1499                 print("\nThere are only {} CPU(s) available on this system. "
1500                       "This is not enough to run VPP.".format(total_cpus))
1501                 min_sys_res = False
1502
1503         # System Memory
1504         if 'free' in node['hugepages'] and \
1505                 'memfree' in node['hugepages'] and \
1506                 'size' in node['hugepages']:
1507             free = node['hugepages']['free']
1508             memfree = float(node['hugepages']['memfree'].split(' ')[0])
1509             hugesize = float(node['hugepages']['size'].split(' ')[0])
1510
1511             memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1512             percentmemhugepages = (memhugepages / memfree) * 100
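            # Illustrative arithmetic (values assumed): with 2048 kB huge
            # pages, the 1024-page minimum needs 1024 * 2048 = 2097152 kB;
            # against 8388608 kB of free memory that is 25%, well under
            # MAX_PERCENT_FOR_HUGE_PAGES (70%).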
1513             if free == '0' and \
1514                     percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1515                 print(
1516                     "\nThe system has only {} of free memory. You will not "
1517                     "be able to allocate enough Huge Pages for VPP.".format(
1518                         int(memfree))
1519                 )
1521                 min_sys_res = False
1522
1523         return min_sys_res
1524
1525     def sys_info(self):
1526         """
1527         Print the system information
1528
1529         """
1530
1531         for i in self._nodes.items():
1532             print("\n==============================")
1533             name = i[0]
1534             node = i[1]
1535
1536             print("NODE: {}\n".format(name))
1537
1538             # CPU
1539             print("CPU:")
1540             self.cpu_info(node)
1541
1542             # Grub
1543             print("\nGrub Command Line:")
1544             if 'grub' in node:
1545                 print("  Current: {}".format(
1546                     node['grub']['current_cmdline']))
1547                 print("  Configured: {}".format(
1548                     node['grub']['default_cmdline']))
1549
1550             # Huge Pages
1551             print("\nHuge Pages:")
1552             self.hugepage_info(node)
1553
1554             # Devices
1555             print("\nDevices:")
1556             self.device_info(node)
1557
1558             # Status
1559             print("\nVPP Service Status:")
1560             state, errors = VPPUtil.status(node)
1561             print("  {}".format(state))
1562             for e in errors:
1563                 print("  {}".format(e))
1564
1565             # Minimum system resources
1566             self.min_system_resources(node)
1567
1568             print("\n==============================")
1569
1570     def _ipv4_interface_setup_questions(self, node):
1571         """
1572         Ask the user some questions and get a list of interfaces
1573         and IPv4 addresses associated with those interfaces
1574
1575         :param node: Node dictionary.
1576         :type node: dict
1577         :returns: A list of interfaces with IP addresses
1578         :rtype: list
1579         """
1580
1581         vpputl = VPPUtil()
1582         interfaces = vpputl.get_hardware(node)
1583         if interfaces == {}:
1584             return []
1585
1586         interfaces_with_ip = []
1587         for intf in sorted(interfaces.items()):
1588             name = intf[0]
1589             if name == 'local0':
1590                 continue
1591
1592             question = "Would you like to add an address to " \
1593                        "interface {} [Y/n]? ".format(name)
1594             answer = self._ask_user_yn(question, 'y')
1595             if answer == 'y':
1596                 address = {}
1597                 addr = self._ask_user_ipv4()
1598                 address['name'] = name
1599                 address['addr'] = addr
1600                 interfaces_with_ip.append(address)
1601
1602         return interfaces_with_ip
1603
1604     def ipv4_interface_setup(self):
1605         """
1606         After asking the user some questions, create a script that
1607         assigns IPv4 addresses to the chosen interfaces and brings them up
1608
1609         """
1610
1611         for i in self._nodes.items():
1612             node = i[1]
1613
1614             # Show the current interfaces with IP addresses
1615             current_ints = VPPUtil.get_int_ip(node)
1616             if current_ints != {}:
1617                 print("\nThese are the current interfaces with IP addresses:")
1618                 for items in sorted(current_ints.items()):
1619                     name = items[0]
1620                     value = items[1]
1621                     if 'address' not in value:
1622                         address = 'Not Set'
1623                     else:
1624                         address = value['address']
1625                     print("{:30} {:20} {:10}".format(name, address,
1626                                                      value['state']))
1627                 question = "\nWould you like to keep this configuration " \
1628                            "[Y/n]? "
1629                 answer = self._ask_user_yn(question, 'y')
1630                 if answer == 'y':
1631                     continue
1632             else:
1633                 print("\nThere are currently no interfaces with IP "
1634                       "addresses.")
1635
1636             # Create a script that adds the IP addresses to the interfaces
1637             # and brings the interfaces up
1638             ints_with_addrs = self._ipv4_interface_setup_questions(node)
1639             content = ''
1640             for ints in ints_with_addrs:
1641                 name = ints['name']
1642                 addr = ints['addr']
1643                 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1644                 setintupstr = 'set int state {} up\n'.format(name)
1645                 content += setipstr + setintupstr
1646
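            # The generated script is plain VPP CLI, for example (interface
            # name and address are illustrative):
            #   set int ip address GigabitEthernet0/8/0 192.168.1.1/24
            #   set int state GigabitEthernet0/8/0 up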
1647             # Write the content to the script
1648             rootdir = node['rootdir']
1649             filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1650             with open(filename, 'w+') as sfile:
1651                 sfile.write(content)
1652
1653             # Execute the script
1654             cmd = 'vppctl exec {}'.format(filename)
1655             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1656             if ret != 0:
1657                 logging.debug(stderr)
1658
1659             print("\nA script has been created at {}".format(filename))
1660             print("This script can be run using the following:")
1661             print("vppctl exec {}\n".format(filename))
1662
1663     def _create_vints_questions(self, node):
1664         """
1665         Ask the user which interfaces to connect to a VM and create
1666         a vhost-user virtual interface for each one selected
1667
1668         :param node: Node dictionary.
1669         :type node: dict
1670         :returns: A list of interfaces with their virtual interfaces
1671         :rtype: list
1672         """
1673
1674         vpputl = VPPUtil()
1675         interfaces = vpputl.get_hardware(node)
1676         if interfaces == {}:
1677             return []
1678
1679         # First delete all the Virtual interfaces
1680         for intf in sorted(interfaces.items()):
1681             name = intf[0]
1682             if name[:7] == 'Virtual':
1683                 cmd = 'vppctl delete vhost-user {}'.format(name)
1684                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1685                 if ret != 0:
1686                     logging.debug('{} failed on node {} {}'.format(
1687                         cmd, node['host'], stderr))
1688
1689         # Create a virtual interface for each interface the user wants to use
1690         interfaces = vpputl.get_hardware(node)
1691         if interfaces == {}:
1692             return []
1693         interfaces_with_virtual_interfaces = []
1694         inum = 1
1695         for intf in sorted(interfaces.items()):
1696             name = intf[0]
1697             if name == 'local0':
1698                 continue
1699
1700             question = "Would you like to connect this interface {} to " \
1701                        "the VM [Y/n]? ".format(name)
1702             answer = self._ask_user_yn(question, 'y')
1703             if answer == 'y':
1704                 sockfilename = '/var/run/vpp/{}.sock'.format(
1705                     name.replace('/', '_'))
1706                 if os.path.exists(sockfilename):
1707                     os.remove(sockfilename)
1708                 cmd = 'vppctl create vhost-user socket {} server'.format(
1709                     sockfilename)
1710                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1711                 if ret != 0:
1712                     raise RuntimeError(
1713                         "Couldn't execute the command {}, {}.".format(cmd,
1714                                                                       stderr))
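                # vppctl prints the name of the new vhost-user interface on
                # stdout (e.g. VirtualEthernet0/0/0, the exact name varies);
                # capture it so it can be bridged with the physical interface.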
1715                 vintname = stdout.rstrip('\r\n')
1716
1717                 cmd = 'chmod 777 {}'.format(sockfilename)
1718                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1719                 if ret != 0:
1720                     raise RuntimeError(
1721                         "Couldn't execute the command {}, {}.".format(cmd,
1722                                                                       stderr))
1723
1724                 interface = {'name': name,
1725                              'virtualinterface': '{}'.format(vintname),
1726                              'bridge': '{}'.format(inum)}
1727                 inum += 1
1728                 interfaces_with_virtual_interfaces.append(interface)
1729
1730         return interfaces_with_virtual_interfaces
1731
1732     def create_and_bridge_virtual_interfaces(self):
1733         """
1734         After asking the user some questions, create vhost-user virtual
1735         interfaces and bridge each one with its physical interface
1736
1737         """
1738
1739         for i in self._nodes.items():
1740             node = i[1]
1741
1742             # Show the current bridge and interface configuration
1743             print("\nThis is the current bridge configuration:")
1744             VPPUtil.show_bridge(node)
1745             question = "\nWould you like to keep this configuration [Y/n]? "
1746             answer = self._ask_user_yn(question, 'y')
1747             if answer == 'y':
1748                 continue
1749
1750             # Create a script that builds a bridge configuration with
1751             # physical interfaces and virtual interfaces
1752             ints_with_vints = self._create_vints_questions(node)
1753             content = ''
1754             for intf in ints_with_vints:
1755                 vhoststr = '\n'.join([
1756                     'comment { The following command creates the socket }',
1757                     'comment { and returns a virtual interface }',
1758                     'comment {{ create vhost-user socket '
1759                     '/var/run/vpp/sock{}.sock server }}\n'.format(
1760                         intf['bridge'])
1761                 ])
1762
1763                 setintdnstr = 'set interface state {} down\n'.format(
1764                     intf['name'])
1765
1766                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1767                     intf['name'], intf['bridge'])
1768                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1769                     intf['virtualinterface'], intf['bridge'])
1770
1771                 # set interface state VirtualEthernet/0/0/0 up
1772                 setintvststr = 'set interface state {} up\n'.format(
1773                     intf['virtualinterface'])
1774
1775                 # Bring the physical interface back up
1776                 setintupstr = 'set interface state {} up\n'.format(
1777                     intf['name'])
1778
1779                 content += (vhoststr + setintdnstr + setintbrstr +
                                 setvintbrstr + setintvststr + setintupstr)
1780
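            # The resulting script is plain VPP CLI, for example (interface
            # names and bridge domain are illustrative):
            #   set interface state GigabitEthernet0/8/0 down
            #   set interface l2 bridge GigabitEthernet0/8/0 1
            #   set interface l2 bridge VirtualEthernet0/0/0 1
            #   set interface state VirtualEthernet0/0/0 up
            #   set interface state GigabitEthernet0/8/0 up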
1781             # Write the content to the script
1782             rootdir = node['rootdir']
1783             filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1784             with open(filename, 'w+') as sfile:
1785                 sfile.write(content)
1786
1787             # Execute the script
1788             cmd = 'vppctl exec {}'.format(filename)
1789             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1790             if ret != 0:
1791                 logging.debug(stderr)
1792
1793             print("\nA script has been created at {}".format(filename))
1794             print("This script can be run using the following:")
1795             print("vppctl exec {}\n".format(filename))
1796
1797     def _iperf_vm_questions(self, node):
1798         """
1799         Ask the user which interface to connect to the iperf VM and
1800         create a vhost-user virtual interface for it
1801
1802         :param node: Node dictionary.
1803         :type node: dict
1804         :returns: A list with the chosen interface and its virtual interface
1805         :rtype: list
1806         """
1807
1808         vpputl = VPPUtil()
1809         interfaces = vpputl.get_hardware(node)
1810         if interfaces == {}:
1811             return []
1812
1813         # First delete all the Virtual interfaces
1814         for intf in sorted(interfaces.items()):
1815             name = intf[0]
1816             if name[:7] == 'Virtual':
1817                 cmd = 'vppctl delete vhost-user {}'.format(name)
1818                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1819                 if ret != 0:
1820                     logging.debug('{} failed on node {} {}'.format(
1821                         cmd, node['host'], stderr))
1822
1823         # Create a virtual interface for the interface the user picks
1824         interfaces = vpputl.get_hardware(node)
1825         if interfaces == {}:
1826             return []
1827         interfaces_with_virtual_interfaces = []
1828         inum = 1
1829
1830         while True:
1831             print('\nPlease pick one interface to connect to the iperf VM.')
1832             for intf in sorted(interfaces.items()):
1833                 name = intf[0]
1834                 if name == 'local0':
1835                     continue
1836
1837                 question = "Would you like to connect this interface {} to " \
1838                            "the VM [y/N]? ".format(name)
1839                 answer = self._ask_user_yn(question, 'n')
1840                 if answer == 'y':
1841                     self._sockfilename = '/var/run/vpp/{}.sock'.format(
1842                         name.replace('/', '_'))
1843                     if os.path.exists(self._sockfilename):
1844                         os.remove(self._sockfilename)
1845                     cmd = 'vppctl create vhost-user socket {} server'.format(
1846                         self._sockfilename)
1847                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1848                     if ret != 0:
1849                         raise RuntimeError(
1850                             "Couldn't execute the command {}, {}.".format(
1851                                 cmd, stderr))
1852                     vintname = stdout.rstrip('\r\n')
1853
1854                     cmd = 'chmod 777 {}'.format(self._sockfilename)
1855                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1856                     if ret != 0:
1857                         raise RuntimeError(
1858                             "Couldn't execute the command {}, {}.".format(
1859                                 cmd, stderr))
1860
1861                     interface = {'name': name,
1862                                  'virtualinterface': '{}'.format(vintname),
1863                                  'bridge': '{}'.format(inum)}
1864                     inum += 1
1865                     interfaces_with_virtual_interfaces.append(interface)
1866                     return interfaces_with_virtual_interfaces
1867
1868     def create_and_bridge_iperf_virtual_interface(self):
1869         """
1870         After asking the user some questions, create and bridge a
1871         virtual interface to be used with the iperf VM
1872
1873         """
1874
1875         for i in self._nodes.items():
1876             node = i[1]
1877
1878             # Show the current bridge and interface configuration
1879             print("\nThis is the current bridge configuration:")
1880             ifaces = VPPUtil.show_bridge(node)
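            # If the user keeps the current configuration, reuse the existing
            # vhost-user socket of the first bridged interface when it still
            # exists; otherwise fall through and rebuild the configuration.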
1881             question = "\nWould you like to keep this configuration [Y/n]? "
1882             answer = self._ask_user_yn(question, 'y')
1883             if answer == 'y':
1884                 self._sockfilename = '/var/run/vpp/{}.sock'.format(
1885                     ifaces[0]['name'].replace('/', '_'))
1886                 if os.path.exists(self._sockfilename):
1887                     continue
1888
1889             # Create a script that builds a bridge configuration with
1890             # physical interfaces and virtual interfaces
1891             ints_with_vints = self._iperf_vm_questions(node)
1892             content = ''
1893             for intf in ints_with_vints:
1894                 vhoststr = '\n'.join([
1895                     'comment { The following command creates the socket }',
1896                     'comment { and returns a virtual interface }',
1897                     'comment {{ create vhost-user socket '
1898                     '/var/run/vpp/sock{}.sock server }}\n'.format(
1899                         intf['bridge'])
1900                 ])
1901
1902                 setintdnstr = 'set interface state {} down\n'.format(
1903                     intf['name'])
1904
1905                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1906                     intf['name'], intf['bridge'])
1907                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1908                     intf['virtualinterface'], intf['bridge'])
1909
1910                 # set interface state VirtualEthernet/0/0/0 up
1911                 setintvststr = 'set interface state {} up\n'.format(
1912                     intf['virtualinterface'])
1913
1914                 # Bring the physical interface back up
1915                 setintupstr = 'set interface state {} up\n'.format(
1916                     intf['name'])
1917
1918                 content += (vhoststr + setintdnstr + setintbrstr +
                                 setvintbrstr + setintvststr + setintupstr)
1919
1920             # Write the content to the script
1921             rootdir = node['rootdir']
1922             filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
1923             with open(filename, 'w+') as sfile:
1924                 sfile.write(content)
1925
1926             # Execute the script
1927             cmd = 'vppctl exec {}'.format(filename)
1928             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1929             if ret != 0:
1930                 logging.debug(stderr)
1931
1932             print("\nA script has been created at {}".format(filename))
1933             print("This script can be run using the following:")
1934             print("vppctl exec {}\n".format(filename))
1935
1936     @staticmethod
1937     def destroy_iperf_vm(name):
1938         """
1939         Destroy the iperf VM with the given name if it is currently
1940         running.
1941
1942         :param name: The name of the VM to be destroyed
1943         :type name: str
1944         """
1945
1946         cmd = 'virsh list'
1947         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1948         if ret != 0:
1949             logging.debug(stderr)
1950             raise RuntimeError(
1951                 "Couldn't execute the command {} : {}".format(cmd, stderr))
1952
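        # Only attempt 'virsh destroy' when the VM name appears in the
        # 'virsh list' output, i.e. when the VM is currently running.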
1953         if re.findall(name, stdout):
1954             cmd = 'virsh destroy {}'.format(name)
1955             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1956             if ret != 0:
1957                 logging.debug(stderr)
1958                 raise RuntimeError(
1959                     "Couldn't execute the command {} : {}".format(
1960                         cmd, stderr))
1961
1962     def create_iperf_vm(self, vmname):
1963         """
1964         Create the iperf VM from the distribution's libvirt XML template
1965         and attach it to the vhost-user socket created earlier
1966
1967         """
1968
1969         # Read the iperf VM template file
1970         distro = VPPUtil.get_linux_distro()
1971         if distro[0] == 'Ubuntu':
1972             tfilename = \
1973                 '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(
1974                     self._rootdir)
1975         else:
1976             tfilename = \
1977                 '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(
1978                     self._rootdir)
1979
1980         with open(tfilename, 'r') as tfile:
1981             tcontents = tfile.read()
1983
1984         # Add the variables
1985         imagename = '{}/vpp/vpp-config/{}'.format(
1986             self._rootdir, IPERFVM_IMAGE)
1987         isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
1988         tcontents = tcontents.format(vmname=vmname, imagename=imagename,
1989                                      isoname=isoname,
1990                                      vhostsocketname=self._sockfilename)
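        # The template is assumed to contain str.format() placeholders named
        # {vmname}, {imagename}, {isoname} and {vhostsocketname}; they are
        # filled in here before the domain XML is written out.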
1991
1992         # Write the xml
1993         ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
1994         with open(ifilename, 'w+') as ifile:
1995             ifile.write(tcontents)
1997
1998         cmd = 'virsh create {}'.format(ifilename)
1999         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
2000         if ret != 0:
2001             logging.debug(stderr)
2002             raise RuntimeError(
2003                 "Couldn't execute the command {} : {}".format(cmd, stderr))