extras/vpp_config/vpplib/AutoConfig.py
1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Library that supports Auto Configuration."""
15 from __future__ import absolute_import, division, print_function
16
17 import logging
18 import os
19 import re
20 from ipaddress import ip_address, ip_network
21
22 import yaml
23
24 from vpplib.VPPUtil import VPPUtil
25 from vpplib.VppPCIUtil import VppPCIUtil
26 from vpplib.VppHugePageUtil import VppHugePageUtil
27 from vpplib.CpuUtils import CpuUtils
28 from vpplib.VppGrubUtil import VppGrubUtil
29 from vpplib.QemuUtils import QemuUtils
30
31 #  Python2/3 compatible
32 try:
33     input = raw_input  # noqa
34 except NameError:
35     pass
36
37 __all__ = ["AutoConfig"]
38
39 # Constants
40 MIN_SYSTEM_CPUS = 2
41 MIN_TOTAL_HUGE_PAGES = 1024
42 MAX_PERCENT_FOR_HUGE_PAGES = 70
43
44 IPERFVM_XML = 'configs/iperf-vm.xml'
45 IPERFVM_IMAGE = 'images/xenial-mod.img'
46 IPERFVM_ISO = 'configs/cloud-config.iso'
47
48
49 class AutoConfig(object):
50     """Auto Configuration Tools"""
51
52     def __init__(self, rootdir, filename, clean=False):
53         """
54         The Auto Configure class.
55
56         :param rootdir: The root directory for all the auto configuration files
57         :param filename: The autoconfiguration file
58         :param clean: When set initialize the nodes from the auto-config file
59         :type rootdir: str
60         :type filename: str
61         :type clean: bool
62         """
63         self._autoconfig_filename = rootdir + filename
64         self._rootdir = rootdir
65         self._metadata = {}
66         self._nodes = {}
67         self._vpp_devices_node = {}
68         self._hugepage_config = ""
69         self._clean = clean
70         self._loadconfig()
71         self._sockfilename = ""
72
73     def get_nodes(self):
74         """
75         Returns the nodes dictionary.
76
77         :returns: The nodes
78         :rtype: dictionary
79         """
80
81         return self._nodes
82
83     @staticmethod
84     def _autoconfig_backup_file(filename):
85         """
86         Create a backup file.
87
88         :param filename: The file to backup
89         :type filename: str
90         """
91
92         # Does a copy of the file exist, if not create one
93         ofile = filename + '.orig'
94         (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
95         if ret != 0:
96             logging.debug(stderr)
97             if stdout.strip('\n') != ofile:
98                 cmd = 'sudo cp {} {}'.format(filename, ofile)
99                 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
100                 if ret != 0:
101                     logging.debug(stderr)
102
103     # noinspection PyBroadException
104     @staticmethod
105     def _ask_user_ipv4():
106         """
107         Asks the user for an IPv4 address and prefix length.
108         If only an address is given, the user is also prompted for a netmask.
109
110         :returns: IP address with cidr
111         :rtype: str
112         """
113
114         while True:
115             answer = input("Please enter the IPv4 Address [n.n.n.n/n]: ")
116             try:
117                 ipinput = answer.split('/')
118                 ipaddr = ip_address(ipinput[0])
119                 if len(ipinput) > 1:
120                     plen = answer.split('/')[1]
121                 else:
122                     answer = input("Please enter the netmask [n.n.n.n]: ")
123                     plen = ip_network('0.0.0.0/{}'.format(answer)).prefixlen
124                 return '{}/{}'.format(ipaddr, plen)
125             except ValueError:
126                 print("Please enter a valid IPv4 address.")
127
128     @staticmethod
129     def _ask_user_range(question, first, last, default):
130         """
131         Asks the user for a number within a range.
132         default is returned if return is entered.
133
134         :param question: Text of a question.
135         :param first: First number in the range
136         :param last: Last number in the range
137         :param default: The value returned when return is entered
138         :type question: string
139         :type first: int
140         :type last: int
141         :type default: int
142         :returns: The answer to the question
143         :rtype: int
144         """
145
146         while True:
147             answer = input(question)
148             if answer == '':
149                 answer = default
150                 break
151             if re.findall(r'^\d+$', answer):
152                 if int(answer) in range(first, last + 1):
153                     break
154                 else:
155                     print("Please enter a value between {} and {} or Return.".
156                           format(first, last))
157             else:
158                 print("Please enter a number between {} and {} or Return.".
159                       format(first, last))
160
161         return int(answer)
162
163     @staticmethod
164     def _ask_user_yn(question, default):
165         """
166         Asks the user for a yes or no question.
167
168         :param question: Text of a question.
169         :param default: The value returned when return is entered
170         :type question: string
171         :type default: string
172         :returns: The answer to the question
173         :rtype: string
174         """
175
176         input_valid = False
177         default = default.lower()
178         answer = ''
179         while not input_valid:
180             answer = input(question)
181             if answer == '':
182                 answer = default
183             if re.findall(r'[YyNn]', answer):
184                 input_valid = True
185                 answer = answer[0].lower()
186             else:
187                 print("Please answer Y, N or Return.")
188
189         return answer
190
191     def _loadconfig(self):
192         """
193         Load the testbed configuration, given the auto configuration file.
194
195         """
196
197         # Get the Topology, from the topology layout file
198         topo = {}
199         with open(self._autoconfig_filename, 'r') as stream:
200             try:
201                 topo = yaml.safe_load(stream)
202                 if 'metadata' in topo:
203                     self._metadata = topo['metadata']
204             except yaml.YAMLError as exc:
205                 raise RuntimeError(
206                     "Couldn't read the Auto config file {}: {}".format(
207                         self._autoconfig_filename, exc))
208
209         systemfile = self._rootdir + self._metadata['system_config_file']
210         if self._clean is False and os.path.isfile(systemfile):
211             with open(systemfile, 'r') as sysstream:
212                 try:
213                     systopo = yaml.safe_load(sysstream)
214                     if 'nodes' in systopo:
215                         self._nodes = systopo['nodes']
216                 except yaml.YAMLError as sysexc:
217                     raise RuntimeError(
218                         "Couldn't read the System config file {}: {}".format(
219                             systemfile, sysexc))
220         else:
221             # Get the nodes from Auto Config
222             if 'nodes' in topo:
223                 self._nodes = topo['nodes']
224
225         # Set the root directory in all the nodes
226         for i in self._nodes.items():
227             node = i[1]
228             node['rootdir'] = self._rootdir
229
230     def updateconfig(self):
231         """
232         Update the testbed configuration, given the auto configuration file.
233         We will write the system configuration file with the current node
234         information
235
236         """
237
238         # Initialize the yaml data
239         ydata = {'metadata': self._metadata, 'nodes': self._nodes}
240
241         # Write the system config file
242         filename = self._rootdir + self._metadata['system_config_file']
243         with open(filename, 'w') as yamlfile:
244             yaml.dump(ydata, yamlfile)
245
246     def _update_auto_config(self):
247         """
248         Write the auto configuration file with the new configuration data,
249         input from the user.
250
251         """
252
253         # Initialize the yaml data
254         nodes = {}
255         with open(self._autoconfig_filename, 'r') as stream:
256             try:
257                 ydata = yaml.safe_load(stream)
258                 if 'nodes' in ydata:
259                     nodes = ydata['nodes']
260             except yaml.YAMLError as exc:
261                 print(exc)
262                 return
263
264         for i in nodes.items():
265             key = i[0]
266             node = i[1]
267
268             # Interfaces
269             node['interfaces'] = {}
270             for item in self._nodes[key]['interfaces'].items():
271                 port = item[0]
272                 interface = item[1]
273
274                 node['interfaces'][port] = {}
275                 addr = '{}'.format(interface['pci_address'])
276                 node['interfaces'][port]['pci_address'] = addr
277                 if 'mac_address' in interface:
278                     node['interfaces'][port]['mac_address'] = \
279                         interface['mac_address']
280
281             if 'total_other_cpus' in self._nodes[key]['cpu']:
282                 node['cpu']['total_other_cpus'] = \
283                     self._nodes[key]['cpu']['total_other_cpus']
284             if 'total_vpp_cpus' in self._nodes[key]['cpu']:
285                 node['cpu']['total_vpp_cpus'] = \
286                     self._nodes[key]['cpu']['total_vpp_cpus']
287             if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
288                 node['cpu']['reserve_vpp_main_core'] = \
289                     self._nodes[key]['cpu']['reserve_vpp_main_core']
290
291             # TCP
292             if 'active_open_sessions' in self._nodes[key]['tcp']:
293                 node['tcp']['active_open_sessions'] = \
294                     self._nodes[key]['tcp']['active_open_sessions']
295             if 'passive_open_sessions' in self._nodes[key]['tcp']:
296                 node['tcp']['passive_open_sessions'] = \
297                     self._nodes[key]['tcp']['passive_open_sessions']
298
299             # Huge pages
300             node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
301
302         # Write the auto config config file
303         with open(self._autoconfig_filename, 'w') as yamlfile:
304             yaml.dump(ydata, yamlfile)
305
306     def apply_huge_pages(self):
307         """
308         Apply the huge page config
309
310         """
311
312         for i in self._nodes.items():
313             node = i[1]
314
315             hpg = VppHugePageUtil(node)
316             hpg.hugepages_dryrun_apply()
317
318     @staticmethod
319     def _apply_vpp_unix(node):
320         """
321         Apply the VPP Unix config
322
323         :param node: Node dictionary with cpuinfo.
324         :type node: dict
325         """
326
327         unix = '  nodaemon\n'
328         if 'unix' not in node['vpp']:
329             return unix.rstrip('\n')
330
331         unixv = node['vpp']['unix']
332         if 'interactive' in unixv:
333             interactive = unixv['interactive']
334             if interactive is True:
335                 unix = '  interactive\n'
336
337         return unix.rstrip('\n')
338
339     @staticmethod
340     def _apply_vpp_cpu(node):
341         """
342         Apply the VPP cpu config
343
344         :param node: Node dictionary with cpuinfo.
345         :type node: dict
346         """
347
348         # Get main core
349         cpu = '\n'
350         if 'vpp_main_core' in node['cpu']:
351             vpp_main_core = node['cpu']['vpp_main_core']
352         else:
353             vpp_main_core = 0
354         if vpp_main_core != 0:
355             cpu += '  main-core {}\n'.format(vpp_main_core)
356
357         # Get workers
358         vpp_workers = node['cpu']['vpp_workers']
359         vpp_worker_len = len(vpp_workers)
360         if vpp_worker_len > 0:
361             vpp_worker_str = ''
362             for i, worker in enumerate(vpp_workers):
363                 if i > 0:
364                     vpp_worker_str += ','
365                 if worker[0] == worker[1]:
366                     vpp_worker_str += "{}".format(worker[0])
367                 else:
368                     vpp_worker_str += "{}-{}".format(worker[0], worker[1])
369
370             cpu += '  corelist-workers {}\n'.format(vpp_worker_str)
371
372         return cpu
373
374     @staticmethod
375     def _apply_vpp_devices(node):
376         """
377         Apply VPP PCI Device configuration to vpp startup.
378
379         :param node: Node dictionary with cpuinfo.
380         :type node: dict
381         """
382
383         devices = ''
384         ports_per_numa = node['cpu']['ports_per_numa']
385         total_mbufs = node['cpu']['total_mbufs']
386
387         for item in ports_per_numa.items():
388             value = item[1]
389             interfaces = value['interfaces']
390
391             # if 0 was specified for the number of vpp workers, use 1 queue
392             num_rx_queues = None
393             num_tx_queues = None
394             if 'rx_queues' in value:
395                 num_rx_queues = value['rx_queues']
396             if 'tx_queues' in value:
397                 num_tx_queues = value['tx_queues']
398
399             num_rx_desc = None
400             num_tx_desc = None
401
402             # Create the devices string
403             for interface in interfaces:
404                 pci_address = interface['pci_address']
405                 pci_address = pci_address.lstrip("'").rstrip("'")
406                 devices += '\n'
407                 devices += '  dev {} {{ \n'.format(pci_address)
408                 if num_rx_queues:
409                     devices += '    num-rx-queues {}\n'.format(num_rx_queues)
410                 else:
411                     devices += '    num-rx-queues {}\n'.format(1)
412                 if num_tx_queues:
413                     devices += '    num-tx-queues {}\n'.format(num_tx_queues)
414                 if num_rx_desc:
415                     devices += '    num-rx-desc {}\n'.format(num_rx_desc)
416                 if num_tx_desc:
417                     devices += '    num-tx-desc {}\n'.format(num_tx_desc)
418                 devices += '  }'
419
420         # If total mbufs is non-zero and above the default of 16384, set num-mbufs
421         logging.debug("Total mbufs: {}".format(total_mbufs))
422         if total_mbufs != 0 and total_mbufs > 16384:
423             devices += '\n  num-mbufs {}'.format(total_mbufs)
424
425         return devices
426
427     @staticmethod
428     def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end,
429                           total_vpp_workers,
430                           reserve_vpp_main_core):
431         """
432         Calculate the VPP worker information
433
434         :param node: Node dictionary
435         :param vpp_workers: List of VPP workers
436         :param numa_node: Numa node
437         :param other_cpus_end: The end of the cpus allocated for cores
438         other than vpp
439         :param total_vpp_workers: The number of vpp workers needed
440         :param reserve_vpp_main_core: Is there a core needed for
441         the vpp main core
442         :type node: dict
443         :type numa_node: int
444         :type other_cpus_end: int
445         :type total_vpp_workers: int
446         :type reserve_vpp_main_core: bool
447         :returns: Is a core still needed for the vpp main core
448         :rtype: bool
449         """
450
451         # Can we fit the workers in one of these slices
452         cpus = node['cpu']['cpus_per_node'][numa_node]
453         for cpu in cpus:
454             start = cpu[0]
455             end = cpu[1]
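            # Skip the cpus reserved for non-VPP processes and, if required,
            # leave one more cpu free for the VPP main core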
456             if start <= other_cpus_end:
457                 start = other_cpus_end + 1
458
459             if reserve_vpp_main_core:
460                 start += 1
461
462             workers_end = start + total_vpp_workers - 1
463
464             if workers_end <= end:
465                 if reserve_vpp_main_core:
466                     node['cpu']['vpp_main_core'] = start - 1
467                 reserve_vpp_main_core = False
468                 if total_vpp_workers:
469                     vpp_workers.append((start, workers_end))
470                 break
471
472         # We still need to reserve the main core
473         if reserve_vpp_main_core:
474             node['cpu']['vpp_main_core'] = other_cpus_end + 1
475
476         return reserve_vpp_main_core
477
478     @staticmethod
479     def _calc_desc_and_queues(total_numa_nodes,
480                               total_ports_per_numa,
481                               total_rx_queues,
482                               ports_per_numa_value):
483         """
484         Calculate the number of descriptors and queues
485
486         :param total_numa_nodes: The total number of numa nodes
487         :param total_ports_per_numa: The total number of ports for this
488         numa node
489         :param total_rx_queues: The total number of rx queues / port
490         :param ports_per_numa_value: The value from the ports_per_numa
491         dictionary
492         :type total_numa_nodes: int
493         :type total_ports_per_numa: int
494         :type total_rx_queues: int
495         :type ports_per_numa_value: dict
496         :returns: The total number of message buffers
497         :rtype: int
498         """
499
500         # Get the number of rx queues
501         rx_queues = max(1, total_rx_queues)
502         tx_queues = rx_queues * total_numa_nodes + 1
503
504         # Get the descriptor entries
505         desc_entries = 1024
506         ports_per_numa_value['rx_queues'] = rx_queues
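        # One mbuf is needed for every rx and tx descriptor of every queue
        # on each port of this numa node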
507         total_mbufs = (((rx_queues * desc_entries) +
508                         (tx_queues * desc_entries)) *
509                        total_ports_per_numa)
511
512         return total_mbufs
513
514     @staticmethod
515     def _create_ports_per_numa(node, interfaces):
516         """
517         Create a dictionary of ports per numa node
518         :param node: Node dictionary
519         :param interfaces: All the interfaces to be used by vpp
520         :type node: dict
521         :type interfaces: dict
522         :returns: The ports per numa dictionary
523         :rtype: dict
524         """
525
526         # Make a list of ports by numa node
527         ports_per_numa = {}
528         for item in interfaces.items():
529             i = item[1]
530             if i['numa_node'] not in ports_per_numa:
531                 ports_per_numa[i['numa_node']] = {'interfaces': []}
532             ports_per_numa[i['numa_node']]['interfaces'].append(i)
535         node['cpu']['ports_per_numa'] = ports_per_numa
536
537         return ports_per_numa
538
539     def calculate_cpu_parameters(self):
540         """
541         Calculate the cpu configuration.
542
543         """
544
545         # Calculate the cpu parameters, needed for the
546         # vpp_startup and grub configuration
547         for i in self._nodes.items():
548             node = i[1]
549
550             # get total number of nic ports
551             interfaces = node['interfaces']
552
553             # Make a list of ports by numa node
554             ports_per_numa = self._create_ports_per_numa(node, interfaces)
555
556             # Get the number of cpus to skip, we never use the first cpu
557             other_cpus_start = 1
558             other_cpus_end = other_cpus_start + \
559                 node['cpu']['total_other_cpus'] - 1
560             other_workers = None
561             if other_cpus_end != 0:
562                 other_workers = (other_cpus_start, other_cpus_end)
563             node['cpu']['other_workers'] = other_workers
564
565             # Allocate the VPP main core and workers
566             vpp_workers = []
567             reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
568             total_vpp_cpus = node['cpu']['total_vpp_cpus']
569             total_rx_queues = node['cpu']['total_rx_queues']
570
571             # If total_vpp_cpus is 0 or is less than the numa nodes with ports
572             #  then we shouldn't get workers
573             total_workers_node = 0
574             if len(ports_per_numa):
575                 total_workers_node = total_vpp_cpus // len(ports_per_numa)
576             total_main = 0
577             if reserve_vpp_main_core:
578                 total_main = 1
579             total_mbufs = 0
580             if total_main + total_workers_node != 0:
581                 for item in ports_per_numa.items():
582                     numa_node = item[0]
583                     value = item[1]
584
585                     # Get the number of descriptors and queues
586                     mbufs = self._calc_desc_and_queues(
587                         len(ports_per_numa),
588                         len(value['interfaces']), total_rx_queues, value)
589                     total_mbufs += mbufs
590
591                     # Get the VPP workers
592                     reserve_vpp_main_core = self._calc_vpp_workers(
593                         node, vpp_workers, numa_node,
594                         other_cpus_end, total_workers_node,
595                         reserve_vpp_main_core)
596
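                # Scale the estimate up by 2.5x to leave headroom beyond the
                # raw per-descriptor buffer count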
597                 total_mbufs *= 2.5
598                 total_mbufs = int(total_mbufs)
599             else:
600                 total_mbufs = 0
601
602             # Save the info
603             node['cpu']['vpp_workers'] = vpp_workers
604             node['cpu']['total_mbufs'] = total_mbufs
605
606         # Write the config
607         self.updateconfig()
608
609     @staticmethod
610     def _apply_vpp_tcp(node):
611         """
612         Apply the VPP TCP config
613
614         :param node: Node dictionary with cpuinfo.
615         :type node: dict
616         """
617
618         active_open_sessions = node['tcp']['active_open_sessions']
619         aos = int(active_open_sessions)
620
621         passive_open_sessions = node['tcp']['passive_open_sessions']
622         pos = int(passive_open_sessions)
623
624         # Generate the api-segment gid vpp section in any case
625         if (aos + pos) == 0:
626             tcp = '\n'.join([
627                 "api-segment {",
628                 "  gid vpp",
629                 "}"
630             ])
631             return tcp.rstrip('\n')
632
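        # Size the heap, api segment, session tables and tcp pools from the
        # expected number of active and passive open sessions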
633         tcp = '\n'.join([
634             "# TCP stack-related configuration parameters",
635             "# expecting {:d} client sessions, {:d} server sessions\n".format(
636                 aos, pos),
637             "heapsize 4g\n",
638             "api-segment {",
639             "  global-size 2000M",
640             "  api-size 1G",
641             "}\n",
642
643             "session {",
644             "  event-queue-length {:d}".format(aos + pos),
645             "  preallocated-sessions {:d}".format(aos + pos),
646             "  v4-session-table-buckets {:d}".format((aos + pos) // 4),
647             "  v4-session-table-memory 3g\n"
648         ])
649         if aos > 0:
650             tcp = tcp + "  v4-halfopen-table-buckets {:d}".format(
651                 (aos + pos) // 4) + "\n"
652             tcp = tcp + "  v4-halfopen-table-memory 3g\n"
653             tcp = tcp + "  local-endpoints-table-buckets {:d}".format(
654                 (aos + pos) // 4) + "\n"
655             tcp = tcp + "  local-endpoints-table-memory 3g\n"
656         tcp = tcp + "}\n\n"
657
658         tcp = tcp + "tcp {\n"
659         tcp = tcp + "  preallocated-connections {:d}".format(aos + pos) + "\n"
660         if aos > 0:
661             tcp = tcp + "  preallocated-half-open-connections {:d}".format(
662                 aos) + "\n"
663         tcp = tcp + "}\n\n"
664
665         return tcp.rstrip('\n')
666
667     def apply_vpp_startup(self):
668         """
669         Apply the vpp startup configuration
670
671         """
672
673         # Apply the VPP startup configuration
674         for i in self._nodes.items():
675             node = i[1]
676
677             # Get the startup file
678             rootdir = node['rootdir']
679             sfile = rootdir + node['vpp']['startup_config_file']
680
681             # Get the devices
682             devices = self._apply_vpp_devices(node)
683
684             # Get the CPU config
685             cpu = self._apply_vpp_cpu(node)
686
687             # Get the unix config
688             unix = self._apply_vpp_unix(node)
689
690             # Get the TCP configuration, if any
691             tcp = self._apply_vpp_tcp(node)
692
693             # Make a backup if needed
694             self._autoconfig_backup_file(sfile)
695
696             # Get the template
697             tfile = sfile + '.template'
698             (ret, stdout, stderr) = \
699                 VPPUtil.exec_command('cat {}'.format(tfile))
700             if ret != 0:
701                 raise RuntimeError('Executing the cat command failed on node {}'.
702                                    format(node['host']))
703             startup = stdout.format(unix=unix,
704                                     cpu=cpu,
705                                     devices=devices,
706                                     tcp=tcp)
707
708             (ret, stdout, stderr) = \
709                 VPPUtil.exec_command('rm {}'.format(sfile))
710             if ret != 0:
711                 logging.debug(stderr)
712
713             cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
714             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
715             if ret != 0:
716                 raise RuntimeError('Writing config failed node {}'.
717                                    format(node['host']))
718
719     def apply_grub_cmdline(self):
720         """
721         Apply the grub cmdline
722
723         """
724
725         for i in self._nodes.items():
726             node = i[1]
727
728             # Get the isolated CPUs
729             other_workers = node['cpu']['other_workers']
730             vpp_workers = node['cpu']['vpp_workers']
731             if 'vpp_main_core' in node['cpu']:
732                 vpp_main_core = node['cpu']['vpp_main_core']
733             else:
734                 vpp_main_core = 0
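            # Build the isolcpus list from the other-process cores, the VPP
            # main core and the VPP worker cores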
735             all_workers = []
736             if other_workers is not None:
737                 all_workers = [other_workers]
738             if vpp_main_core != 0:
739                 all_workers += [(vpp_main_core, vpp_main_core)]
740             all_workers += vpp_workers
741             isolated_cpus = ''
742             for idx, worker in enumerate(all_workers):
743                 if worker is None:
744                     continue
745                 if idx > 0:
746                     isolated_cpus += ','
747                 if worker[0] == worker[1]:
748                     isolated_cpus += "{}".format(worker[0])
749                 else:
750                     isolated_cpus += "{}-{}".format(worker[0], worker[1])
751
752             vppgrb = VppGrubUtil(node)
753             current_cmdline = vppgrb.get_current_cmdline()
754             if 'grub' not in node:
755                 node['grub'] = {}
756             node['grub']['current_cmdline'] = current_cmdline
757             node['grub']['default_cmdline'] = \
758                 vppgrb.apply_cmdline(node, isolated_cpus)
759
760         self.updateconfig()
761
762     def get_hugepages(self):
763         """
764         Get the hugepage configuration
765
766         """
767
768         for i in self._nodes.items():
769             node = i[1]
770
771             hpg = VppHugePageUtil(node)
772             max_map_count, shmmax = hpg.get_huge_page_config()
773             node['hugepages']['max_map_count'] = max_map_count
774             node['hugepages']['shmax'] = shmmax
775             total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
776             node['hugepages']['actual_total'] = total
777             node['hugepages']['free'] = free
778             node['hugepages']['size'] = size
779             node['hugepages']['memtotal'] = memtotal
780             node['hugepages']['memfree'] = memfree
781
782         self.updateconfig()
783
784     def get_grub(self):
785         """
786         Get the grub configuration
787
788         """
789
790         for i in self._nodes.items():
791             node = i[1]
792
793             vppgrb = VppGrubUtil(node)
794             current_cmdline = vppgrb.get_current_cmdline()
795             default_cmdline = vppgrb.get_default_cmdline()
796
797             # Get the total number of isolated CPUs
798             current_iso_cpus = 0
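            # Parse the isolcpus= kernel argument (e.g. isolcpus=1-3,5) and
            # count how many cpus it isolates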
799             iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
800             iso_cpurl = len(iso_cpur)
801             if iso_cpurl > 0:
802                 iso_cpu_str = iso_cpur[0]
803                 iso_cpu_str = iso_cpu_str.split('=')[1]
804                 iso_cpul = iso_cpu_str.split(',')
805                 for iso_cpu in iso_cpul:
806                     isocpuspl = iso_cpu.split('-')
807                     if len(isocpuspl) == 1:
808                         current_iso_cpus += 1
809                     else:
810                         first = int(isocpuspl[0])
811                         second = int(isocpuspl[1])
812                         if first == second:
813                             current_iso_cpus += 1
814                         else:
815                             current_iso_cpus += second - first + 1
816
817             if 'grub' not in node:
818                 node['grub'] = {}
819             node['grub']['current_cmdline'] = current_cmdline
820             node['grub']['default_cmdline'] = default_cmdline
821             node['grub']['current_iso_cpus'] = current_iso_cpus
822
823         self.updateconfig()
824
825     @staticmethod
826     def _get_device(node):
827         """
828         Get the device configuration for a single node
829
830         :param node: Node dictionary with cpuinfo.
831         :type node: dict
832
833         """
834
835         vpp = VppPCIUtil(node)
836         vpp.get_all_devices()
837
838         # Save the device information
839         node['devices'] = {}
840         node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
841         node['devices']['kernel_devices'] = vpp.get_kernel_devices()
842         node['devices']['other_devices'] = vpp.get_other_devices()
843         node['devices']['linkup_devices'] = vpp.get_link_up_devices()
844
845     def get_devices_per_node(self):
846         """
847         Get the device configuration for all the nodes
848
849         """
850
851         for i in self._nodes.items():
852             node = i[1]
853             # Update the interface data
854
855             self._get_device(node)
856
857         self.updateconfig()
858
859     @staticmethod
860     def get_cpu_layout(node):
861         """
862         Get the cpu layout
863
864         Using 'lscpu -p', get the cpu layout.
865         Returns a list with each item representing a single cpu.
866
867         :param node: Node dictionary.
868         :type node: dict
869         :returns: The cpu layout
870         :rtype: list
871         """
872
873         cmd = 'lscpu -p'
874         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
875         if ret != 0:
876             raise RuntimeError('{} failed on node {} {}'.
877                                format(cmd, node['host'], stderr))
878
879         pcpus = []
880         lines = stdout.split('\n')
881         for line in lines:
882             if line == '' or line[0] == '#':
883                 continue
884             linesplit = line.split(',')
885             layout = {'cpu': linesplit[0], 'core': linesplit[1],
886                       'socket': linesplit[2], 'node': linesplit[3]}
887
888             # cpu, core, socket, node
889             pcpus.append(layout)
890
891         return pcpus
892
893     def get_cpu(self):
894         """
895         Get the cpu configuration
896
897         """
898
899         # Get the CPU layout
900         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
901
902         for i in self._nodes.items():
903             node = i[1]
904
905             # Get the cpu layout
906             layout = self.get_cpu_layout(node)
907             node['cpu']['layout'] = layout
908
909             cpuinfo = node['cpuinfo']
910             smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
911             node['cpu']['smt_enabled'] = smt_enabled
912
913             # We don't want to write the cpuinfo
914             node['cpuinfo'] = ""
915
916         # Write the config
917         self.updateconfig()
918
919     def discover(self):
920         """
921         Get the current system configuration.
922
923         """
924
925         # Get the Huge Page configuration
926         self.get_hugepages()
927
928         # Get the device configuration
929         self.get_devices_per_node()
930
931         # Get the CPU configuration
932         self.get_cpu()
933
934         # Get the current grub cmdline
935         self.get_grub()
936
937     def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
938         """
939         Ask the user questions related to the cpu configuration.
940
941         :param node: Node dictionary
942         :param total_cpus: The total number of cpus in the system
943         :param numa_nodes: The list of numa nodes in the system
944         :type node: dict
945         :type total_cpus: int
946         :type numa_nodes: list
947         """
948
949         print("\nYour system has {} core(s) and {} Numa Nodes.".
950               format(total_cpus, len(numa_nodes)))
951         print("To begin, we suggest not reserving any cores for "
952               "VPP or other processes.")
953         print("Then to improve performance start reserving cores and "
954               "adding queues as needed.")
955
956         # Leave 1 for the general system
957         total_cpus -= 1
958         max_vpp_cpus = min(total_cpus, 4)
959         total_vpp_cpus = 0
960         if max_vpp_cpus > 0:
961             question = "\nHow many core(s) shall we reserve for " \
962                        "VPP [0-{}][0]? ".format(max_vpp_cpus)
963             total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
964             node['cpu']['total_vpp_cpus'] = total_vpp_cpus
965
966         total_other_cpus = 0
967         max_other_cores = total_cpus - total_vpp_cpus
968         if max_other_cores > 0:
969             question = 'How many core(s) do you want to reserve for ' \
970                        'processes other than VPP? [0-{}][0]? '. \
971                        format(str(max_other_cores))
972             total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
973             node['cpu']['total_other_cpus'] = total_other_cpus
974
975         max_main_cpus = total_cpus - total_vpp_cpus - total_other_cpus
976         reserve_vpp_main_core = False
977         if max_main_cpus > 0:
978             question = "Should we reserve 1 core for the VPP Main thread? "
979             question += "[y/N]? "
980             answer = self._ask_user_yn(question, 'n')
981             if answer == 'y':
982                 reserve_vpp_main_core = True
983             node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
984             node['cpu']['vpp_main_core'] = 0
985
986         question = "How many RX queues per port shall we use for " \
987                    "VPP [1-4][1]? "
988         total_rx_queues = self._ask_user_range(question, 1, 4, 1)
989         node['cpu']['total_rx_queues'] = total_rx_queues
990
991     def modify_cpu(self, ask_questions=True):
992         """
993         Modify the cpu configuration, asking the user for the values.
994
995         :param ask_questions: When true ask the user for config parameters
996
997         """
998
999         # Get the CPU layout
1000         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
1001
1002         for i in self._nodes.items():
1003             node = i[1]
1004             total_cpus = 0
1005             total_cpus_per_slice = 0
1006             cpus_per_node = {}
1007             numa_nodes = []
1008             cores = []
1009             cpu_layout = self.get_cpu_layout(node)
1010
1011             # Assume the number of cpus per slice is always the same as the
1012             # first slice
1013             first_node = '0'
1014             for cpu in cpu_layout:
1015                 if cpu['node'] != first_node:
1016                     break
1017                 total_cpus_per_slice += 1
1018
1019             # Get the total number of cpus, cores, and numa nodes from the
1020             # cpu layout
1021             for cpul in cpu_layout:
1022                 numa_node = cpul['node']
1023                 core = cpul['core']
1024                 cpu = cpul['cpu']
1025                 total_cpus += 1
1026
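                # Group each numa node's cpus into (first, last) ranges, one
                # range per slice of total_cpus_per_slice cpus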
1027                 if numa_node not in cpus_per_node:
1028                     cpus_per_node[numa_node] = []
1029                 cpuperslice = int(cpu) % total_cpus_per_slice
1030                 if cpuperslice == 0:
1031                     cpus_per_node[numa_node].append((int(cpu), int(cpu) +
1032                                                      total_cpus_per_slice - 1))
1033                 if numa_node not in numa_nodes:
1034                     numa_nodes.append(numa_node)
1035                 if core not in cores:
1036                     cores.append(core)
1037             node['cpu']['cpus_per_node'] = cpus_per_node
1038
1039             # Ask the user some questions
1040             if ask_questions and total_cpus >= 4:
1041                 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1042
1043             # Populate the interfaces with the numa node
1044             if 'interfaces' in node:
1045                 ikeys = node['interfaces'].keys()
1046                 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1047
1048             # We don't want to write the cpuinfo
1049             node['cpuinfo'] = ""
1050
1051         # Write the configs
1052         self._update_auto_config()
1053         self.updateconfig()
1054
1055     def _modify_other_devices(self, node,
1056                               other_devices, kernel_devices, dpdk_devices):
1057         """
1058         Modify the devices configuration, asking the user for the values.
1059
1060         """
1061
1062         odevices_len = len(other_devices)
1063         if odevices_len > 0:
1064             print("\nThese device(s) are currently NOT being used "
1065                   "by VPP or the OS.\n")
1066             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1067             question = "\nWould you like to give any of these devices"
1068             question += " back to the OS [Y/n]? "
1069             answer = self._ask_user_yn(question, 'Y')
1070             if answer == 'y':
1071                 vppd = {}
1072                 for dit in other_devices.items():
1073                     dvid = dit[0]
1074                     device = dit[1]
1075                     question = "Would you like to use device {} for". \
1076                         format(dvid)
1077                     question += " the OS [y/N]? "
1078                     answer = self._ask_user_yn(question, 'n')
1079                     if answer == 'y':
1080                         if 'unused' in device and len(
1081                                 device['unused']) != 0 and \
1082                                 device['unused'][0] != '':
1083                             driver = device['unused'][0]
1084                             ret = VppPCIUtil.bind_vpp_device(
1085                                 node, driver, dvid)
1086                             if ret:
1087                                 logging.debug(
1088                                     'Could not bind device {}'.format(dvid))
1089                             else:
1090                                 vppd[dvid] = device
1091                 for dit in vppd.items():
1092                     dvid = dit[0]
1093                     device = dit[1]
1094                     kernel_devices[dvid] = device
1095                     del other_devices[dvid]
1096
1097         odevices_len = len(other_devices)
1098         if odevices_len > 0:
1099             print("\nThese device(s) are still NOT being used "
1100                   "by VPP or the OS.\n")
1101             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1102             question = "\nWould you like use any of these for VPP [y/N]? "
1103             answer = self._ask_user_yn(question, 'N')
1104             if answer == 'y':
1105                 vppd = {}
1106                 for dit in other_devices.items():
1107                     dvid = dit[0]
1108                     device = dit[1]
1109                     question = "Would you like to use device {} ".format(dvid)
1110                     question += "for VPP [y/N]? "
1111                     answer = self._ask_user_yn(question, 'n')
1112                     if answer == 'y':
1113                         vppd[dvid] = device
1114                 for dit in vppd.items():
1115                     dvid = dit[0]
1116                     device = dit[1]
1117                     if 'unused' in device and len(device['unused']) != 0 and \
1118                             device['unused'][0] != '':
1119                         driver = device['unused'][0]
1120                         logging.debug(
1121                             'Binding device {} to driver {}'.format(dvid,
1122                                                                     driver))
1123                         ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1124                         if ret:
1125                             logging.debug(
1126                                 'Could not bind device {}'.format(dvid))
1127                         else:
1128                             dpdk_devices[dvid] = device
1129                             del other_devices[dvid]
1130
1131     def update_interfaces_config(self):
1132         """
1133         Modify the interfaces directly from the config file.
1134
1135         """
1136
1137         for i in self._nodes.items():
1138             node = i[1]
1139             devices = node['devices']
1140             all_devices = devices['other_devices']
1141             all_devices.update(devices['dpdk_devices'])
1142             all_devices.update(devices['kernel_devices'])
1143
1144             current_ifcs = {}
1145             interfaces = {}
1146             if 'interfaces' in node:
1147                 current_ifcs = node['interfaces']
1148             if current_ifcs:
1149                 for ifc in current_ifcs.values():
1150                     dvid = ifc['pci_address']
1151                     if dvid in all_devices:
1152                         VppPCIUtil.vpp_create_interface(interfaces, dvid,
1153                                                         all_devices[dvid])
1154             node['interfaces'] = interfaces
1155
1156         self.updateconfig()
1157
1158     def modify_devices(self):
1159         """
1160         Modify the devices configuration, asking the user for the values.
1161
1162         """
1163
1164         for i in self._nodes.items():
1165             node = i[1]
1166             devices = node['devices']
1167             other_devices = devices['other_devices']
1168             kernel_devices = devices['kernel_devices']
1169             dpdk_devices = devices['dpdk_devices']
1170
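            # First offer unused devices back to the OS or to VPP, then offer
            # kernel-bound devices to VPP, then allow devices already bound
            # to DPDK to be returned to the kernel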
1171             if other_devices:
1172                 self._modify_other_devices(node, other_devices,
1173                                            kernel_devices, dpdk_devices)
1174
1175                 # Get the devices again for this node
1176                 self._get_device(node)
1177                 devices = node['devices']
1178                 kernel_devices = devices['kernel_devices']
1179                 dpdk_devices = devices['dpdk_devices']
1180
1181             klen = len(kernel_devices)
1182             if klen > 0:
1183                 print("\nThese devices are safe to be used with VPP.\n")
1184                 VppPCIUtil.show_vpp_devices(kernel_devices)
1185                 question = "\nWould you like to use any of these " \
1186                            "device(s) for VPP [y/N]? "
1187                 answer = self._ask_user_yn(question, 'n')
1188                 if answer == 'y':
1189                     vppd = {}
1190                     for dit in kernel_devices.items():
1191                         dvid = dit[0]
1192                         device = dit[1]
1193                         question = "Would you like to use device {} ".format(dvid)
1194                         question += "for VPP [y/N]? "
1195                         answer = self._ask_user_yn(question, 'n')
1196                         if answer == 'y':
1197                             vppd[dvid] = device
1198                     for dit in vppd.items():
1199                         dvid = dit[0]
1200                         device = dit[1]
1201                         if 'unused' in device and len(
1202                                 device['unused']) != 0 and device['unused'][
1203                                 0] != '':
1204                             driver = device['unused'][0]
1205                             question = "Would you like to bind the driver {} for {} [y/N]? ".format(driver, dvid)
1206                             answer = self._ask_user_yn(question, 'n')
1207                             if answer == 'y':
1208                                 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1209                                 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1210                                 if ret:
1211                                     logging.debug('Could not bind device {}'.format(dvid))
1212                         dpdk_devices[dvid] = device
1213                         del kernel_devices[dvid]
1214
1215             dlen = len(dpdk_devices)
1216             if dlen > 0:
1217                 print("\nThese device(s) are already using DPDK.\n")
1218                 VppPCIUtil.show_vpp_devices(dpdk_devices,
1219                                             show_interfaces=False)
1220                 question = "\nWould you like to remove any of "
1221                 question += "these device(s) [y/N]? "
1222                 answer = self._ask_user_yn(question, 'n')
1223                 if answer == 'y':
1224                     vppdl = {}
1225                     for dit in dpdk_devices.items():
1226                         dvid = dit[0]
1227                         device = dit[1]
1228                         question = "Would you like to remove {} [y/N]? ". \
1229                             format(dvid)
1230                         answer = self._ask_user_yn(question, 'n')
1231                         if answer == 'y':
1232                             vppdl[dvid] = device
1233                     for dit in vppdl.items():
1234                         dvid = dit[0]
1235                         device = dit[1]
1236                         if 'unused' in device and len(
1237                                 device['unused']) != 0 and device['unused'][
1238                                 0] != '':
1239                             driver = device['unused'][0]
1240                             logging.debug(
1241                                 'Binding device {} to driver {}'.format(
1242                                     dvid, driver))
1243                             ret = VppPCIUtil.bind_vpp_device(node, driver,
1244                                                              dvid)
1245                             if ret:
1246                                 logging.debug(
1247                                     'Could not bind device {}'.format(dvid))
1248                             else:
1249                                 kernel_devices[dvid] = device
1250                                 del dpdk_devices[dvid]
1251
1252             interfaces = {}
1253             for dit in dpdk_devices.items():
1254                 dvid = dit[0]
1255                 device = dit[1]
1256                 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1257             node['interfaces'] = interfaces
1258
1259         self._update_auto_config()
1260         self.updateconfig()
1261
1262     def modify_huge_pages(self):
1263         """
1264         Modify the huge page configuration, asking the user for the values.
1265
1266         """
1267
1268         for i in self._nodes.items():
1269             node = i[1]
1270
1271             total = node['hugepages']['actual_total']
1272             free = node['hugepages']['free']
1273             size = node['hugepages']['size']
1274             memfree = node['hugepages']['memfree'].split(' ')[0]
1275             hugesize = int(size.split(' ')[0])
1276             # The max number of huge pages should be no more than
1277             # 70% of total free memory
1278             maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // \
1279                 hugesize
1280             print("\nThere are currently {} {} huge pages free.".format(
1281                 free, size))
1282             question = "Do you want to reconfigure the number of " \
1283                        "huge pages [y/N]? "
1284             answer = self._ask_user_yn(question, 'n')
1285             if answer == 'n':
1286                 node['hugepages']['total'] = total
1287                 continue
1288
1289             print("\nThere is currently a total of {} huge pages.".
1290                   format(total))
1291             question = "How many huge pages do you want [{} - {}][{}]? ". \
1292                 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1293             answer = self._ask_user_range(question, MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1294             node['hugepages']['total'] = str(answer)
1295
1296         # Update auto-config.yaml
1297         self._update_auto_config()
1298
1299         # Rediscover just the hugepages
1300         self.get_hugepages()
1301
1302     def get_tcp_params(self):
1303         """
1304         Get the tcp configuration
1305
1306         """
1307         # maybe nothing to do here?
1308         self.updateconfig()
1309
1310     def acquire_tcp_params(self):
1311         """
1312         Ask the user for TCP stack configuration parameters
1313
1314         """
1315
1316         for i in self._nodes.items():
1317             node = i[1]
1318
1319             question = "\nHow many active-open / tcp client sessions are " \
1320                        "expected [0-10000000][0]? "
1321             answer = self._ask_user_range(question, 0, 10000000, 0)
1322             # Less than 10K is equivalent to 0
1323             if int(answer) < 10000:
1324                 answer = 0
1325             node['tcp']['active_open_sessions'] = answer
1326
1327             question = "How many passive-open / tcp server sessions are " \
1328                        "expected [0-10000000][0]? "
1329             answer = self._ask_user_range(question, 0, 10000000, 0)
1330             # Less than 10K is equivalent to 0
1331             if int(answer) < 10000:
1332                 answer = 0
1333             node['tcp']['passive_open_sessions'] = answer
1334
1335         # Update auto-config.yaml
1336         self._update_auto_config()
1337
1338         # Rediscover tcp parameters
1339         self.get_tcp_params()
1340
1341     @staticmethod
1342     def patch_qemu(node):
1343         """
1344         Patch qemu with the correct patches.
1345
1346         :param node: Node dictionary
1347         :type node: dict
1348         """
1349
1350         print('\nWe are patching the node "{}":\n'.format(node['host']))
1351         QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
1352
1353     @staticmethod
1354     def cpu_info(node):
1355         """
1356         print the CPU information
1357
1358         """
1359
1360         cpu = CpuUtils.get_cpu_info_per_node(node)
1361
1362         item = 'Model name'
1363         if item in cpu:
1364             print("{:>20}:    {}".format(item, cpu[item]))
1365         item = 'CPU(s)'
1366         if item in cpu:
1367             print("{:>20}:    {}".format(item, cpu[item]))
1368         item = 'Thread(s) per core'
1369         if item in cpu:
1370             print("{:>20}:    {}".format(item, cpu[item]))
1371         item = 'Core(s) per socket'
1372         if item in cpu:
1373             print("{:>20}:    {}".format(item, cpu[item]))
1374         item = 'Socket(s)'
1375         if item in cpu:
1376             print("{:>20}:    {}".format(item, cpu[item]))
1377         item = 'NUMA node(s)'
1378         numa_nodes = 0
1379         if item in cpu:
1380             numa_nodes = int(cpu[item])
1381         for i in range(0, numa_nodes):
1382             item = "NUMA node{} CPU(s)".format(i)
1383             print("{:>20}:    {}".format(item, cpu[item]))
1384         item = 'CPU max MHz'
1385         if item in cpu:
1386             print("{:>20}:    {}".format(item, cpu[item]))
1387         item = 'CPU min MHz'
1388         if item in cpu:
1389             print("{:>20}:    {}".format(item, cpu[item]))
1390
1391         if node['cpu']['smt_enabled']:
1392             smt = 'Enabled'
1393         else:
1394             smt = 'Disabled'
1395         print("{:>20}:    {}".format('SMT', smt))
1396
1397         # VPP Threads
1398         print("\nVPP Threads: (Name: Cpu Number)")
1399         vpp_processes = cpu['vpp_processes']
1400         for i in vpp_processes.items():
1401             print("  {:10}: {:4}".format(i[0], i[1]))
1402
1403     @staticmethod
1404     def device_info(node):
1405         """
1406         Show the device information.
1407
1408         """
1409
1410         if 'cpu' in node and 'total_mbufs' in node['cpu']:
1411             total_mbufs = node['cpu']['total_mbufs']
1412             if total_mbufs != 0:
1413                 print("Total Number of Buffers: {}".format(total_mbufs))
1414
1415         vpp = VppPCIUtil(node)
1416         vpp.get_all_devices()
1417         linkup_devs = vpp.get_link_up_devices()
1418         if len(linkup_devs):
1419             print("\nDevices with link up (can not be used with VPP):")
1420             vpp.show_vpp_devices(linkup_devs, show_header=False)
1421             # for dev in linkup_devs:
1422             #    print ("    " + dev)
1423         kernel_devs = vpp.get_kernel_devices()
1424         if len(kernel_devs):
1425             print("\nDevices bound to kernel drivers:")
1426             vpp.show_vpp_devices(kernel_devs, show_header=False)
1427         else:
1428             print("\nNo devices bound to kernel drivers")
1429
1430         dpdk_devs = vpp.get_dpdk_devices()
1431         if len(dpdk_devs):
1432             print("\nDevices bound to DPDK drivers:")
1433             vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1434                                  show_header=False)
1435         else:
1436             print("\nNo devices bound to DPDK drivers")
1437
1438         other_devs = vpp.get_other_devices()
1439         if len(other_devs):
1440             print("\nDevices not bound to Kernel or DPDK drivers:")
1441             vpp.show_vpp_devices(other_devs, show_interfaces=True,
1442                                  show_header=False)
1443         else:
1444             print("\nNo devices not bound to Kernel or DPDK drivers")
1445
1446         vpputl = VPPUtil()
1447         interfaces = vpputl.get_hardware(node)
1448         if interfaces == {}:
1449             return
1450
1451         print("\nDevices in use by VPP:")
1452
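        # The hardware list returned by VPP always includes 'local0', so
        # fewer than two entries means no physical devices are in use by VPP.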
1453         if len(interfaces.items()) < 2:
1454             print("None")
1455             return
1456
1457         print("{:30} {:4} {:4} {:7} {:4} {:7}".
1458               format('Name', 'Numa', 'RXQs',
1459                      'RXDescs', 'TXQs', 'TXDescs'))
1460         for intf in sorted(interfaces.items()):
1461             name = intf[0]
1462             value = intf[1]
1463             if name == 'local0':
1464                 continue
1465             numa = rx_qs = rx_ds = tx_qs = tx_ds = ''
1466             if 'numa' in value:
1467                 numa = int(value['numa'])
1468             if 'rx queues' in value:
1469                 rx_qs = int(value['rx queues'])
1470             if 'rx descs' in value:
1471                 rx_ds = int(value['rx descs'])
1472             if 'tx queues' in value:
1473                 tx_qs = int(value['tx queues'])
1474             if 'tx descs' in value:
1475                 tx_ds = int(value['tx descs'])
1476
1477             print("{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".
1478                   format(name, numa, rx_qs, rx_ds, tx_qs, tx_ds))
1479
1480     @staticmethod
1481     def hugepage_info(node):
1482         """
1483         Show the huge page information.
1484
1485         """
1486
1487         hpg = VppHugePageUtil(node)
1488         hpg.show_huge_pages()
1489
1490     @staticmethod
1491     def has_interfaces(node):
1492         """
1493         Check for interfaces; return True if there is at least one
1494
1495         :returns: boolean
1496         """
1497         if 'interfaces' in node and len(node['interfaces']):
1498             return True
1499         else:
1500             return False
1501
1502     @staticmethod
1503     def min_system_resources(node):
1504         """
1505         Check the system for basic minimum resources; return True if
1506         there are enough.
1507
1508         :returns: boolean
1509         """
1510
1511         min_sys_res = True
1512
1513         # CPUs
1514         if 'layout' in node['cpu']:
1515             total_cpus = len(node['cpu']['layout'])
1516             if total_cpus < MIN_SYSTEM_CPUS:
1517                 print("\nThere are only {} CPU(s) available on this system. "
1518                       "This is not enough to run VPP.".format(total_cpus))
1519                 min_sys_res = False
1520
1521         # System Memory
1522         if 'free' in node['hugepages'] and \
1523                 'memfree' in node['hugepages'] and \
1524                 'size' in node['hugepages']:
1525             free = node['hugepages']['free']
1526             memfree = float(node['hugepages']['memfree'].split(' ')[0])
1527             hugesize = float(node['hugepages']['size'].split(' ')[0])
1528
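            # Estimate how much memory (same units as memfree, typically kB)
            # the minimum number of huge pages would consume and what
            # percentage of the currently free memory that represents.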
1529             memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1530             percentmemhugepages = (memhugepages / memfree) * 100
1531             if free == '0' and \
1532                     percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1533                 print(
1534                     "\nThe system has only {} kB of free memory. You will "
1535                     "not be able to allocate enough Huge Pages for "
1536                     "VPP.".format(int(memfree))
1537                 )
1539                 min_sys_res = False
1540
1541         return min_sys_res
1542
1543     def sys_info(self):
1544         """
1545         Print the system information
1546
1547         """
1548
1549         for i in self._nodes.items():
1550             print("\n==============================")
1551             name = i[0]
1552             node = i[1]
1553
1554             print("NODE: {}\n".format(name))
1555
1556             # CPU
1557             print("CPU:")
1558             self.cpu_info(node)
1559
1560             # Grub
1561             print("\nGrub Command Line:")
1562             if 'grub' in node:
1563                 print("  Current: {}".format(
1564                     node['grub']['current_cmdline']))
1565                 print("  Configured: {}".format(
1566                     node['grub']['default_cmdline']))
1567
1568             # Huge Pages
1569             print("\nHuge Pages:")
1570             self.hugepage_info(node)
1571
1572             # Devices
1573             print("\nDevices:")
1574             self.device_info(node)
1575
1576             # Status
1577             print("\nVPP Service Status:")
1578             state, errors = VPPUtil.status(node)
1579             print("  {}".format(state))
1580             for e in errors:
1581                 print("  {}".format(e))
1582
1583             # Minimum system resources
1584             self.min_system_resources(node)
1585
1586             print("\n==============================")
1587
1588     def _ipv4_interface_setup_questions(self, node):
1589         """
1590         Ask the user some questions and get a list of interfaces
1591         and IPv4 addresses associated with those interfaces
1592
1593         :param node: Node dictionary.
1594         :type node: dict
1595         :returns: A list of interfaces with IP addresses
1596         :rtype: list
1597         """
1598
1599         vpputl = VPPUtil()
1600         interfaces = vpputl.get_hardware(node)
1601         if interfaces == {}:
1602             return []
1603
1604         interfaces_with_ip = []
1605         for intf in sorted(interfaces.items()):
1606             name = intf[0]
1607             if name == 'local0':
1608                 continue
1609
1610             question = "Would you like to add an address to " \
1611                        "interface {} [Y/n]? ".format(name)
1612             answer = self._ask_user_yn(question, 'y')
1613             if answer == 'y':
1614                 address = {}
1615                 addr = self._ask_user_ipv4()
1616                 address['name'] = name
1617                 address['addr'] = addr
1618                 interfaces_with_ip.append(address)
1619
1620         return interfaces_with_ip
1621
1622     def ipv4_interface_setup(self):
1623         """
1624         After asking the user some questions, assign IPv4 addresses to
1625         the chosen interfaces and bring them up via a generated script
1626
1627         """
1628
1629         for i in self._nodes.items():
1630             node = i[1]
1631
1632             # Show the current interfaces with IP addresses
1633             current_ints = VPPUtil.get_int_ip(node)
1634             if current_ints != {}:
1635                 print("\nThese are the current interfaces with IP addresses:")
1636                 for items in sorted(current_ints.items()):
1637                     name = items[0]
1638                     value = items[1]
1639                     if 'address' not in value:
1640                         address = 'Not Set'
1641                     else:
1642                         address = value['address']
1643                     print("{:30} {:20} {:10}".format(name, address,
1644                                                      value['state']))
1645                 question = "\nWould you like to keep this configuration " \
1646                            "[Y/n]? "
1647                 answer = self._ask_user_yn(question, 'y')
1648                 if answer == 'y':
1649                     continue
1650             else:
1651                 print("\nThere are currently no interfaces with IP "
1652                       "addresses.")
1653
1654             # Create a script that adds the IP addresses to the interfaces
1655             # and brings the interfaces up
1656             ints_with_addrs = self._ipv4_interface_setup_questions(node)
1657             content = ''
1658             for ints in ints_with_addrs:
1659                 name = ints['name']
1660                 addr = ints['addr']
1661                 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1662                 setintupstr = 'set int state {} up\n'.format(name)
1663                 content += setipstr + setintupstr
1664
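            # The generated script contains one pair of commands per selected
            # interface, for example (hypothetical interface name and address):
            #
            #   set int ip address GigabitEthernet0/8/0 192.168.1.1/24
            #   set int state GigabitEthernet0/8/0 up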
1665             # Write the content to the script
1666             rootdir = node['rootdir']
1667             filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1668             with open(filename, 'w+') as sfile:
1669                 sfile.write(content)
1670
1671             # Execute the script
1672             cmd = 'vppctl exec {}'.format(filename)
1673             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1674             if ret != 0:
1675                 logging.debug(stderr)
1676
1677             print("\nA script has been created at {}".format(filename))
1678             print("This script can be run using the following:")
1679             print("vppctl exec {}\n".format(filename))
1680
1681     def _create_vints_questions(self, node):
1682         """
1683         Ask the user some questions and create a vhost-user (virtual)
1684         interface for each physical interface to be connected to a VM
1685
1686         :param node: Node dictionary.
1687         :type node: dict
1688         :returns: A list of interfaces with their associated virtual interfaces
1689         :rtype: list
1690         """
1691
1692         vpputl = VPPUtil()
1693         interfaces = vpputl.get_hardware(node)
1694         if interfaces == {}:
1695             return []
1696
1697         # First delete all the Virtual interfaces
1698         for intf in sorted(interfaces.items()):
1699             name = intf[0]
1700             if name[:7] == 'Virtual':
1701                 cmd = 'vppctl delete vhost-user {}'.format(name)
1702                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1703                 if ret != 0:
1704                     logging.debug('{} failed on node {} {}'.format(
1705                         cmd, node['host'], stderr))
1706
1707         # Create a virtual interface, for each interface the user wants to use
1708         interfaces = vpputl.get_hardware(node)
1709         if interfaces == {}:
1710             return []
1711         interfaces_with_virtual_interfaces = []
1712         inum = 1
1713         for intf in sorted(interfaces.items()):
1714             name = intf[0]
1715             if name == 'local0':
1716                 continue
1717
1718             question = "Would you like to connect this interface {} to " \
1719                        "the VM [Y/n]? ".format(name)
1720             answer = self._ask_user_yn(question, 'y')
1721             if answer == 'y':
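                # Build a per-interface socket path, have VPP create the
                # vhost-user server socket, and capture the name of the
                # VirtualEthernet interface printed by the CLI.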
1722                 sockfilename = '/var/run/vpp/{}.sock'.format(
1723                     name.replace('/', '_'))
1724                 if os.path.exists(sockfilename):
1725                     os.remove(sockfilename)
1726                 cmd = 'vppctl create vhost-user socket {} server'.format(
1727                     sockfilename)
1728                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1729                 if ret != 0:
1730                     raise RuntimeError(
1731                         "Couldn't execute the command {}, {}.".format(cmd,
1732                                                                       stderr))
1733                 vintname = stdout.rstrip('\r\n')
1734
1735                 cmd = 'chmod 777 {}'.format(sockfilename)
1736                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1737                 if ret != 0:
1738                     raise RuntimeError(
1739                         "Couldn't execute the command {}, {}.".format(cmd,
1740                                                                       stderr))
1741
1742                 interface = {'name': name,
1743                              'virtualinterface': '{}'.format(vintname),
1744                              'bridge': '{}'.format(inum)}
1745                 inum += 1
1746                 interfaces_with_virtual_interfaces.append(interface)
1747
1748         return interfaces_with_virtual_interfaces
1749
1750     def create_and_bridge_virtual_interfaces(self):
1751         """
1752         After asking the user some questions, create vhost-user (virtual)
1753         interfaces and bridge them with the physical interfaces
1754
1755         """
1756
1757         for i in self._nodes.items():
1758             node = i[1]
1759
1760             # Show the current bridge and interface configuration
1761             print("\nThis is the current bridge configuration:")
1762             VPPUtil.show_bridge(node)
1763             question = "\nWould you like to keep this configuration [Y/n]? "
1764             answer = self._ask_user_yn(question, 'y')
1765             if answer == 'y':
1766                 continue
1767
1768             # Create a script that builds a bridge configuration with
1769             # physical interfaces and virtual interfaces
1770             ints_with_vints = self._create_vints_questions(node)
1771             content = ''
1772             for intf in ints_with_vints:
1773                 vhoststr = '\n'.join([
1774                     'comment { The following command creates the socket }',
1775                     'comment { and returns a virtual interface }',
1776                     'comment {{ create vhost-user socket '
1777                     '/var/run/vpp/sock{}.sock server }}\n'.format(
1778                         intf['bridge'])
1779                 ])
1780
1781                 setintdnstr = 'set interface state {} down\n'.format(
1782                     intf['name'])
1783
1784                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1785                     intf['name'], intf['bridge'])
1786                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1787                     intf['virtualinterface'], intf['bridge'])
1788
1789                 # set interface state VirtualEthernet/0/0/0 up
1790                 setintvststr = 'set interface state {} up\n'.format(
1791                     intf['virtualinterface'])
1792
1793                 # set interface state <physical interface> up
1794                 setintupstr = 'set interface state {} up\n'.format(
1795                     intf['name'])
1796
1797                 content += vhoststr + setintdnstr + setintbrstr + \
1798                     setvintbrstr + setintvststr + setintupstr
1799
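            # For each selected interface the generated script looks roughly
            # like this (hypothetical interface names, bridge domain 1):
            #
            #   comment { create vhost-user socket /var/run/vpp/sock1.sock server }
            #   set interface state GigabitEthernet0/8/0 down
            #   set interface l2 bridge GigabitEthernet0/8/0 1
            #   set interface l2 bridge VirtualEthernet0/0/0 1
            #   set interface state VirtualEthernet0/0/0 up
            #   set interface state GigabitEthernet0/8/0 up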
1800             # Write the content to the script
1801             rootdir = node['rootdir']
1802             filename = rootdir + \
1803                 '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1804             with open(filename, 'w+') as sfile:
1805                 sfile.write(content)
1806
1807             # Execute the script
1808             cmd = 'vppctl exec {}'.format(filename)
1809             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1810             if ret != 0:
1811                 logging.debug(stderr)
1812
1813             print("\nA script has been created at {}".format(filename))
1814             print("This script can be run using the following:")
1815             print("vppctl exec {}\n".format(filename))
1816
1817     def _iperf_vm_questions(self, node):
1818         """
1819         Ask the user to pick an interface to connect to the iperf VM
1820         and create a vhost-user (virtual) interface for it
1821
1822         :param node: Node dictionary.
1823         :type node: dict
1824         :returns: A list containing the selected interface and its virtual interface
1825         :rtype: list
1826         """
1827
1828         vpputl = VPPUtil()
1829         interfaces = vpputl.get_hardware(node)
1830         if interfaces == {}:
1831             return []
1832
1833         # First delete all the Virtual interfaces
1834         for intf in sorted(interfaces.items()):
1835             name = intf[0]
1836             if name[:7] == 'Virtual':
1837                 cmd = 'vppctl delete vhost-user {}'.format(name)
1838                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1839                 if ret != 0:
1840                     logging.debug('{} failed on node {} {}'.format(
1841                         cmd, node['host'], stderr))
1842
1843         # Create a virtual interface, for each interface the user wants to use
1844         interfaces = vpputl.get_hardware(node)
1845         if interfaces == {}:
1846             return []
1847         interfaces_with_virtual_interfaces = []
1848         inum = 1
1849
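        # Keep prompting until the user picks an interface; the first 'y'
        # answer creates the vhost-user socket and returns immediately.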
1850         while True:
1851             print('\nPlease pick one interface to connect to the iperf VM.')
1852             for intf in sorted(interfaces.items()):
1853                 name = intf[0]
1854                 if name == 'local0':
1855                     continue
1856
1857                 question = "Would you like to connect this interface {} " \
1858                            "to the VM [y/N]? ".format(name)
1859                 answer = self._ask_user_yn(question, 'n')
1860                 if answer == 'y':
1861                     self._sockfilename = '/var/run/vpp/{}.sock'.format(
1862                         name.replace('/', '_'))
1863                     if os.path.exists(self._sockfilename):
1864                         os.remove(self._sockfilename)
1865                     cmd = 'vppctl create vhost-user socket {} server'.format(
1866                         self._sockfilename)
1867                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1868                     if ret != 0:
1869                         raise RuntimeError(
1870                             "Couldn't execute the command {}, {}.".format(
1871                                 cmd, stderr))
1872                     vintname = stdout.rstrip('\r\n')
1873
1874                     cmd = 'chmod 777 {}'.format(self._sockfilename)
1875                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1876                     if ret != 0:
1877                         raise RuntimeError(
1878                             "Couldn't execute the command {}, {}.".format(
1879                                 cmd, stderr))
1880
1881                     interface = {'name': name,
1882                                  'virtualinterface': '{}'.format(vintname),
1883                                  'bridge': '{}'.format(inum)}
1884                     inum += 1
1885                     interfaces_with_virtual_interfaces.append(interface)
1886                     return interfaces_with_virtual_interfaces
1887
1888     def create_and_bridge_iperf_virtual_interface(self):
1889         """
1890         After asking the user some questions, create and bridge a
1891         virtual interface to be used with the iperf VM
1892
1893         """
1894
1895         for i in self._nodes.items():
1896             node = i[1]
1897
1898             # Show the current bridge and interface configuration
1899             print("\nThis is the current bridge configuration:")
1900             ifaces = VPPUtil.show_bridge(node)
1901             question = "\nWould you like to keep this configuration [Y/n]? "
1902             answer = self._ask_user_yn(question, 'y')
1903             if answer == 'y':
1904                 self._sockfilename = '/var/run/vpp/{}.sock'.format(
1905                     ifaces[0]['name'].replace('/', '_'))
1906                 if os.path.exists(self._sockfilename):
1907                     continue
1908
1909             # Create a script that builds a bridge configuration with
1910             # physical interfaces and virtual interfaces
1911             ints_with_vints = self._iperf_vm_questions(node)
1912             content = ''
1913             for intf in ints_with_vints:
1914                 vhoststr = '\n'.join([
1915                     'comment { The following command creates the socket }',
1916                     'comment { and returns a virtual interface }',
1917                     'comment {{ create vhost-user socket '
1918                     '/var/run/vpp/sock{}.sock server }}\n'.format(
1919                         intf['bridge'])
1920                 ])
1921
1922                 setintdnstr = 'set interface state {} down\n'.format(
1923                     intf['name'])
1924
1925                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1926                     intf['name'], intf['bridge'])
1927                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1928                     intf['virtualinterface'], intf['bridge'])
1929
1930                 # set interface state VirtualEthernet/0/0/0 up
1931                 setintvststr = 'set interface state {} up\n'.format(
1932                     intf['virtualinterface'])
1933
1934                 # set interface state <physical interface> up
1935                 setintupstr = 'set interface state {} up\n'.format(
1936                     intf['name'])
1937
1938                 content += vhoststr + setintdnstr + setintbrstr + \
1939                     setvintbrstr + setintvststr + setintupstr
1940
1941             # Write the content to the script
1942             rootdir = node['rootdir']
1943             filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
1944             with open(filename, 'w+') as sfile:
1945                 sfile.write(content)
1946
1947             # Execute the script
1948             cmd = 'vppctl exec {}'.format(filename)
1949             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1950             if ret != 0:
1951                 logging.debug(stderr)
1952
1953             print("\nA script has been created at {}".format(filename))
1954             print("This script can be run using the following:")
1955             print("vppctl exec {}\n".format(filename))
1956
1957     @staticmethod
1958     def destroy_iperf_vm(name):
1959         """
1960         Destroy the named iperf VM if it is currently running
1961
1962         :param name: The name of the VM to be destroyed
1964         :type name: str
1965         """
1966
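        # Only attempt to destroy the domain if it appears in the list of
        # running domains reported by 'virsh list'.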
1967         cmd = 'virsh list'
1968         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1969         if ret != 0:
1970             logging.debug(stderr)
1971             raise RuntimeError(
1972                 "Couldn't execute the command {} : {}".format(cmd, stderr))
1973
1974         if re.findall(name, stdout):
1975             cmd = 'virsh destroy {}'.format(name)
1976             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1977             if ret != 0:
1978                 logging.debug(stderr)
1979                 raise RuntimeError(
1980                     "Couldn't execute the command {} : {}".format(
1981                         cmd, stderr))
1982
1983     def create_iperf_vm(self, vmname):
1984         """
1985         Create the iperf VM from the libvirt XML template and start it
1986         with virsh
1987
1988         """
1989
1990         # Read the iperf VM template file
1991         distro = VPPUtil.get_linux_distro()
1992         if distro[0] == 'Ubuntu':
1993             tfilename = \
1994                 '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(
1995                     self._rootdir)
1996         else:
1997             tfilename = \
1998                 '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(
1999                     self._rootdir)
2000
2001         with open(tfilename, 'r') as tfile:
2002             tcontents = tfile.read()
2004
2005         # Add the variables
2006         imagename = '{}/vpp/vpp-config/{}'.format(
2007             self._rootdir, IPERFVM_IMAGE)
2008         isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
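
        # Fill in the libvirt XML template below. The template is assumed to
        # contain matching named placeholders, e.g. (a sketch of the relevant
        # lines, not the exact template contents):
        #
        #   <name>{vmname}</name>
        #   <source file='{imagename}'/>
        #   <source file='{isoname}'/>
        #   <source type='unix' path='{vhostsocketname}' mode='client'/>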
2009         tcontents = tcontents.format(vmname=vmname, imagename=imagename,
2010                                      isoname=isoname,
2011                                      vhostsocketname=self._sockfilename)
2012
2013         # Write the xml
2014         ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
2015         with open(ifilename, 'w+') as ifile:
2016             ifile.write(tcontents)
2018
2019         cmd = 'virsh create {}'.format(ifilename)
2020         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
2021         if ret != 0:
2022             logging.debug(stderr)
2023             raise RuntimeError(
2024                 "Couldn't execute the command {} : {}".format(cmd, stderr))