Fix inspection for 18.10, requirements
vpp.git: extras/vpp_config/vpplib/AutoConfig.py
1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Library that supports Auto Configuration."""
15
16 import logging
17 import os
18 import re
19 import yaml
20 from netaddr import IPAddress
21
22 from vpplib.VPPUtil import VPPUtil
23 from vpplib.VppPCIUtil import VppPCIUtil
24 from vpplib.VppHugePageUtil import VppHugePageUtil
25 from vpplib.CpuUtils import CpuUtils
26 from vpplib.VppGrubUtil import VppGrubUtil
27 from vpplib.QemuUtils import QemuUtils
28
29 __all__ = ["AutoConfig"]
30
31 # Constants
32 MIN_SYSTEM_CPUS = 2
33 MIN_TOTAL_HUGE_PAGES = 1024
34 MAX_PERCENT_FOR_HUGE_PAGES = 70
35
36 IPERFVM_XML = 'configs/iperf-vm.xml'
37 IPERFVM_IMAGE = 'images/xenial-mod.img'
38 IPERFVM_ISO = 'configs/cloud-config.iso'
39
40
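# Typical usage (a sketch only; the rootdir and filename values below are
# illustrative, the real values are supplied by the vpp-config tool that
# drives this class):
#
#   acfg = AutoConfig('/usr/local/vpp/vpp-config/', 'configs/auto-config.yaml')
#   acfg.discover()           # read hugepage, device, cpu and grub state
#   acfg.modify_cpu()         # ask the user the cpu questions
#   acfg.apply_vpp_startup()  # render the vpp startup config from its template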
41 class AutoConfig(object):
42     """Auto Configuration Tools"""
43
44     def __init__(self, rootdir, filename, clean=False):
45         """
46         The Auto Configure class.
47
48         :param rootdir: The root directory for all the auto configuration files
49         :param filename: The autoconfiguration file
50         :param clean: When set, initialize the nodes from the auto-config file
51         :type rootdir: str
52         :type filename: str
53         :type clean: bool
54         """
55         self._autoconfig_filename = rootdir + filename
56         self._rootdir = rootdir
57         self._metadata = {}
58         self._nodes = {}
59         self._vpp_devices_node = {}
60         self._hugepage_config = ""
61         self._clean = clean
62         self._loadconfig()
63         self._sockfilename = ""
64
65     def get_nodes(self):
66         """
67         Returns the nodes dictionary.
68
69         :returns: The nodes
70         :rtype: dictionary
71         """
72
73         return self._nodes
74
75     @staticmethod
76     def _autoconfig_backup_file(filename):
77         """
78         Create a backup file.
79
80         :param filename: The file to backup
81         :type filename: str
82         """
83
84         # If a copy of the file does not already exist, create one
85         ofile = filename + '.orig'
86         (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
87         if ret != 0:
88             logging.debug(stderr)
89             if stdout.strip('\n') != ofile:
90                 cmd = 'sudo cp {} {}'.format(filename, ofile)
91                 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
92                 if ret != 0:
93                     logging.debug(stderr)
94
95     # noinspection PyBroadException
96     @staticmethod
97     def _ask_user_ipv4():
98         """
99         Asks the user for a number within a range.
100         default is returned if return is entered.
101
102         :returns: IP address with cidr
103         :rtype: str
104         """
105
106         while True:
107             answer = raw_input("Please enter the IPv4 Address [n.n.n.n/n]: ")
108             try:
109                 ipinput = answer.split('/')
110                 ipaddr = IPAddress(ipinput[0])
111                 if len(ipinput) > 1:
112                     plen = answer.split('/')[1]
113                 else:
114                     answer = raw_input("Please enter the netmask [n.n.n.n]: ")
115                     plen = IPAddress(answer).netmask_bits()
116                 return '{}/{}'.format(ipaddr, plen)
117             except Exception:
118                 print "Please enter a valid IPv4 address."
119
120     @staticmethod
121     def _ask_user_range(question, first, last, default):
122         """
123         Asks the user for a number within a range.
124         default is returned if return is entered.
125
126         :param question: Text of a question.
127         :param first: First number in the range
128         :param last: Last number in the range
129         :param default: The value returned when return is entered
130         :type question: string
131         :type first: int
132         :type last: int
133         :type default: int
134         :returns: The answer to the question
135         :rtype: int
136         """
137
138         while True:
139             answer = raw_input(question)
140             if answer == '':
141                 answer = default
142                 break
143             if re.findall(r'[0-9]+', answer):
144                 if int(answer) in range(first, last + 1):
145                     break
146                 else:
147                     print "Please enter a value between {} and {} or Return.". \
148                         format(first, last)
149             else:
150                 print "Please enter a number between {} and {} or Return.". \
151                     format(first, last)
152
153         return int(answer)
154
155     @staticmethod
156     def _ask_user_yn(question, default):
157         """
158         Asks the user a yes or no question.
159
160         :param question: Text of a question.
161         :param default: The value returned when return is entered
162         :type question: string
163         :type default: string
164         :returns: The answer to the question
165         :rtype: string
166         """
167
168         input_valid = False
169         default = default.lower()
170         answer = ''
171         while not input_valid:
172             answer = raw_input(question)
173             if answer == '':
174                 answer = default
175             if re.findall(r'[YyNn]', answer):
176                 input_valid = True
177                 answer = answer[0].lower()
178             else:
179                 print "Please answer Y, N or Return."
180
181         return answer
182
183     def _loadconfig(self):
184         """
185         Load the testbed configuration, given the auto configuration file.
186
187         """
188
189         # Get the Topology, from the topology layout file
190         topo = {}
191         with open(self._autoconfig_filename, 'r') as stream:
192             try:
193                 topo = yaml.load(stream)
194                 if 'metadata' in topo:
195                     self._metadata = topo['metadata']
196             except yaml.YAMLError as exc:
197                 raise RuntimeError("Couldn't read the Auto config file {}: {}".format(self._autoconfig_filename, exc))
198
199         systemfile = self._rootdir + self._metadata['system_config_file']
200         if self._clean is False and os.path.isfile(systemfile):
201             with open(systemfile, 'r') as sysstream:
202                 try:
203                     systopo = yaml.load(sysstream)
204                     if 'nodes' in systopo:
205                         self._nodes = systopo['nodes']
206                 except yaml.YAMLError as sysexc:
207                     raise RuntimeError("Couldn't read the System config file {}: {}".format(systemfile, sysexc))
208         else:
209             # Get the nodes from Auto Config
210             if 'nodes' in topo:
211                 self._nodes = topo['nodes']
212
213         # Set the root directory in all the nodes
214         for i in self._nodes.items():
215             node = i[1]
216             node['rootdir'] = self._rootdir
217
218     def updateconfig(self):
219         """
220         Update the testbed configuration, given the auto configuration file.
221         We will write the system configuration file with the current node
222         information
223
224         """
225
226         # Initialize the yaml data
227         ydata = {'metadata': self._metadata, 'nodes': self._nodes}
228
229         # Write the system config file
230         filename = self._rootdir + self._metadata['system_config_file']
231         with open(filename, 'w') as yamlfile:
232             yaml.dump(ydata, yamlfile)
233
234     def _update_auto_config(self):
235         """
236         Write the auto configuration file with the new configuration data,
237         input from the user.
238
239         """
240
241         # Initialize the yaml data
242         nodes = {}
243         with open(self._autoconfig_filename, 'r') as stream:
244             try:
245                 ydata = yaml.load(stream)
246                 if 'nodes' in ydata:
247                     nodes = ydata['nodes']
248             except yaml.YAMLError as exc:
249                 print exc
250                 return
251
252         for i in nodes.items():
253             key = i[0]
254             node = i[1]
255
256             # Interfaces
257             node['interfaces'] = {}
258             for item in self._nodes[key]['interfaces'].items():
259                 port = item[0]
260                 interface = item[1]
261
262                 node['interfaces'][port] = {}
263                 addr = '{}'.format(interface['pci_address'])
264                 node['interfaces'][port]['pci_address'] = addr
265                 if 'mac_address' in interface:
266                     node['interfaces'][port]['mac_address'] = \
267                         interface['mac_address']
268
269             if 'total_other_cpus' in self._nodes[key]['cpu']:
270                 node['cpu']['total_other_cpus'] = \
271                     self._nodes[key]['cpu']['total_other_cpus']
272             if 'total_vpp_cpus' in self._nodes[key]['cpu']:
273                 node['cpu']['total_vpp_cpus'] = \
274                     self._nodes[key]['cpu']['total_vpp_cpus']
275             if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
276                 node['cpu']['reserve_vpp_main_core'] = \
277                     self._nodes[key]['cpu']['reserve_vpp_main_core']
278
279             # TCP
280             if 'active_open_sessions' in self._nodes[key]['tcp']:
281                 node['tcp']['active_open_sessions'] = \
282                     self._nodes[key]['tcp']['active_open_sessions']
283             if 'passive_open_sessions' in self._nodes[key]['tcp']:
284                 node['tcp']['passive_open_sessions'] = \
285                     self._nodes[key]['tcp']['passive_open_sessions']
286
287             # Huge pages
288             node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
289
290         # Write the auto config config file
291         with open(self._autoconfig_filename, 'w') as yamlfile:
292             yaml.dump(ydata, yamlfile)
293
294     def apply_huge_pages(self):
295         """
296         Apply the huge page config
297
298         """
299
300         for i in self._nodes.items():
301             node = i[1]
302
303             hpg = VppHugePageUtil(node)
304             hpg.hugepages_dryrun_apply()
305
306     @staticmethod
307     def _apply_vpp_unix(node):
308         """
309         Apply the VPP Unix config
310
311         :param node: Node dictionary with cpuinfo.
312         :type node: dict
313         """
314
315         unix = '  nodaemon\n'
316         if 'unix' not in node['vpp']:
317             return ''
318
319         unixv = node['vpp']['unix']
320         if 'interactive' in unixv:
321             interactive = unixv['interactive']
322             if interactive is True:
323                 unix = '  interactive\n'
324
325         return unix.rstrip('\n')
326
327     @staticmethod
328     def _apply_vpp_cpu(node):
329         """
330         Apply the VPP cpu config
331
332         :param node: Node dictionary with cpuinfo.
333         :type node: dict
334         """
335
336         # Get main core
337         cpu = '\n'
338         if 'vpp_main_core' in node['cpu']:
339             vpp_main_core = node['cpu']['vpp_main_core']
340         else:
341             vpp_main_core = 0
342         if vpp_main_core != 0:
343             cpu += '  main-core {}\n'.format(vpp_main_core)
344
345         # Get workers
346         vpp_workers = node['cpu']['vpp_workers']
347         vpp_worker_len = len(vpp_workers)
348         if vpp_worker_len > 0:
349             vpp_worker_str = ''
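            # Each worker entry is an inclusive (start, end) core range built
            # by _calc_vpp_workers(); collapse it to "n" or "n-m", so e.g.
            # [(2, 2), (4, 6)] becomes "corelist-workers 2,4-6".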
350             for i, worker in enumerate(vpp_workers):
351                 if i > 0:
352                     vpp_worker_str += ','
353                 if worker[0] == worker[1]:
354                     vpp_worker_str += "{}".format(worker[0])
355                 else:
356                     vpp_worker_str += "{}-{}".format(worker[0], worker[1])
357
358             cpu += '  corelist-workers {}\n'.format(vpp_worker_str)
359
360         return cpu
361
362     @staticmethod
363     def _apply_vpp_devices(node):
364         """
365         Apply VPP PCI Device configuration to vpp startup.
366
367         :param node: Node dictionary with cpuinfo.
368         :type node: dict
369         """
370
371         devices = ''
372         ports_per_numa = node['cpu']['ports_per_numa']
373         total_mbufs = node['cpu']['total_mbufs']
374
375         for item in ports_per_numa.items():
376             value = item[1]
377             interfaces = value['interfaces']
378
379             # if 0 was specified for the number of vpp workers, use 1 queue
380             num_rx_queues = None
381             num_tx_queues = None
382             if 'rx_queues' in value:
383                 num_rx_queues = value['rx_queues']
384             if 'tx_queues' in value:
385                 num_tx_queues = value['tx_queues']
386
387             num_rx_desc = None
388             num_tx_desc = None
389
390             # Create the devices string
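            # Each dev { } stanza built here is later substituted into the
            # startup template's {devices} placeholder by apply_vpp_startup(),
            # producing output like (PCI address illustrative):
            #   dev 0000:02:00.0 {
            #     num-rx-queues 1
            #   }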
391             for interface in interfaces:
392                 pci_address = interface['pci_address']
393                 pci_address = pci_address.lstrip("'").rstrip("'")
394                 devices += '\n'
395                 devices += '  dev {} {{ \n'.format(pci_address)
396                 if num_rx_queues:
397                     devices += '    num-rx-queues {}\n'.format(num_rx_queues)
398                 else:
399                     devices += '    num-rx-queues {}\n'.format(1)
400                 if num_tx_queues:
401                     devices += '    num-tx-queues {}\n'.format(num_tx_queues)
402                 if num_rx_desc:
403                     devices += '    num-rx-desc {}\n'.format(num_rx_desc)
404                 if num_tx_desc:
405                     devices += '    num-tx-desc {}\n'.format(num_tx_desc)
406                 devices += '  }'
407
408         # If total mbufs is nonzero and greater than the default (16384), set num-mbufs
409         logging.debug("Total mbufs: {}".format(total_mbufs))
410         if total_mbufs != 0 and total_mbufs > 16384:
411             devices += '\n  num-mbufs {}'.format(total_mbufs)
412
413         return devices
414
415     @staticmethod
416     def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end, total_vpp_workers,
417                           reserve_vpp_main_core):
418         """
419         Calculate the VPP worker information
420
421         :param node: Node dictionary
422         :param vpp_workers: List of VPP workers
423         :param numa_node: Numa node
424         :param other_cpus_end: The end of the cpus allocated for cores
425         other than vpp
426         :param total_vpp_workers: The number of vpp workers needed
427         :param reserve_vpp_main_core: Is there a core needed for
428         the vpp main core
429         :type node: dict
430         :type numa_node: int
431         :type other_cpus_end: int
432         :type total_vpp_workers: int
433         :type reserve_vpp_main_core: bool
434         :returns: Is a core still needed for the vpp main core
435         :rtype: bool
436         """
437
438         # Can we fit the workers in one of these slices
439         cpus = node['cpu']['cpus_per_node'][numa_node]
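        # cpus_per_node[numa_node] is a list of inclusive (start, end) cpu
        # ranges built in modify_cpu(); try to place the main core (if one
        # still needs to be reserved) and all the workers in a single range.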
440         for cpu in cpus:
441             start = cpu[0]
442             end = cpu[1]
443             if start <= other_cpus_end:
444                 start = other_cpus_end + 1
445
446             if reserve_vpp_main_core:
447                 start += 1
448
449             workers_end = start + total_vpp_workers - 1
450
451             if workers_end <= end:
452                 if reserve_vpp_main_core:
453                     node['cpu']['vpp_main_core'] = start - 1
454                 reserve_vpp_main_core = False
455                 if total_vpp_workers:
456                     vpp_workers.append((start, workers_end))
457                 break
458
459         # We still need to reserve the main core
460         if reserve_vpp_main_core:
461             node['cpu']['vpp_main_core'] = other_cpus_end + 1
462
463         return reserve_vpp_main_core
464
465     @staticmethod
466     def _calc_desc_and_queues(total_numa_nodes,
467                               total_ports_per_numa,
468                               total_rx_queues,
469                               ports_per_numa_value):
470         """
471         Calculate the number of descriptors and queues
472
473         :param total_numa_nodes: The total number of numa nodes
474         :param total_ports_per_numa: The total number of ports for this
475         numa node
476         :param total_rx_queues: The total number of rx queues / port
477         :param ports_per_numa_value: The value from the ports_per_numa
478         dictionary
479         :type total_numa_nodes: int
480         :type total_ports_per_numa: int
481         :type total_rx_queues: int
482         :type ports_per_numa_value: dict
483         :returns: The total number of message buffers
484         :rtype: int
485         """
486
487         # Get the number of rx queues
488         rx_queues = max(1, total_rx_queues)
489         tx_queues = rx_queues * total_numa_nodes + 1
490
491         # Get the descriptor entries
492         desc_entries = 1024
493         ports_per_numa_value['rx_queues'] = rx_queues
494         total_mbufs = (((rx_queues * desc_entries) +
495                         (tx_queues * desc_entries)) *
496                        total_ports_per_numa)
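        # Worked example (assumed inputs): with 2 numa nodes, 2 ports on this
        # numa node and 1 rx queue per port, rx_queues = 1, tx_queues = 3 and
        # total_mbufs = ((1 * 1024) + (3 * 1024)) * 2 = 8192.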
498
499         return total_mbufs
500
501     @staticmethod
502     def _create_ports_per_numa(node, interfaces):
503         """
504         Create a dictionary of ports per numa node.
505         :param node: Node dictionary
506         :param interfaces: All the interfaces to be used by vpp
507         :type node: dict
508         :type interfaces: dict
509         :returns: The ports per numa dictionary
510         :rtype: dict
511         """
512
513         # Make a list of ports by numa node
514         ports_per_numa = {}
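        # e.g. two ports on numa node 0 and one on numa node 1 give
        # {0: {'interfaces': [<port>, <port>]}, 1: {'interfaces': [<port>]}},
        # where each <port> stands for the interface dictionary.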
515         for item in interfaces.items():
516             i = item[1]
517             if i['numa_node'] not in ports_per_numa:
518                 ports_per_numa[i['numa_node']] = {'interfaces': []}
519             ports_per_numa[i['numa_node']]['interfaces'].append(i)
522         node['cpu']['ports_per_numa'] = ports_per_numa
523
524         return ports_per_numa
525
526     def calculate_cpu_parameters(self):
527         """
528         Calculate the cpu configuration.
529
530         """
531
532         # Calculate the cpu parameters, needed for the
533         # vpp_startup and grub configuration
534         for i in self._nodes.items():
535             node = i[1]
536
537             # get total number of nic ports
538             interfaces = node['interfaces']
539
540             # Make a list of ports by numa node
541             ports_per_numa = self._create_ports_per_numa(node, interfaces)
542
543             # Get the number of cpus to skip, we never use the first cpu
544             other_cpus_start = 1
545             other_cpus_end = other_cpus_start + node['cpu']['total_other_cpus'] - 1
546             other_workers = None
547             if other_cpus_end != 0:
548                 other_workers = (other_cpus_start, other_cpus_end)
549             node['cpu']['other_workers'] = other_workers
550
551             # Allocate the VPP main core and workers
552             vpp_workers = []
553             reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
554             total_vpp_cpus = node['cpu']['total_vpp_cpus']
555             total_rx_queues = node['cpu']['total_rx_queues']
556
557             # If total_vpp_cpus is 0 or is less than the numa nodes with ports
558             #  then we shouldn't get workers
559             total_workers_node = 0
560             if len(ports_per_numa):
561                 total_workers_node = total_vpp_cpus / len(ports_per_numa)
562             total_main = 0
563             if reserve_vpp_main_core:
564                 total_main = 1
565             total_mbufs = 0
566             if total_main + total_workers_node != 0:
567                 for item in ports_per_numa.items():
568                     numa_node = item[0]
569                     value = item[1]
570
571                     # Get the number of descriptors and queues
572                     mbufs = self._calc_desc_and_queues(len(ports_per_numa),
573                                                        len(value['interfaces']), total_rx_queues, value)
574                     total_mbufs += mbufs
575
576                     # Get the VPP workers
577                     reserve_vpp_main_core = self._calc_vpp_workers(node, vpp_workers, numa_node,
578                                                                    other_cpus_end, total_workers_node,
579                                                                    reserve_vpp_main_core)
580
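                # Scale the summed estimate by 2.5, presumably to leave
                # headroom beyond the raw per-queue descriptor counts, then
                # truncate to an integer.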
581                 total_mbufs *= 2.5
582                 total_mbufs = int(total_mbufs)
583             else:
584                 total_mbufs = 0
585
586             # Save the info
587             node['cpu']['vpp_workers'] = vpp_workers
588             node['cpu']['total_mbufs'] = total_mbufs
589
590         # Write the config
591         self.updateconfig()
592
593     @staticmethod
594     def _apply_vpp_tcp(node):
595         """
596         Apply the VPP TCP config
597
598         :param node: Node dictionary with cpuinfo.
599         :type node: dict
600         """
601
602         active_open_sessions = node['tcp']['active_open_sessions']
603         aos = int(active_open_sessions)
604
605         passive_open_sessions = node['tcp']['passive_open_sessions']
606         pos = int(passive_open_sessions)
607
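        # aos/pos come from acquire_tcp_params(), which already clamps any
        # value below 10000 sessions to 0, so (aos + pos) == 0 means no TCP
        # tuning was requested.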
608         # Always generate the api-segment section with "gid vpp", even when no TCP tuning is requested
609         if (aos + pos) == 0:
610             tcp = "api-segment {\n"
611             tcp = tcp + "  gid vpp\n"
612             tcp = tcp + "}\n"
613             return tcp.rstrip('\n')
614
615         tcp = "# TCP stack-related configuration parameters\n"
616         tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
617         tcp = tcp + "heapsize 4g\n\n"
618         tcp = tcp + "api-segment {\n"
619         tcp = tcp + "  global-size 2000M\n"
620         tcp = tcp + "  api-size 1G\n"
621         tcp = tcp + "}\n\n"
622
623         tcp = tcp + "session {\n"
624         tcp = tcp + "  event-queue-length " + "{:d}".format(aos + pos) + "\n"
625         tcp = tcp + "  preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
626         tcp = tcp + "  v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
627         tcp = tcp + "  v4-session-table-memory 3g\n"
628         if aos > 0:
629             tcp = tcp + "  v4-halfopen-table-buckets " + \
630                   "{:d}".format((aos + pos) / 4) + "\n"
631             tcp = tcp + "  v4-halfopen-table-memory 3g\n"
632             tcp = tcp + "  local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
633             tcp = tcp + "  local-endpoints-table-memory 3g\n"
634         tcp = tcp + "}\n\n"
635
636         tcp = tcp + "tcp {\n"
637         tcp = tcp + "  preallocated-connections " + "{:d}".format(aos + pos) + "\n"
638         if aos > 0:
639             tcp = tcp + "  preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
640         tcp = tcp + "}\n\n"
641
642         return tcp.rstrip('\n')
643
644     def apply_vpp_startup(self):
645         """
646         Apply the vpp startup configuration
647
648         """
649
650         # Apply the VPP startup configuration
651         for i in self._nodes.items():
652             node = i[1]
653
654             # Get the startup file
655             rootdir = node['rootdir']
656             sfile = rootdir + node['vpp']['startup_config_file']
657
658             # Get the devices
659             devices = self._apply_vpp_devices(node)
660
661             # Get the CPU config
662             cpu = self._apply_vpp_cpu(node)
663
664             # Get the unix config
665             unix = self._apply_vpp_unix(node)
666
667             # Get the TCP configuration, if any
668             tcp = self._apply_vpp_tcp(node)
669
670             # Make a backup if needed
671             self._autoconfig_backup_file(sfile)
672
673             # Get the template
674             tfile = sfile + '.template'
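            # The template is expected to contain {unix}, {cpu}, {devices} and
            # {tcp} placeholders, which str.format() fills in below.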
675             (ret, stdout, stderr) = \
676                 VPPUtil.exec_command('cat {}'.format(tfile))
677             if ret != 0:
678                 raise RuntimeError('Executing cat command failed on node {}'.
679                                    format(node['host']))
680             startup = stdout.format(unix=unix,
681                                     cpu=cpu,
682                                     devices=devices,
683                                     tcp=tcp)
684
685             (ret, stdout, stderr) = \
686                 VPPUtil.exec_command('rm {}'.format(sfile))
687             if ret != 0:
688                 logging.debug(stderr)
689
690             cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
691             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
692             if ret != 0:
693                 raise RuntimeError('Writing the config failed on node {}'.
694                                    format(node['host']))
695
696     def apply_grub_cmdline(self):
697         """
698         Apply the grub cmdline
699
700         """
701
702         for i in self._nodes.items():
703             node = i[1]
704
705             # Get the isolated CPUs
706             other_workers = node['cpu']['other_workers']
707             vpp_workers = node['cpu']['vpp_workers']
708             if 'vpp_main_core' in node['cpu']:
709                 vpp_main_core = node['cpu']['vpp_main_core']
710             else:
711                 vpp_main_core = 0
712             all_workers = []
713             if other_workers is not None:
714                 all_workers = [other_workers]
715             if vpp_main_core != 0:
716                 all_workers += [(vpp_main_core, vpp_main_core)]
717             all_workers += vpp_workers
718             isolated_cpus = ''
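            # Build the isolcpus value from the (start, end) ranges,
            # e.g. [(1, 1), (2, 5)] becomes "1,2-5".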
719             for idx, worker in enumerate(all_workers):
720                 if worker is None:
721                     continue
722                 if idx > 0:
723                     isolated_cpus += ','
724                 if worker[0] == worker[1]:
725                     isolated_cpus += "{}".format(worker[0])
726                 else:
727                     isolated_cpus += "{}-{}".format(worker[0], worker[1])
728
729             vppgrb = VppGrubUtil(node)
730             current_cmdline = vppgrb.get_current_cmdline()
731             if 'grub' not in node:
732                 node['grub'] = {}
733             node['grub']['current_cmdline'] = current_cmdline
734             node['grub']['default_cmdline'] = \
735                 vppgrb.apply_cmdline(node, isolated_cpus)
736
737         self.updateconfig()
738
739     def get_hugepages(self):
740         """
741         Get the hugepage configuration
742
743         """
744
745         for i in self._nodes.items():
746             node = i[1]
747
748             hpg = VppHugePageUtil(node)
749             max_map_count, shmmax = hpg.get_huge_page_config()
750             node['hugepages']['max_map_count'] = max_map_count
751             node['hugepages']['shmax'] = shmmax
752             total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
753             node['hugepages']['actual_total'] = total
754             node['hugepages']['free'] = free
755             node['hugepages']['size'] = size
756             node['hugepages']['memtotal'] = memtotal
757             node['hugepages']['memfree'] = memfree
758
759         self.updateconfig()
760
761     def get_grub(self):
762         """
763         Get the grub configuration
764
765         """
766
767         for i in self._nodes.items():
768             node = i[1]
769
770             vppgrb = VppGrubUtil(node)
771             current_cmdline = vppgrb.get_current_cmdline()
772             default_cmdline = vppgrb.get_default_cmdline()
773
774             # Get the total number of isolated CPUs
775             current_iso_cpus = 0
776             iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
777             iso_cpurl = len(iso_cpur)
778             if iso_cpurl > 0:
779                 iso_cpu_str = iso_cpur[0]
780                 iso_cpu_str = iso_cpu_str.split('=')[1]
781                 iso_cpul = iso_cpu_str.split(',')
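                # e.g. "isolcpus=1,2-5" yields the entries '1' and '2-5';
                # single cpus count as one, ranges count as (last - first).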
782                 for iso_cpu in iso_cpul:
783                     isocpuspl = iso_cpu.split('-')
784                     if len(isocpuspl) == 1:
785                         current_iso_cpus += 1
786                     else:
787                         first = int(isocpuspl[0])
788                         second = int(isocpuspl[1])
789                         if first == second:
790                             current_iso_cpus += 1
791                         else:
792                             current_iso_cpus += second - first
793
794             if 'grub' not in node:
795                 node['grub'] = {}
796             node['grub']['current_cmdline'] = current_cmdline
797             node['grub']['default_cmdline'] = default_cmdline
798             node['grub']['current_iso_cpus'] = current_iso_cpus
799
800         self.updateconfig()
801
802     @staticmethod
803     def _get_device(node):
804         """
805         Get the device configuration for a single node
806
807         :param node: Node dictionary with cpuinfo.
808         :type node: dict
809
810         """
811
812         vpp = VppPCIUtil(node)
813         vpp.get_all_devices()
814
815         # Save the device information
816         node['devices'] = {}
817         node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
818         node['devices']['kernel_devices'] = vpp.get_kernel_devices()
819         node['devices']['other_devices'] = vpp.get_other_devices()
820         node['devices']['linkup_devices'] = vpp.get_link_up_devices()
821
822     def get_devices_per_node(self):
823         """
824         Get the device configuration for all the nodes
825
826         """
827
828         for i in self._nodes.items():
829             node = i[1]
830             # Update the interface data
831
832             self._get_device(node)
833
834         self.updateconfig()
835
836     @staticmethod
837     def get_cpu_layout(node):
838         """
839         Get the cpu layout
840
841         using lscpu -p get the cpu layout.
842         Returns a list with each item representing a single cpu.
843
844         :param node: Node dictionary.
845         :type node: dict
846         :returns: The cpu layout
847         :rtype: list
848         """
849
850         cmd = 'lscpu -p'
851         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
852         if ret != 0:
853             raise RuntimeError('{} failed on node {} {}'.
854                                format(cmd, node['host'], stderr))
855
856         pcpus = []
857         lines = stdout.split('\n')
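        # Non-comment lines from 'lscpu -p' are CSV of the form
        # cpu,core,socket,node,...; only the first four fields are used here.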
858         for line in lines:
859             if line == '' or line[0] == '#':
860                 continue
861             linesplit = line.split(',')
862             layout = {'cpu': linesplit[0], 'core': linesplit[1],
863                       'socket': linesplit[2], 'node': linesplit[3]}
864
865             # cpu, core, socket, node
866             pcpus.append(layout)
867
868         return pcpus
869
870     def get_cpu(self):
871         """
872         Get the cpu configuration
873
874         """
875
876         # Get the CPU layout
877         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
878
879         for i in self._nodes.items():
880             node = i[1]
881
882             # Get the cpu layout
883             layout = self.get_cpu_layout(node)
884             node['cpu']['layout'] = layout
885
886             cpuinfo = node['cpuinfo']
887             smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
888             node['cpu']['smt_enabled'] = smt_enabled
889
890             # We don't want to write the cpuinfo
891             node['cpuinfo'] = ""
892
893         # Write the config
894         self.updateconfig()
895
896     def discover(self):
897         """
898         Get the current system configuration.
899
900         """
901
902         # Get the Huge Page configuration
903         self.get_hugepages()
904
905         # Get the device configuration
906         self.get_devices_per_node()
907
908         # Get the CPU configuration
909         self.get_cpu()
910
911         # Get the current grub cmdline
912         self.get_grub()
913
914     def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
915         """
916         Ask the user questions related to the cpu configuration.
917
918         :param node: Node dictionary
919         :param total_cpus: The total number of cpus in the system
920         :param numa_nodes: The list of numa nodes in the system
921         :type node: dict
922         :type total_cpus: int
923         :type numa_nodes: list
924         """
925
926         print "\nYour system has {} core(s) and {} Numa Nodes.". \
927             format(total_cpus, len(numa_nodes))
928         print "To begin, we suggest not reserving any cores for VPP or other processes."
929         print "Then, to improve performance, start reserving cores and adding queues as needed."
930
931         max_vpp_cpus = 4
932         total_vpp_cpus = 0
933         if max_vpp_cpus > 0:
934             question = "\nHow many core(s) shall we reserve for VPP [0-{}][0]? ".format(max_vpp_cpus)
935             total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
936             node['cpu']['total_vpp_cpus'] = total_vpp_cpus
937
938         max_other_cores = (total_cpus - total_vpp_cpus) / 2
939         question = 'How many core(s) do you want to reserve for processes other than VPP? [0-{}][0]? '. \
940             format(str(max_other_cores))
941         total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
942         node['cpu']['total_other_cpus'] = total_other_cpus
943
944         max_main_cpus = max_vpp_cpus + 1 - total_vpp_cpus
945         reserve_vpp_main_core = False
946         if max_main_cpus > 0:
947             question = "Should we reserve 1 core for the VPP Main thread? "
948             question += "[y/N]? "
949             answer = self._ask_user_yn(question, 'n')
950             if answer == 'y':
951                 reserve_vpp_main_core = True
952             node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
953             node['cpu']['vpp_main_core'] = 0
954
955         question = "How many RX queues per port shall we use for VPP [1-4][1]? "
957         total_rx_queues = self._ask_user_range(question, 1, 4, 1)
958         node['cpu']['total_rx_queues'] = total_rx_queues
959
960     def modify_cpu(self, ask_questions=True):
961         """
962         Modify the cpu configuration, asking the user for the values.
963 
964         :param ask_questions: When True, ask the user for the config parameters
965         :type ask_questions: bool
966         """
967
968         # Get the CPU layout
969         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
970
971         for i in self._nodes.items():
972             node = i[1]
973             total_cpus = 0
974             total_cpus_per_slice = 0
975             cpus_per_node = {}
976             numa_nodes = []
977             cores = []
978             cpu_layout = self.get_cpu_layout(node)
979
980             # Assume the number of cpus per slice is always the same as the
981             # first slice
982             first_node = '0'
983             for cpu in cpu_layout:
984                 if cpu['node'] != first_node:
985                     break
986                 total_cpus_per_slice += 1
987
988             # Get the total number of cpus, cores, and numa nodes from the
989             # cpu layout
990             for cpul in cpu_layout:
991                 numa_node = cpul['node']
992                 core = cpul['core']
993                 cpu = cpul['cpu']
994                 total_cpus += 1
995
996                 if numa_node not in cpus_per_node:
997                     cpus_per_node[numa_node] = []
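                # Whenever the cpu id is a multiple of the per-node cpu count,
                # start a new inclusive (start, end) slice; these slices are
                # what _calc_vpp_workers() later searches for free cores.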
998                 cpuperslice = int(cpu) % total_cpus_per_slice
999                 if cpuperslice == 0:
1000                     cpus_per_node[numa_node].append((int(cpu), int(cpu) +
1001                                                      total_cpus_per_slice - 1))
1002                 if numa_node not in numa_nodes:
1003                     numa_nodes.append(numa_node)
1004                 if core not in cores:
1005                     cores.append(core)
1006             node['cpu']['cpus_per_node'] = cpus_per_node
1007
1008             # Ask the user some questions
1009             if ask_questions and total_cpus >= 8:
1010                 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1011
1012             # Populate the interfaces with the numa node
1013             if 'interfaces' in node:
1014                 ikeys = node['interfaces'].keys()
1015                 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1016
1017             # We don't want to write the cpuinfo
1018             node['cpuinfo'] = ""
1019
1020         # Write the configs
1021         self._update_auto_config()
1022         self.updateconfig()
1023
1024     def _modify_other_devices(self, node,
1025                               other_devices, kernel_devices, dpdk_devices):
1026         """
1027         Modify the devices configuration, asking the user for the values.
1028
1029         """
1030
1031         odevices_len = len(other_devices)
1032         if odevices_len > 0:
1033             print "\nThese device(s) are currently NOT being used",
1034             print "by VPP or the OS.\n"
1035             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1036             question = "\nWould you like to give any of these devices"
1037             question += " back to the OS [Y/n]? "
1038             answer = self._ask_user_yn(question, 'Y')
1039             if answer == 'y':
1040                 vppd = {}
1041                 for dit in other_devices.items():
1042                     dvid = dit[0]
1043                     device = dit[1]
1044                     question = "Would you like to use device {} for". \
1045                         format(dvid)
1046                     question += " the OS [y/N]? "
1047                     answer = self._ask_user_yn(question, 'n')
1048                     if answer == 'y':
1049                         if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1050                             driver = device['unused'][0]
1051                             ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1052                             if ret:
1053                                 logging.debug('Could not bind device {}'.format(dvid))
1054                             else:
1055                                 vppd[dvid] = device
1056                 for dit in vppd.items():
1057                     dvid = dit[0]
1058                     device = dit[1]
1059                     kernel_devices[dvid] = device
1060                     del other_devices[dvid]
1061
1062         odevices_len = len(other_devices)
1063         if odevices_len > 0:
1064             print "\nThese device(s) are still NOT being used ",
1065             print "by VPP or the OS.\n"
1066             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1067             question = "\nWould you like to use any of these for VPP [y/N]? "
1068             answer = self._ask_user_yn(question, 'N')
1069             if answer == 'y':
1070                 vppd = {}
1071                 for dit in other_devices.items():
1072                     dvid = dit[0]
1073                     device = dit[1]
1074                     question = "Would you like to use device {} ".format(dvid)
1075                     question += "for VPP [y/N]? "
1076                     answer = self._ask_user_yn(question, 'n')
1077                     if answer == 'y':
1078                         vppd[dvid] = device
1079                 for dit in vppd.items():
1080                     dvid = dit[0]
1081                     device = dit[1]
1082                     if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1083                         driver = device['unused'][0]
1084                         logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1085                         ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1086                         if ret:
1087                             logging.debug('Could not bind device {}'.format(dvid))
1088                         else:
1089                             dpdk_devices[dvid] = device
1090                             del other_devices[dvid]
1091
1092     def update_interfaces_config(self):
1093         """
1094         Modify the interfaces directly from the config file.
1095
1096         """
1097
1098         for i in self._nodes.items():
1099             node = i[1]
1100             devices = node['devices']
1101             all_devices = devices['other_devices']
1102             all_devices.update(devices['dpdk_devices'])
1103             all_devices.update(devices['kernel_devices'])
1104
1105             current_ifcs = {}
1106             interfaces = {}
1107             if 'interfaces' in node:
1108                 current_ifcs = node['interfaces']
1109             if current_ifcs:
1110                 for ifc in current_ifcs.values():
1111                     dvid = ifc['pci_address']
1112                     if dvid in all_devices:
1113                         VppPCIUtil.vpp_create_interface(interfaces, dvid,
1114                                                         all_devices[dvid])
1115             node['interfaces'] = interfaces
1116
1117         self.updateconfig()
1118
1119     def modify_devices(self):
1120         """
1121         Modify the devices configuration, asking the user for the values.
1122
1123         """
1124
1125         for i in self._nodes.items():
1126             node = i[1]
1127             devices = node['devices']
1128             other_devices = devices['other_devices']
1129             kernel_devices = devices['kernel_devices']
1130             dpdk_devices = devices['dpdk_devices']
1131
1132             if other_devices:
1133                 self._modify_other_devices(node, other_devices,
1134                                            kernel_devices, dpdk_devices)
1135
1136                 # Get the devices again for this node
1137                 self._get_device(node)
1138                 devices = node['devices']
1139                 kernel_devices = devices['kernel_devices']
1140                 dpdk_devices = devices['dpdk_devices']
1141
1142             klen = len(kernel_devices)
1143             if klen > 0:
1144                 print "\nThese devices have kernel interfaces, but",
1145                 print "appear to be safe to use with VPP.\n"
1146                 VppPCIUtil.show_vpp_devices(kernel_devices)
1147                 question = "\nWould you like to use any of these "
1148                 question += "device(s) for VPP [y/N]? "
1149                 answer = self._ask_user_yn(question, 'n')
1150                 if answer == 'y':
1151                     vppd = {}
1152                     for dit in kernel_devices.items():
1153                         dvid = dit[0]
1154                         device = dit[1]
1155                         question = "Would you like to use device {} ". \
1156                             format(dvid)
1157                         question += "for VPP [y/N]? "
1158                         answer = self._ask_user_yn(question, 'n')
1159                         if answer == 'y':
1160                             vppd[dvid] = device
1161                     for dit in vppd.items():
1162                         dvid = dit[0]
1163                         device = dit[1]
1164                         if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1165                             driver = device['unused'][0]
1166                             logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1167                             ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1168                             if ret:
1169                                 logging.debug('Could not bind device {}'.format(dvid))
1170                             else:
1171                                 dpdk_devices[dvid] = device
1172                                 del kernel_devices[dvid]
1173
1174             dlen = len(dpdk_devices)
1175             if dlen > 0:
1176                 print "\nThese device(s) will be used by VPP.\n"
1177                 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1178                 question = "\nWould you like to remove any of "
1179                 question += "these device(s) [y/N]? "
1180                 answer = self._ask_user_yn(question, 'n')
1181                 if answer == 'y':
1182                     vppd = {}
1183                     for dit in dpdk_devices.items():
1184                         dvid = dit[0]
1185                         device = dit[1]
1186                         question = "Would you like to remove {} [y/N]? ". \
1187                             format(dvid)
1188                         answer = self._ask_user_yn(question, 'n')
1189                         if answer == 'y':
1190                             vppd[dvid] = device
1191                     for dit in vppd.items():
1192                         dvid = dit[0]
1193                         device = dit[1]
1194                         if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1195                             driver = device['unused'][0]
1196                             logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1197                             ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1198                             if ret:
1199                                 logging.debug('Could not bind device {}'.format(dvid))
1200                             else:
1201                                 kernel_devices[dvid] = device
1202                                 del dpdk_devices[dvid]
1203
1204             interfaces = {}
1205             for dit in dpdk_devices.items():
1206                 dvid = dit[0]
1207                 device = dit[1]
1208                 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1209             node['interfaces'] = interfaces
1210
1211             print "\nThese device(s) will be used by VPP, please",
1212             print "rerun this option if this is incorrect.\n"
1213             VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1214
1215         self._update_auto_config()
1216         self.updateconfig()
1217
1218     def modify_huge_pages(self):
1219         """
1220         Modify the huge page configuration, asking the user for the values.
1221
1222         """
1223
1224         for i in self._nodes.items():
1225             node = i[1]
1226
1227             total = node['hugepages']['actual_total']
1228             free = node['hugepages']['free']
1229             size = node['hugepages']['size']
1230             memfree = node['hugepages']['memfree'].split(' ')[0]
1231             hugesize = int(size.split(' ')[0])
1232             # The max number of huge pages should be no more than
1233             # 70% of total free memory
1234             maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
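            # e.g. (assuming both values are in kB) 8388608 free and a 2048
            # page size give (8388608 * 70 / 100) / 2048 = 2867 pages maximum.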
1235             print "\nThere are currently {} {} huge pages free.". \
1236                 format(free, size)
1237             question = "Do you want to reconfigure the number of "
1238             question += "huge pages [y/N]? "
1239             answer = self._ask_user_yn(question, 'n')
1240             if answer == 'n':
1241                 node['hugepages']['total'] = total
1242                 continue
1243
1244             print "\nThere is currently a total of {} huge pages.". \
1245                 format(total)
1246             question = "How many huge pages do you want [{} - {}][{}]? ". \
1247                 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1248             answer = self._ask_user_range(question, MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1249             node['hugepages']['total'] = str(answer)
1250
1251         # Update auto-config.yaml
1252         self._update_auto_config()
1253
1254         # Rediscover just the hugepages
1255         self.get_hugepages()
1256
1257     def get_tcp_params(self):
1258         """
1259         Get the tcp configuration
1260
1261         """
1262         # Nothing to rediscover here yet; just write out the current configuration
1263         self.updateconfig()
1264
1265     def acquire_tcp_params(self):
1266         """
1267         Ask the user for TCP stack configuration parameters
1268
1269         """
1270
1271         for i in self._nodes.items():
1272             node = i[1]
1273
1274             question = "\nHow many active-open / tcp client sessions are expected "
1275             question = question + "[0-10000000][0]? "
1276             answer = self._ask_user_range(question, 0, 10000000, 0)
1277             # Less than 10K is equivalent to 0
1278             if int(answer) < 10000:
1279                 answer = 0
1280             node['tcp']['active_open_sessions'] = answer
1281
1282             question = "How many passive-open / tcp server sessions are expected "
1283             question = question + "[0-10000000][0]? "
1284             answer = self._ask_user_range(question, 0, 10000000, 0)
1285             # Less than 10K is equivalent to 0
1286             if int(answer) < 10000:
1287                 answer = 0
1288             node['tcp']['passive_open_sessions'] = answer
1289
1290         # Update auto-config.yaml
1291         self._update_auto_config()
1292
1293         # Rediscover tcp parameters
1294         self.get_tcp_params()
1295
1296     @staticmethod
1297     def patch_qemu(node):
1298         """
1299         Patch qemu with the correct patches.
1300
1301         :param node: Node dictionary
1302         :type node: dict
1303         """
1304
1305         print '\nWe are patching the node "{}":\n'.format(node['host'])
1306         QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
1307
1308     @staticmethod
1309     def cpu_info(node):
1310         """
1311         Print the CPU information.
1312
1313         """
1314
1315         cpu = CpuUtils.get_cpu_info_per_node(node)
1316
1317         item = 'Model name'
1318         if item in cpu:
1319             print "{:>20}:    {}".format(item, cpu[item])
1320         item = 'CPU(s)'
1321         if item in cpu:
1322             print "{:>20}:    {}".format(item, cpu[item])
1323         item = 'Thread(s) per core'
1324         if item in cpu:
1325             print "{:>20}:    {}".format(item, cpu[item])
1326         item = 'Core(s) per socket'
1327         if item in cpu:
1328             print "{:>20}:    {}".format(item, cpu[item])
1329         item = 'Socket(s)'
1330         if item in cpu:
1331             print "{:>20}:    {}".format(item, cpu[item])
1332         item = 'NUMA node(s)'
1333         numa_nodes = 0
1334         if item in cpu:
1335             numa_nodes = int(cpu[item])
1336         for i in xrange(0, numa_nodes):
1337             item = "NUMA node{} CPU(s)".format(i)
1338             print "{:>20}:    {}".format(item, cpu[item])
1339         item = 'CPU max MHz'
1340         if item in cpu:
1341             print "{:>20}:    {}".format(item, cpu[item])
1342         item = 'CPU min MHz'
1343         if item in cpu:
1344             print "{:>20}:    {}".format(item, cpu[item])
1345
1346         if node['cpu']['smt_enabled']:
1347             smt = 'Enabled'
1348         else:
1349             smt = 'Disabled'
1350         print "{:>20}:    {}".format('SMT', smt)
1351
1352         # VPP Threads
1353         print "\nVPP Threads: (Name: Cpu Number)"
1354         vpp_processes = cpu['vpp_processes']
1355         for i in vpp_processes.items():
1356             print "  {:10}: {:4}".format(i[0], i[1])
1357
1358     @staticmethod
1359     def device_info(node):
1360         """
1361         Show the device information.
1362
1363         """
1364
1365         if 'cpu' in node and 'total_mbufs' in node['cpu']:
1366             total_mbufs = node['cpu']['total_mbufs']
1367             if total_mbufs != 0:
1368                 print "Total Number of Buffers: {}".format(total_mbufs)
1369
1370         vpp = VppPCIUtil(node)
1371         vpp.get_all_devices()
1372         linkup_devs = vpp.get_link_up_devices()
1373         if len(linkup_devs):
1374             print ("\nDevices with link up (cannot be used with VPP):")
1375             vpp.show_vpp_devices(linkup_devs, show_header=False)
1376             # for dev in linkup_devs:
1377             #    print ("    " + dev)
1378         kernel_devs = vpp.get_kernel_devices()
1379         if len(kernel_devs):
1380             print ("\nDevices bound to kernel drivers:")
1381             vpp.show_vpp_devices(kernel_devs, show_header=False)
1382         else:
1383             print ("\nNo devices bound to kernel drivers")
1384
1385         dpdk_devs = vpp.get_dpdk_devices()
1386         if len(dpdk_devs):
1387             print ("\nDevices bound to DPDK drivers:")
1388             vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1389                                  show_header=False)
1390         else:
1391             print ("\nNo devices bound to DPDK drivers")
1392
1393         other_devs = vpp.get_other_devices()
1394         if len(other_devs):
1395             print ("\nDevices not bound to Kernel or DPDK drivers:")
1396             vpp.show_vpp_devices(other_devs, show_interfaces=True,
1397                                  show_header=False)
1398         else:
1399             print ("\nNo devices not bound to Kernel or DPDK drivers")
1400
1401         vpputl = VPPUtil()
1402         interfaces = vpputl.get_hardware(node)
1403         if interfaces == {}:
1404             return
1405
1406         print ("\nDevices in use by VPP:")
1407
1408         if len(interfaces.items()) < 2:
1409             print ("None")
1410             return
1411
1412         print "{:30} {:4} {:4} {:7} {:4} {:7}". \
1413             format('Name', 'Numa', 'RXQs',
1414                    'RXDescs', 'TXQs', 'TXDescs')
1415         for intf in sorted(interfaces.items()):
1416             name = intf[0]
1417             value = intf[1]
1418             if name == 'local0':
1419                 continue
1420             numa = rx_qs = rx_ds = tx_qs = tx_ds = ''
1421             if 'numa' in value:
1422                 numa = int(value['numa'])
1423             if 'rx queues' in value:
1424                 rx_qs = int(value['rx queues'])
1425             if 'rx descs' in value:
1426                 rx_ds = int(value['rx descs'])
1427             if 'tx queues' in value:
1428                 tx_qs = int(value['tx queues'])
1429             if 'tx descs' in value:
1430                 tx_ds = int(value['tx descs'])
1431
1432             print ("{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".
1433                    format(name, numa, rx_qs, rx_ds, tx_qs, tx_ds))
1434
1435     @staticmethod
1436     def hugepage_info(node):
1437         """
1438         Show the huge page information.
1439
1440         """
1441
1442         hpg = VppHugePageUtil(node)
1443         hpg.show_huge_pages()
1444
1445     @staticmethod
1446     def min_system_resources(node):
1447         """
1448         Check the system for the basic minimum resources and return True
1449         if there are enough.
1450
1451         :returns: True if the system meets the minimum requirements
1452         :rtype: bool
1453         """
1454
1455         min_sys_res = True
1456
1457         # CPUs
1458         if 'layout' in node['cpu']:
1459             total_cpus = len(node['cpu']['layout'])
1460             if total_cpus < MIN_SYSTEM_CPUS:
1461                 print "\nOnly {} CPU(s) are available on this system.".format(total_cpus)
1462                 print "This is not enough to run VPP."
1463                 min_sys_res = False
1464
1465         # System Memory
1466         if 'free' in node['hugepages'] and \
1467                 'memfree' in node['hugepages'] and \
1468                 'size' in node['hugepages']:
1469             free = node['hugepages']['free']
1470             memfree = float(node['hugepages']['memfree'].split(' ')[0])
1471             hugesize = float(node['hugepages']['size'].split(' ')[0])
1472
1473             memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1474             percentmemhugepages = (memhugepages / memfree) * 100
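            # Worked example (hypothetical values): with a 2048 kB huge page
            # size and 2,500,000 kB of free memory, 1024 pages * 2048 kB =
            # 2,097,152 kB, i.e. roughly 84% of free memory, which exceeds the
            # 70% (MAX_PERCENT_FOR_HUGE_PAGES) limit.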
1475             if free == '0' and \
1476                     percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1477                 print "\nThe system has only {} kB of free memory.".format(int(memfree))
1478                 print "You will not be able to allocate enough Huge Pages for VPP."
1479                 min_sys_res = False
1480
1481         return min_sys_res
1482
1483     def sys_info(self):
1484         """
1485         Print the system information
1486
1487         """
1488
1489         for i in self._nodes.items():
1490             print "\n=============================="
1491             name = i[0]
1492             node = i[1]
1493
1494             print "NODE: {}\n".format(name)
1495
1496             # CPU
1497             print "CPU:"
1498             self.cpu_info(node)
1499
1500             # Grub
1501             print "\nGrub Command Line:"
1502             if 'grub' in node:
1503                 print \
1504                     "  Current: {}".format(
1505                         node['grub']['current_cmdline'])
1506                 print \
1507                     "  Configured: {}".format(
1508                         node['grub']['default_cmdline'])
1509
1510             # Huge Pages
1511             print "\nHuge Pages:"
1512             self.hugepage_info(node)
1513
1514             # Devices
1515             print "\nDevices:"
1516             self.device_info(node)
1517
1518             # Status
1519             print "\nVPP Service Status:"
1520             state, errors = VPPUtil.status(node)
1521             print "  {}".format(state)
1522             for e in errors:
1523                 print "  {}".format(e)
1524
1525             # Minimum system resources
1526             self.min_system_resources(node)
1527
1528             print "\n=============================="
1529
1530     def _ipv4_interface_setup_questions(self, node):
1531         """
1532         Ask the user some questions and get a list of interfaces
1533         and IPv4 addresses associated with those interfaces
1534
1535         :param node: Node dictionary.
1536         :type node: dict
1537         :returns: A list of interfaces with IP addresses
1538         :rtype: list
1539         """
1540
1541         vpputl = VPPUtil()
1542         interfaces = vpputl.get_hardware(node)
1543         if interfaces == {}:
1544             return []
1545
1546         interfaces_with_ip = []
1547         for intf in sorted(interfaces.items()):
1548             name = intf[0]
1549             if name == 'local0':
1550                 continue
1551
1552             question = "Would you like to add an address to interface {} [Y/n]? ".format(name)
1553             answer = self._ask_user_yn(question, 'y')
1554             if answer == 'y':
1555                 address = {}
1556                 addr = self._ask_user_ipv4()
1557                 address['name'] = name
1558                 address['addr'] = addr
1559                 interfaces_with_ip.append(address)
1560
1561         return interfaces_with_ip
1562
1563     def ipv4_interface_setup(self):
1564         """
1565         After asking the user some questions, get a list of interfaces
1566         and IPv4 addresses associated with those interfaces
1567
1568         """
1569
1570         for i in self._nodes.items():
1571             node = i[1]
1572
1573             # Show the current interfaces with IP addresses
1574             current_ints = VPPUtil.get_int_ip(node)
1575             if current_ints != {}:
1576                 print ("\nThese are the current interfaces with IP addresses:")
1577                 for items in sorted(current_ints.items()):
1578                     name = items[0]
1579                     value = items[1]
1580                     if 'address' not in value:
1581                         address = 'Not Set'
1582                     else:
1583                         address = value['address']
1584                     print ("{:30} {:20} {:10}".format(name, address, value['state']))
1585                 question = "\nWould you like to keep this configuration [Y/n]? "
1586                 answer = self._ask_user_yn(question, 'y')
1587                 if answer == 'y':
1588                     continue
1589             else:
1590                 print ("\nThere are currently no interfaces with IP addresses.")
1591
1592             # Create a script that adds the IP addresses to the interfaces
1593             # and brings the interfaces up
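            # The generated script contains two lines per chosen interface,
            # e.g. (hypothetical interface name and address):
            #   set int ip address GigabitEthernet0/8/0 192.168.10.2/24
            #   set int state GigabitEthernet0/8/0 up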
1594             ints_with_addrs = self._ipv4_interface_setup_questions(node)
1595             content = ''
1596             for ints in ints_with_addrs:
1597                 name = ints['name']
1598                 addr = ints['addr']
1599                 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1600                 setintupstr = 'set int state {} up\n'.format(name)
1601                 content += setipstr + setintupstr
1602
1603             # Write the content to the script
1604             rootdir = node['rootdir']
1605             filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1606             with open(filename, 'w+') as sfile:
1607                 sfile.write(content)
1608
1609             # Execute the script
1610             cmd = 'vppctl exec {}'.format(filename)
1611             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1612             if ret != 0:
1613                 logging.debug(stderr)
1614
1615             print("\nA script has been created at {}".format(filename))
1616             print("This script can be run using the following:")
1617             print("vppctl exec {}\n".format(filename))
1618
1619     def _create_vints_questions(self, node):
1620         """
1621         Ask the user some questions and create a vhost-user (virtual)
1622         interface for each physical interface to be connected to the VM
1623
1624         :param node: Node dictionary.
1625         :type node: dict
1626         :returns: A list of interfaces with their virtual interfaces and bridge IDs
1627         :rtype: list
1628         """
1629
1630         vpputl = VPPUtil()
1631         interfaces = vpputl.get_hardware(node)
1632         if interfaces == {}:
1633             return []
1634
1635         # First delete all the Virtual interfaces
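        # vhost-user interfaces created by VPP are named 'VirtualEthernet...',
        # so anything whose name starts with 'Virtual' is removed here.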
1636         for intf in sorted(interfaces.items()):
1637             name = intf[0]
1638             if name[:7] == 'Virtual':
1639                 cmd = 'vppctl delete vhost-user {}'.format(name)
1640                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1641                 if ret != 0:
1642                     logging.debug('{} failed on node {} {}'.format(
1643                         cmd, node['host'], stderr))
1644
1645         # Create a virtual interface, for each interface the user wants to use
1646         interfaces = vpputl.get_hardware(node)
1647         if interfaces == {}:
1648             return []
1649         interfaces_with_virtual_interfaces = []
1650         inum = 1
1651         for intf in sorted(interfaces.items()):
1652             name = intf[0]
1653             if name == 'local0':
1654                 continue
1655
1656             question = "Would you like to connect interface {} to the VM [Y/n]? ".format(name)
1657             answer = self._ask_user_yn(question, 'y')
1658             if answer == 'y':
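                # 'create vhost-user socket ... server' prints the name of the
                # new interface (e.g. VirtualEthernet0/0/0), which is captured
                # from stdout below.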
1659                 sockfilename = '/var/run/vpp/{}.sock'.format(name.replace('/', '_'))
1660                 if os.path.exists(sockfilename):
1661                     os.remove(sockfilename)
1662                 cmd = 'vppctl create vhost-user socket {} server'.format(sockfilename)
1663                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1664                 if ret != 0:
1665                     raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
1666                 vintname = stdout.rstrip('\r\n')
1667
1668                 cmd = 'chmod 777 {}'.format(sockfilename)
1669                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1670                 if ret != 0:
1671                     raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
1672
1673                 interface = {'name': name, 'virtualinterface': '{}'.format(vintname),
1674                              'bridge': '{}'.format(inum)}
1675                 inum += 1
1676                 interfaces_with_virtual_interfaces.append(interface)
1677
1678         return interfaces_with_virtual_interfaces
1679
1680     def create_and_bridge_virtual_interfaces(self):
1681         """
1682         After asking the user some questions, create vhost-user (virtual)
1683         interfaces and bridge them with the chosen physical interfaces
1684
1685         """
1686
1687         for i in self._nodes.items():
1688             node = i[1]
1689
1690             # Show the current bridge and interface configuration
1691             print "\nThis is the current bridge configuration:"
1692             VPPUtil.show_bridge(node)
1693             question = "\nWould you like to keep this configuration [Y/n]? "
1694             answer = self._ask_user_yn(question, 'y')
1695             if answer == 'y':
1696                 continue
1697
1698             # Create a script that builds a bridge configuration with physical interfaces
1699             # and virtual interfaces
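            # In addition to explanatory 'comment' lines, the script contains,
            # for each selected interface (hypothetical names, bridge-domain 1):
            #   set interface state GigabitEthernet0/8/0 down
            #   set interface l2 bridge GigabitEthernet0/8/0 1
            #   set interface l2 bridge VirtualEthernet0/0/0 1
            #   set interface state VirtualEthernet0/0/0 up
            #   set interface state GigabitEthernet0/8/0 up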
1700             ints_with_vints = self._create_vints_questions(node)
1701             content = ''
1702             for intf in ints_with_vints:
1703                 vhoststr = 'comment { The following command creates the socket }\n'
1704                 vhoststr += 'comment { and returns a virtual interface }\n'
1705                 vhoststr += 'comment {{ create vhost-user socket /var/run/vpp/sock{}.sock server }}\n'. \
1706                     format(intf['bridge'])
1707
1708                 setintdnstr = 'set interface state {} down\n'.format(intf['name'])
1709
1710                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
1711                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])
1712
1713                 # bring the virtual interface up
1714                 setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])
1715
1716                 # bring the physical interface back up
1717                 setintupstr = 'set interface state {} up\n'.format(intf['name'])
1718
1719                 content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
1720
1721             # Write the content to the script
1722             rootdir = node['rootdir']
1723             filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1724             with open(filename, 'w+') as sfile:
1725                 sfile.write(content)
1726
1727             # Execute the script
1728             cmd = 'vppctl exec {}'.format(filename)
1729             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1730             if ret != 0:
1731                 logging.debug(stderr)
1732
1733             print("\nA script has been created at {}".format(filename))
1734             print("This script can be run using the following:")
1735             print("vppctl exec {}\n".format(filename))
1736
1737     def _iperf_vm_questions(self, node):
1738         """
1739         Ask the user to pick one interface to connect to the iperf VM and
1740         create a vhost-user (virtual) interface for it
1741
1742         :param node: Node dictionary.
1743         :type node: dict
1744         :returns: A list containing the chosen interface and its virtual interface
1745         :rtype: list
1746         """
1747
1748         vpputl = VPPUtil()
1749         interfaces = vpputl.get_hardware(node)
1750         if interfaces == {}:
1751             return []
1752
1753         # First delete all the Virtual interfaces
1754         for intf in sorted(interfaces.items()):
1755             name = intf[0]
1756             if name[:7] == 'Virtual':
1757                 cmd = 'vppctl delete vhost-user {}'.format(name)
1758                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1759                 if ret != 0:
1760                     logging.debug('{} failed on node {} {}'.format(
1761                         cmd, node['host'], stderr))
1762
1763         # Create a virtual interface, for each interface the user wants to use
1764         interfaces = vpputl.get_hardware(node)
1765         if interfaces == {}:
1766             return []
1767         interfaces_with_virtual_interfaces = []
1768         inum = 1
1769
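        # Keep prompting through the interface list until the user selects one;
        # the method returns as soon as a single vhost-user interface has been
        # created for the chosen interface.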
1770         while True:
1771             print '\nPlease pick one interface to connect to the iperf VM.'
1772             for intf in sorted(interfaces.items()):
1773                 name = intf[0]
1774                 if name == 'local0':
1775                     continue
1776
1777                 question = "Would you like to connect interface {} to the VM [y/N]? ".format(name)
1778                 answer = self._ask_user_yn(question, 'n')
1779                 if answer == 'y':
1780                     self._sockfilename = '/var/run/vpp/{}.sock'.format(name.replace('/', '_'))
1781                     if os.path.exists(self._sockfilename):
1782                         os.remove(self._sockfilename)
1783                     cmd = 'vppctl create vhost-user socket {} server'.format(self._sockfilename)
1784                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1785                     if ret != 0:
1786                         raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
1787                     vintname = stdout.rstrip('\r\n')
1788
1789                     cmd = 'chmod 777 {}'.format(self._sockfilename)
1790                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1791                     if ret != 0:
1792                         raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
1793
1794                     interface = {'name': name, 'virtualinterface': '{}'.format(vintname),
1795                                  'bridge': '{}'.format(inum)}
1796                     inum += 1
1797                     interfaces_with_virtual_interfaces.append(interface)
1798                     return interfaces_with_virtual_interfaces
1799
1800     def create_and_bridge_iperf_virtual_interface(self):
1801         """
1802         After asking the user some questions, create and bridge a virtual interface
1803         to be used with the iperf VM
1804
1805         """
1806
1807         for i in self._nodes.items():
1808             node = i[1]
1809
1810             # Show the current bridge and interface configuration
1811             print "\nThis is the current bridge configuration:"
1812             ifaces = VPPUtil.show_bridge(node)
1813             question = "\nWould you like to keep this configuration [Y/n]? "
1814             answer = self._ask_user_yn(question, 'y')
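            # If the user keeps the existing configuration, reuse the vhost-user
            # socket of the first bridged interface when it already exists on
            # disk; otherwise fall through and create a new one below.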
1815             if answer == 'y':
1816                 self._sockfilename = '/var/run/vpp/{}.sock'.format(ifaces[0]['name'].replace('/', '_'))
1817                 if os.path.exists(self._sockfilename):
1818                     continue
1819
1820             # Create a script that builds a bridge configuration with physical interfaces
1821             # and virtual interfaces
1822             ints_with_vints = self._iperf_vm_questions(node)
1823             content = ''
1824             for intf in ints_with_vints:
1825                 vhoststr = 'comment { The following command creates the socket }\n'
1826                 vhoststr += 'comment { and returns a virtual interface }\n'
1827                 vhoststr += 'comment {{ create vhost-user socket /var/run/vpp/sock{}.sock server }}\n'. \
1828                     format(intf['bridge'])
1829
1830                 setintdnstr = 'set interface state {} down\n'.format(intf['name'])
1831
1832                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
1833                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])
1834
1835                 # bring the virtual interface up
1836                 setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])
1837
1838                 # bring the physical interface back up
1839                 setintupstr = 'set interface state {} up\n'.format(intf['name'])
1840
1841                 content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
1842
1843             # Write the content to the script
1844             rootdir = node['rootdir']
1845             filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
1846             with open(filename, 'w+') as sfile:
1847                 sfile.write(content)
1848
1849             # Execute the script
1850             cmd = 'vppctl exec {}'.format(filename)
1851             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1852             if ret != 0:
1853                 logging.debug(stderr)
1854
1855             print("\nA script has been created at {}".format(filename))
1856             print("This script can be run using the following:")
1857             print("vppctl exec {}\n".format(filename))
1858
1859     @staticmethod
1860     def destroy_iperf_vm(name):
1861         """
1862         Destroy the iperf VM if a VM with the given name is
1863         currently running.
1864
1865         :param name: The name of the VM to be destroyed
1866         :type name: str
1867         """
1868
1869         cmd = 'virsh list'
1870         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1871         if ret != 0:
1872             logging.debug(stderr)
1873             raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))
1874
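        # 'virsh list' shows the running domains; only attempt to destroy the
        # VM if its name appears in that output.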
1875         if re.findall(name, stdout):
1876             cmd = 'virsh destroy {}'.format(name)
1877             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1878             if ret != 0:
1879                 logging.debug(stderr)
1880                 raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))
1881
1882     def create_iperf_vm(self, vmname):
1883         """
1884         Create the iperf VM from the appropriate template and connect its
1885         interface to the vhost-user socket created earlier
1886
1887         """
1888
1889         # Read the iperf VM template file
1890         distro = VPPUtil.get_linux_distro()
1891         if distro[0] == 'Ubuntu':
1892             tfilename = '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(self._rootdir)
1893         else:
1894             tfilename = '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(self._rootdir)
1895
1896         with open(tfilename, 'r') as tfile:
1897             tcontents = tfile.read()
1899
1900         # Add the variables
1901         imagename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_IMAGE)
1902         isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
1903         tcontents = tcontents.format(vmname=vmname, imagename=imagename, isoname=isoname,
1904                                      vhostsocketname=self._sockfilename)
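        # The template is expected to contain {vmname}, {imagename}, {isoname}
        # and {vhostsocketname} placeholders, which are filled in here.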
1905
1906         # Write the xml
1907         ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
1908         with open(ifilename, 'w+') as ifile:
1909             ifile.write(tcontents)
1911
1912         cmd = 'virsh create {}'.format(ifilename)
1913         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1914         if ret != 0:
1915             logging.debug(stderr)
1916             raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))