c36cdd5accc6a089833929198c97f402ff3c8435
[vpp.git] / extras / vpp_config / vpplib / AutoConfig.py
1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Library that supports Auto Configuration."""
15
16 import logging
17 import os
18 import re
19 import yaml
20 from netaddr import IPAddress
21
22 from vpplib.VPPUtil import VPPUtil
23 from vpplib.VppPCIUtil import VppPCIUtil
24 from vpplib.VppHugePageUtil import VppHugePageUtil
25 from vpplib.CpuUtils import CpuUtils
26 from vpplib.VppGrubUtil import VppGrubUtil
27 from vpplib.QemuUtils import QemuUtils
28
29 __all__ = ["AutoConfig"]
30
31 # Constants
32 MIN_SYSTEM_CPUS = 2
33 MIN_TOTAL_HUGE_PAGES = 1024
34 MAX_PERCENT_FOR_HUGE_PAGES = 70
35
36 IPERFVM_XML = 'configs/iperf-vm.xml'
37 IPERFVM_IMAGE = 'images/xenial-mod.img'
38 IPERFVM_ISO = 'configs/cloud-config.iso'
39
40
41 class AutoConfig(object):
42     """Auto Configuration Tools"""
43
44     def __init__(self, rootdir, filename, clean=False):
45         """
46         The Auto Configure class.
47
48         :param rootdir: The root directory for all the auto configuration files
49         :param filename: The autoconfiguration file
50         :param clean: When set initialize the nodes from the auto-config file
51         :type rootdir: str
52         :type filename: str
53         :type clean: bool
54         """
55         self._autoconfig_filename = rootdir + filename
56         self._rootdir = rootdir
57         self._metadata = {}
58         self._nodes = {}
59         self._vpp_devices_node = {}
60         self._hugepage_config = ""
61         self._clean = clean
62         self._loadconfig()
63         self._sockfilename = ""
64
65     def get_nodes(self):
66         """
67         Returns the nodes dictionary.
68
69         :returns: The nodes
70         :rtype: dictionary
71         """
72
73         return self._nodes
74
75     @staticmethod
76     def _autoconfig_backup_file(filename):
77         """
78         Create a backup file.
79
80         :param filename: The file to backup
81         :type filename: str
82         """
83
84         # Does a copy of the file exist, if not create one
85         ofile = filename + '.orig'
86         (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
87         if ret != 0:
88             logging.debug(stderr)
89             if stdout.strip('\n') != ofile:
90                 cmd = 'sudo cp {} {}'.format(filename, ofile)
91                 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
92                 if ret != 0:
93                     logging.debug(stderr)
94
95     # noinspection PyBroadException
96     @staticmethod
97     def _ask_user_ipv4():
98         """
99         Asks the user for a number within a range.
100         default is returned if return is entered.
101
102         :returns: IP address with cidr
103         :rtype: str
104         """
105
106         while True:
107             answer = raw_input("Please enter the IPv4 Address [n.n.n.n/n]: ")
108             try:
109                 ipinput = answer.split('/')
110                 ipaddr = IPAddress(ipinput[0])
111                 if len(ipinput) > 1:
112                     plen = answer.split('/')[1]
113                 else:
114                     answer = raw_input("Please enter the netmask [n.n.n.n]: ")
115                     plen = IPAddress(answer).netmask_bits()
116                 return '{}/{}'.format(ipaddr, plen)
117             except None:
118                 print "Please enter a valid IPv4 address."
119
120     @staticmethod
121     def _ask_user_range(question, first, last, default):
122         """
123         Asks the user for a number within a range.
124         default is returned if return is entered.
125
126         :param question: Text of a question.
127         :param first: First number in the range
128         :param last: Last number in the range
129         :param default: The value returned when return is entered
130         :type question: string
131         :type first: int
132         :type last: int
133         :type default: int
134         :returns: The answer to the question
135         :rtype: int
136         """
137
138         while True:
139             answer = raw_input(question)
140             if answer == '':
141                 answer = default
142                 break
143             if re.findall(r'[0-9+]', answer):
144                 if int(answer) in range(first, last + 1):
145                     break
146                 else:
147                     print "Please a value between {} and {} or Return.". \
148                         format(first, last)
149             else:
150                 print "Please a number between {} and {} or Return.". \
151                     format(first, last)
152
153         return int(answer)
154
155     @staticmethod
156     def _ask_user_yn(question, default):
157         """
158         Asks the user for a yes or no question.
159
160         :param question: Text of a question.
161         :param default: The value returned when return is entered
162         :type question: string
163         :type default: string
164         :returns: The answer to the question
165         :rtype: string
166         """
167
168         input_valid = False
169         default = default.lower()
170         answer = ''
171         while not input_valid:
172             answer = raw_input(question)
173             if answer == '':
174                 answer = default
175             if re.findall(r'[YyNn]', answer):
176                 input_valid = True
177                 answer = answer[0].lower()
178             else:
179                 print "Please answer Y, N or Return."
180
181         return answer
182
183     def _loadconfig(self):
184         """
185         Load the testbed configuration, given the auto configuration file.
186
187         """
188
189         # Get the Topology, from the topology layout file
190         topo = {}
191         with open(self._autoconfig_filename, 'r') as stream:
192             try:
193                 topo = yaml.load(stream)
194                 if 'metadata' in topo:
195                     self._metadata = topo['metadata']
196             except yaml.YAMLError as exc:
197                 raise RuntimeError("Couldn't read the Auto config file {}.".format(self._autoconfig_filename, exc))
198
199         systemfile = self._rootdir + self._metadata['system_config_file']
200         if self._clean is False and os.path.isfile(systemfile):
201             with open(systemfile, 'r') as sysstream:
202                 try:
203                     systopo = yaml.load(sysstream)
204                     if 'nodes' in systopo:
205                         self._nodes = systopo['nodes']
206                 except yaml.YAMLError as sysexc:
207                     raise RuntimeError("Couldn't read the System config file {}.".format(systemfile, sysexc))
208         else:
209             # Get the nodes from Auto Config
210             if 'nodes' in topo:
211                 self._nodes = topo['nodes']
212
213         # Set the root directory in all the nodes
214         for i in self._nodes.items():
215             node = i[1]
216             node['rootdir'] = self._rootdir
217
218     def updateconfig(self):
219         """
220         Update the testbed configuration, given the auto configuration file.
221         We will write the system configuration file with the current node
222         information
223
224         """
225
226         # Initialize the yaml data
227         ydata = {'metadata': self._metadata, 'nodes': self._nodes}
228
229         # Write the system config file
230         filename = self._rootdir + self._metadata['system_config_file']
231         with open(filename, 'w') as yamlfile:
232             yaml.dump(ydata, yamlfile)
233
234     def _update_auto_config(self):
235         """
236         Write the auto configuration file with the new configuration data,
237         input from the user.
238
239         """
240
241         # Initialize the yaml data
242         nodes = {}
243         with open(self._autoconfig_filename, 'r') as stream:
244             try:
245                 ydata = yaml.load(stream)
246                 if 'nodes' in ydata:
247                     nodes = ydata['nodes']
248             except yaml.YAMLError as exc:
249                 print exc
250                 return
251
252         for i in nodes.items():
253             key = i[0]
254             node = i[1]
255
256             # Interfaces
257             node['interfaces'] = {}
258             for item in self._nodes[key]['interfaces'].items():
259                 port = item[0]
260                 interface = item[1]
261
262                 node['interfaces'][port] = {}
263                 addr = '{}'.format(interface['pci_address'])
264                 node['interfaces'][port]['pci_address'] = addr
265                 if 'mac_address' in interface:
266                     node['interfaces'][port]['mac_address'] = \
267                         interface['mac_address']
268
269             if 'total_other_cpus' in self._nodes[key]['cpu']:
270                 node['cpu']['total_other_cpus'] = \
271                     self._nodes[key]['cpu']['total_other_cpus']
272             if 'total_vpp_cpus' in self._nodes[key]['cpu']:
273                 node['cpu']['total_vpp_cpus'] = \
274                     self._nodes[key]['cpu']['total_vpp_cpus']
275             if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
276                 node['cpu']['reserve_vpp_main_core'] = \
277                     self._nodes[key]['cpu']['reserve_vpp_main_core']
278
279             # TCP
280             if 'active_open_sessions' in self._nodes[key]['tcp']:
281                 node['tcp']['active_open_sessions'] = \
282                     self._nodes[key]['tcp']['active_open_sessions']
283             if 'passive_open_sessions' in self._nodes[key]['tcp']:
284                 node['tcp']['passive_open_sessions'] = \
285                     self._nodes[key]['tcp']['passive_open_sessions']
286
287             # Huge pages
288             node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
289
290         # Write the auto config config file
291         with open(self._autoconfig_filename, 'w') as yamlfile:
292             yaml.dump(ydata, yamlfile)
293
294     def apply_huge_pages(self):
295         """
296         Apply the huge page config
297
298         """
299
300         for i in self._nodes.items():
301             node = i[1]
302
303             hpg = VppHugePageUtil(node)
304             hpg.hugepages_dryrun_apply()
305
306     @staticmethod
307     def _apply_vpp_unix(node):
308         """
309         Apply the VPP Unix config
310
311         :param node: Node dictionary with cpuinfo.
312         :type node: dict
313         """
314
315         unix = '  nodaemon\n'
316         if 'unix' not in node['vpp']:
317             return ''
318
319         unixv = node['vpp']['unix']
320         if 'interactive' in unixv:
321             interactive = unixv['interactive']
322             if interactive is True:
323                 unix = '  interactive\n'
324
325         return unix.rstrip('\n')
326
327     @staticmethod
328     def _apply_vpp_cpu(node):
329         """
330         Apply the VPP cpu config
331
332         :param node: Node dictionary with cpuinfo.
333         :type node: dict
334         """
335
336         # Get main core
337         cpu = '\n'
338         if 'vpp_main_core' in node['cpu']:
339             vpp_main_core = node['cpu']['vpp_main_core']
340         else:
341             vpp_main_core = 0
342         if vpp_main_core is not 0:
343             cpu += '  main-core {}\n'.format(vpp_main_core)
344
345         # Get workers
346         vpp_workers = node['cpu']['vpp_workers']
347         vpp_worker_len = len(vpp_workers)
348         if vpp_worker_len > 0:
349             vpp_worker_str = ''
350             for i, worker in enumerate(vpp_workers):
351                 if i > 0:
352                     vpp_worker_str += ','
353                 if worker[0] == worker[1]:
354                     vpp_worker_str += "{}".format(worker[0])
355                 else:
356                     vpp_worker_str += "{}-{}".format(worker[0], worker[1])
357
358             cpu += '  corelist-workers {}\n'.format(vpp_worker_str)
359
360         return cpu
361
362     @staticmethod
363     def _apply_vpp_devices(node):
364         """
365         Apply VPP PCI Device configuration to vpp startup.
366
367         :param node: Node dictionary with cpuinfo.
368         :type node: dict
369         """
370
371         devices = ''
372         ports_per_numa = node['cpu']['ports_per_numa']
373         total_mbufs = node['cpu']['total_mbufs']
374
375         for item in ports_per_numa.items():
376             value = item[1]
377             interfaces = value['interfaces']
378
379             # if 0 was specified for the number of vpp workers, use 1 queue
380             num_rx_queues = None
381             num_tx_queues = None
382             if 'rx_queues' in value:
383                 num_rx_queues = value['rx_queues']
384             if 'tx_queues' in value:
385                 num_tx_queues = value['tx_queues']
386
387             num_rx_desc = None
388             num_tx_desc = None
389
390             # Create the devices string
391             for interface in interfaces:
392                 pci_address = interface['pci_address']
393                 pci_address = pci_address.lstrip("'").rstrip("'")
394                 devices += '\n'
395                 devices += '  dev {} {{ \n'.format(pci_address)
396                 if num_rx_queues:
397                     devices += '    num-rx-queues {}\n'.format(num_rx_queues)
398                 else:
399                     devices += '    num-rx-queues {}\n'.format(1)
400                 if num_tx_queues:
401                     devices += '    num-tx-queues {}\n'.format(num_tx_queues)
402                 if num_rx_desc:
403                     devices += '    num-rx-desc {}\n'.format(num_rx_desc)
404                 if num_tx_desc:
405                     devices += '    num-tx-desc {}\n'.format(num_tx_desc)
406                 devices += '  }'
407
408         # If the total mbufs is not 0 or less than the default, set num-bufs
409         logging.debug("Total mbufs: {}".format(total_mbufs))
410         if total_mbufs is not 0 and total_mbufs > 16384:
411             devices += '\n  num-mbufs {}'.format(total_mbufs)
412
413         return devices
414
415     @staticmethod
416     def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end, total_vpp_workers,
417                           reserve_vpp_main_core):
418         """
419         Calculate the VPP worker information
420
421         :param node: Node dictionary
422         :param vpp_workers: List of VPP workers
423         :param numa_node: Numa node
424         :param other_cpus_end: The end of the cpus allocated for cores
425         other than vpp
426         :param total_vpp_workers: The number of vpp workers needed
427         :param reserve_vpp_main_core: Is there a core needed for
428         the vpp main core
429         :type node: dict
430         :type numa_node: int
431         :type other_cpus_end: int
432         :type total_vpp_workers: int
433         :type reserve_vpp_main_core: bool
434         :returns: Is a core still needed for the vpp main core
435         :rtype: bool
436         """
437
438         # Can we fit the workers in one of these slices
439         cpus = node['cpu']['cpus_per_node'][numa_node]
440         for cpu in cpus:
441             start = cpu[0]
442             end = cpu[1]
443             if start <= other_cpus_end:
444                 start = other_cpus_end + 1
445
446             if reserve_vpp_main_core:
447                 start += 1
448
449             workers_end = start + total_vpp_workers - 1
450
451             if workers_end <= end:
452                 if reserve_vpp_main_core:
453                     node['cpu']['vpp_main_core'] = start - 1
454                 reserve_vpp_main_core = False
455                 if total_vpp_workers:
456                     vpp_workers.append((start, workers_end))
457                 break
458
459         # We still need to reserve the main core
460         if reserve_vpp_main_core:
461             node['cpu']['vpp_main_core'] = other_cpus_end + 1
462
463         return reserve_vpp_main_core
464
465     @staticmethod
466     def _calc_desc_and_queues(total_numa_nodes,
467                               total_ports_per_numa,
468                               total_rx_queues,
469                               ports_per_numa_value):
470         """
471         Calculate the number of descriptors and queues
472
473         :param total_numa_nodes: The total number of numa nodes
474         :param total_ports_per_numa: The total number of ports for this
475         numa node
476         :param total_rx_queues: The total number of rx queues / port
477         :param ports_per_numa_value: The value from the ports_per_numa
478         dictionary
479         :type total_numa_nodes: int
480         :type total_ports_per_numa: int
481         :type total_rx_queues: int
482         :type ports_per_numa_value: dict
483         :returns The total number of message buffers
484         :rtype: int
485         """
486
487         # Get the number of rx queues
488         rx_queues = max(1, total_rx_queues)
489         tx_queues = rx_queues * total_numa_nodes + 1
490
491         # Get the descriptor entries
492         desc_entries = 1024
493         ports_per_numa_value['rx_queues'] = rx_queues
494         total_mbufs = (((rx_queues * desc_entries) +
495                         (tx_queues * desc_entries)) *
496                        total_ports_per_numa)
497         total_mbufs = total_mbufs
498
499         return total_mbufs
500
501     @staticmethod
502     def _create_ports_per_numa(node, interfaces):
503         """
504         Create a dictionary or ports per numa node
505         :param node: Node dictionary
506         :param interfaces: All the interfaces to be used by vpp
507         :type node: dict
508         :type interfaces: dict
509         :returns: The ports per numa dictionary
510         :rtype: dict
511         """
512
513         # Make a list of ports by numa node
514         ports_per_numa = {}
515         for item in interfaces.items():
516             i = item[1]
517             if i['numa_node'] not in ports_per_numa:
518                 ports_per_numa[i['numa_node']] = {'interfaces': []}
519                 ports_per_numa[i['numa_node']]['interfaces'].append(i)
520             else:
521                 ports_per_numa[i['numa_node']]['interfaces'].append(i)
522         node['cpu']['ports_per_numa'] = ports_per_numa
523
524         return ports_per_numa
525
526     def calculate_cpu_parameters(self):
527         """
528         Calculate the cpu configuration.
529
530         """
531
532         # Calculate the cpu parameters, needed for the
533         # vpp_startup and grub configuration
534         for i in self._nodes.items():
535             node = i[1]
536
537             # get total number of nic ports
538             interfaces = node['interfaces']
539
540             # Make a list of ports by numa node
541             ports_per_numa = self._create_ports_per_numa(node, interfaces)
542
543             # Get the number of cpus to skip, we never use the first cpu
544             other_cpus_start = 1
545             other_cpus_end = other_cpus_start + node['cpu']['total_other_cpus'] - 1
546             other_workers = None
547             if other_cpus_end is not 0:
548                 other_workers = (other_cpus_start, other_cpus_end)
549             node['cpu']['other_workers'] = other_workers
550
551             # Allocate the VPP main core and workers
552             vpp_workers = []
553             reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
554             total_vpp_cpus = node['cpu']['total_vpp_cpus']
555             total_rx_queues = node['cpu']['total_rx_queues']
556
557             # If total_vpp_cpus is 0 or is less than the numa nodes with ports
558             #  then we shouldn't get workers
559             total_workers_node = 0
560             if len(ports_per_numa):
561                 total_workers_node = total_vpp_cpus / len(ports_per_numa)
562             total_main = 0
563             if reserve_vpp_main_core:
564                 total_main = 1
565             total_mbufs = 0
566             if total_main + total_workers_node is not 0:
567                 for item in ports_per_numa.items():
568                     numa_node = item[0]
569                     value = item[1]
570
571                     # Get the number of descriptors and queues
572                     mbufs = self._calc_desc_and_queues(len(ports_per_numa),
573                                                        len(value['interfaces']), total_rx_queues, value)
574                     total_mbufs += mbufs
575
576                     # Get the VPP workers
577                     reserve_vpp_main_core = self._calc_vpp_workers(node, vpp_workers, numa_node,
578                                                                    other_cpus_end, total_workers_node,
579                                                                    reserve_vpp_main_core)
580
581                 total_mbufs *= 2.5
582                 total_mbufs = int(total_mbufs)
583             else:
584                 total_mbufs = 0
585
586             # Save the info
587             node['cpu']['vpp_workers'] = vpp_workers
588             node['cpu']['total_mbufs'] = total_mbufs
589
590         # Write the config
591         self.updateconfig()
592
593     @staticmethod
594     def _apply_vpp_tcp(node):
595         """
596         Apply the VPP Unix config
597
598         :param node: Node dictionary with cpuinfo.
599         :type node: dict
600         """
601
602         active_open_sessions = node['tcp']['active_open_sessions']
603         aos = int(active_open_sessions)
604
605         passive_open_sessions = node['tcp']['passive_open_sessions']
606         pos = int(passive_open_sessions)
607
608         # Generate the api-segment gid vpp sheit in any case
609         if (aos + pos) == 0:
610             tcp = "api-segment {\n"
611             tcp = tcp + "  gid vpp\n"
612             tcp = tcp + "}\n"
613             return tcp.rstrip('\n')
614
615         tcp = "# TCP stack-related configuration parameters\n"
616         tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
617         tcp = tcp + "heapsize 4g\n\n"
618         tcp = tcp + "api-segment {\n"
619         tcp = tcp + "  global-size 2000M\n"
620         tcp = tcp + "  api-size 1G\n"
621         tcp = tcp + "}\n\n"
622
623         tcp = tcp + "session {\n"
624         tcp = tcp + "  event-queue-length " + "{:d}".format(aos + pos) + "\n"
625         tcp = tcp + "  preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
626         tcp = tcp + "  v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
627         tcp = tcp + "  v4-session-table-memory 3g\n"
628         if aos > 0:
629             tcp = tcp + "  v4-halfopen-table-buckets " + \
630                   "{:d}".format((aos + pos) / 4) + "\n"
631             tcp = tcp + "  v4-halfopen-table-memory 3g\n"
632             tcp = tcp + "  local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
633             tcp = tcp + "  local-endpoints-table-memory 3g\n"
634         tcp = tcp + "}\n\n"
635
636         tcp = tcp + "tcp {\n"
637         tcp = tcp + "  preallocated-connections " + "{:d}".format(aos + pos) + "\n"
638         if aos > 0:
639             tcp = tcp + "  preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
640         tcp = tcp + "}\n\n"
641
642         return tcp.rstrip('\n')
643
644     def apply_vpp_startup(self):
645         """
646         Apply the vpp startup configration
647
648         """
649
650         # Apply the VPP startup configruation
651         for i in self._nodes.items():
652             node = i[1]
653
654             # Get the startup file
655             rootdir = node['rootdir']
656             sfile = rootdir + node['vpp']['startup_config_file']
657
658             # Get the devices
659             devices = self._apply_vpp_devices(node)
660
661             # Get the CPU config
662             cpu = self._apply_vpp_cpu(node)
663
664             # Get the unix config
665             unix = self._apply_vpp_unix(node)
666
667             # Get the TCP configuration, if any
668             tcp = self._apply_vpp_tcp(node)
669
670             # Make a backup if needed
671             self._autoconfig_backup_file(sfile)
672
673             # Get the template
674             tfile = sfile + '.template'
675             (ret, stdout, stderr) = \
676                 VPPUtil.exec_command('cat {}'.format(tfile))
677             if ret != 0:
678                 raise RuntimeError('Executing cat command failed to node {}'.
679                                    format(node['host']))
680             startup = stdout.format(unix=unix,
681                                     cpu=cpu,
682                                     devices=devices,
683                                     tcp=tcp)
684
685             (ret, stdout, stderr) = \
686                 VPPUtil.exec_command('rm {}'.format(sfile))
687             if ret != 0:
688                 logging.debug(stderr)
689
690             cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
691             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
692             if ret != 0:
693                 raise RuntimeError('Writing config failed node {}'.
694                                    format(node['host']))
695
696     def apply_grub_cmdline(self):
697         """
698         Apply the grub cmdline
699
700         """
701
702         for i in self._nodes.items():
703             node = i[1]
704
705             # Get the isolated CPUs
706             other_workers = node['cpu']['other_workers']
707             vpp_workers = node['cpu']['vpp_workers']
708             if 'vpp_main_core' in node['cpu']:
709                 vpp_main_core = node['cpu']['vpp_main_core']
710             else:
711                 vpp_main_core = 0
712             all_workers = []
713             if other_workers is not None:
714                 all_workers = [other_workers]
715             if vpp_main_core is not 0:
716                 all_workers += [(vpp_main_core, vpp_main_core)]
717             all_workers += vpp_workers
718             isolated_cpus = ''
719             for idx, worker in enumerate(all_workers):
720                 if worker is None:
721                     continue
722                 if idx > 0:
723                     isolated_cpus += ','
724                 if worker[0] == worker[1]:
725                     isolated_cpus += "{}".format(worker[0])
726                 else:
727                     isolated_cpus += "{}-{}".format(worker[0], worker[1])
728
729             vppgrb = VppGrubUtil(node)
730             current_cmdline = vppgrb.get_current_cmdline()
731             if 'grub' not in node:
732                 node['grub'] = {}
733             node['grub']['current_cmdline'] = current_cmdline
734             node['grub']['default_cmdline'] = \
735                 vppgrb.apply_cmdline(node, isolated_cpus)
736
737         self.updateconfig()
738
739     def get_hugepages(self):
740         """
741         Get the hugepage configuration
742
743         """
744
745         for i in self._nodes.items():
746             node = i[1]
747
748             hpg = VppHugePageUtil(node)
749             max_map_count, shmmax = hpg.get_huge_page_config()
750             node['hugepages']['max_map_count'] = max_map_count
751             node['hugepages']['shmax'] = shmmax
752             total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
753             node['hugepages']['actual_total'] = total
754             node['hugepages']['free'] = free
755             node['hugepages']['size'] = size
756             node['hugepages']['memtotal'] = memtotal
757             node['hugepages']['memfree'] = memfree
758
759         self.updateconfig()
760
761     def get_grub(self):
762         """
763         Get the grub configuration
764
765         """
766
767         for i in self._nodes.items():
768             node = i[1]
769
770             vppgrb = VppGrubUtil(node)
771             current_cmdline = vppgrb.get_current_cmdline()
772             default_cmdline = vppgrb.get_default_cmdline()
773
774             # Get the total number of isolated CPUs
775             current_iso_cpus = 0
776             iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
777             iso_cpurl = len(iso_cpur)
778             if iso_cpurl > 0:
779                 iso_cpu_str = iso_cpur[0]
780                 iso_cpu_str = iso_cpu_str.split('=')[1]
781                 iso_cpul = iso_cpu_str.split(',')
782                 for iso_cpu in iso_cpul:
783                     isocpuspl = iso_cpu.split('-')
784                     if len(isocpuspl) is 1:
785                         current_iso_cpus += 1
786                     else:
787                         first = int(isocpuspl[0])
788                         second = int(isocpuspl[1])
789                         if first == second:
790                             current_iso_cpus += 1
791                         else:
792                             current_iso_cpus += second - first
793
794             if 'grub' not in node:
795                 node['grub'] = {}
796             node['grub']['current_cmdline'] = current_cmdline
797             node['grub']['default_cmdline'] = default_cmdline
798             node['grub']['current_iso_cpus'] = current_iso_cpus
799
800         self.updateconfig()
801
802     @staticmethod
803     def _get_device(node):
804         """
805         Get the device configuration for a single node
806
807         :param node: Node dictionary with cpuinfo.
808         :type node: dict
809
810         """
811
812         vpp = VppPCIUtil(node)
813         vpp.get_all_devices()
814
815         # Save the device information
816         node['devices'] = {}
817         node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
818         node['devices']['kernel_devices'] = vpp.get_kernel_devices()
819         node['devices']['other_devices'] = vpp.get_other_devices()
820         node['devices']['linkup_devices'] = vpp.get_link_up_devices()
821
822     def get_devices_per_node(self):
823         """
824         Get the device configuration for all the nodes
825
826         """
827
828         for i in self._nodes.items():
829             node = i[1]
830             # Update the interface data
831
832             self._get_device(node)
833
834         self.updateconfig()
835
836     @staticmethod
837     def get_cpu_layout(node):
838         """
839         Get the cpu layout
840
841         using lscpu -p get the cpu layout.
842         Returns a list with each item representing a single cpu.
843
844         :param node: Node dictionary.
845         :type node: dict
846         :returns: The cpu layout
847         :rtype: list
848         """
849
850         cmd = 'lscpu -p'
851         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
852         if ret != 0:
853             raise RuntimeError('{} failed on node {} {}'.
854                                format(cmd, node['host'], stderr))
855
856         pcpus = []
857         lines = stdout.split('\n')
858         for line in lines:
859             if line == '' or line[0] == '#':
860                 continue
861             linesplit = line.split(',')
862             layout = {'cpu': linesplit[0], 'core': linesplit[1],
863                       'socket': linesplit[2], 'node': linesplit[3]}
864
865             # cpu, core, socket, node
866             pcpus.append(layout)
867
868         return pcpus
869
870     def get_cpu(self):
871         """
872         Get the cpu configuration
873
874         """
875
876         # Get the CPU layout
877         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
878
879         for i in self._nodes.items():
880             node = i[1]
881
882             # Get the cpu layout
883             layout = self.get_cpu_layout(node)
884             node['cpu']['layout'] = layout
885
886             cpuinfo = node['cpuinfo']
887             smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
888             node['cpu']['smt_enabled'] = smt_enabled
889
890             # We don't want to write the cpuinfo
891             node['cpuinfo'] = ""
892
893         # Write the config
894         self.updateconfig()
895
896     def discover(self):
897         """
898         Get the current system configuration.
899
900         """
901
902         # Get the Huge Page configuration
903         self.get_hugepages()
904
905         # Get the device configuration
906         self.get_devices_per_node()
907
908         # Get the CPU configuration
909         self.get_cpu()
910
911         # Get the current grub cmdline
912         self.get_grub()
913
914     def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
915         """
916         Ask the user questions related to the cpu configuration.
917
918         :param node: Node dictionary
919         :param total_cpus: The total number of cpus in the system
920         :param numa_nodes: The list of numa nodes in the system
921         :type node: dict
922         :type total_cpus: int
923         :type numa_nodes: list
924         """
925
926         print "\nYour system has {} core(s) and {} Numa Nodes.". \
927             format(total_cpus, len(numa_nodes))
928         print "To begin, we suggest not reserving any cores for VPP or other processes."
929         print "Then to improve performance start reserving cores and adding queues as needed. "
930
931         max_vpp_cpus = 4
932         total_vpp_cpus = 0
933         if max_vpp_cpus > 0:
934             question = "\nHow many core(s) shall we reserve for VPP [0-{}][0]? ".format(max_vpp_cpus)
935             total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
936             node['cpu']['total_vpp_cpus'] = total_vpp_cpus
937
938         max_other_cores = (total_cpus - total_vpp_cpus) / 2
939         question = 'How many core(s) do you want to reserve for processes other than VPP? [0-{}][0]? '. \
940             format(str(max_other_cores))
941         total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
942         node['cpu']['total_other_cpus'] = total_other_cpus
943
944         max_main_cpus = max_vpp_cpus + 1 - total_vpp_cpus
945         reserve_vpp_main_core = False
946         if max_main_cpus > 0:
947             question = "Should we reserve 1 core for the VPP Main thread? "
948             question += "[y/N]? "
949             answer = self._ask_user_yn(question, 'n')
950             if answer == 'y':
951                 reserve_vpp_main_core = True
952             node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
953             node['cpu']['vpp_main_core'] = 0
954
955         question = "How many RX queues per port shall we use for VPP [1-4][1]? ". \
956             format(max_vpp_cpus)
957         total_rx_queues = self._ask_user_range(question, 1, 4, 1)
958         node['cpu']['total_rx_queues'] = total_rx_queues
959
    def modify_cpu(self, ask_questions=True):
        """
        Modify the cpu configuration, asking for the user for the values.

        :param ask_questions: When true ask the user for config parameters
        :type ask_questions: bool

        """

        # Get the CPU layout
        CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)

        for i in self._nodes.items():
            node = i[1]
            total_cpus = 0
            total_cpus_per_slice = 0
            cpus_per_node = {}
            numa_nodes = []
            cores = []
            cpu_layout = self.get_cpu_layout(node)

            # Assume the number of cpus per slice is always the same as the
            # first slice
            first_node = '0'
            for cpu in cpu_layout:
                if cpu['node'] != first_node:
                    break
                total_cpus_per_slice += 1

            # Get the total number of cpus, cores, and numa nodes from the
            # cpu layout
            for cpul in cpu_layout:
                numa_node = cpul['node']
                core = cpul['core']
                cpu = cpul['cpu']
                total_cpus += 1

                if numa_node not in cpus_per_node:
                    cpus_per_node[numa_node] = []
                cpuperslice = int(cpu) % total_cpus_per_slice
                if cpuperslice == 0:
                    # Record each slice as an inclusive (first, last) cpu range
                    cpus_per_node[numa_node].append((int(cpu), int(cpu) +
                                                     total_cpus_per_slice - 1))
                if numa_node not in numa_nodes:
                    numa_nodes.append(numa_node)
                if core not in cores:
                    cores.append(core)
            node['cpu']['cpus_per_node'] = cpus_per_node

            # Ask the user some questions (skipped on small systems with
            # fewer than 8 cpus)
            if ask_questions and total_cpus >= 8:
                self._modify_cpu_questions(node, total_cpus, numa_nodes)

            # Populate the interfaces with the numa node
            if 'interfaces' in node:
                ikeys = node['interfaces'].keys()
                VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))

            # We don't want to write the cpuinfo
            node['cpuinfo'] = ""

        # Write the configs
        self._update_auto_config()
        self.updateconfig()
1023
    def _modify_other_devices(self, node,
                              other_devices, kernel_devices, dpdk_devices):
        """
        Modify the devices configuration, asking for the user for the values.

        Devices the user re-assigns are moved out of other_devices into
        kernel_devices or dpdk_devices; all three dicts are mutated in place.

        :param node: Node dictionary
        :param other_devices: Devices currently bound to neither the kernel
            nor DPDK
        :param kernel_devices: Devices bound to a kernel driver
        :param dpdk_devices: Devices bound to a DPDK driver
        :type node: dict
        :type other_devices: dict
        :type kernel_devices: dict
        :type dpdk_devices: dict
        """

        # First pass: offer the unbound devices back to the OS
        odevices_len = len(other_devices)
        if odevices_len > 0:
            print "\nThese device(s) are currently NOT being used",
            print "by VPP or the OS.\n"
            VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
            question = "\nWould you like to give any of these devices"
            question += " back to the OS [Y/n]? "
            answer = self._ask_user_yn(question, 'Y')
            if answer == 'y':
                # Collect the picks first, then move them after the loop so
                # other_devices is not mutated while being iterated.
                vppd = {}
                for dit in other_devices.items():
                    dvid = dit[0]
                    device = dit[1]
                    question = "Would you like to use device {} for". \
                        format(dvid)
                    question += " the OS [y/N]? "
                    answer = self._ask_user_yn(question, 'n')
                    if answer == 'y':
                        # Bind using the first recorded alternate driver,
                        # if one exists for this device
                        if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
                            driver = device['unused'][0]
                            VppPCIUtil.bind_vpp_device(node, driver, dvid)
                        else:
                            logging.debug('Could not bind device {}'.format(dvid))
                        vppd[dvid] = device
                for dit in vppd.items():
                    dvid = dit[0]
                    device = dit[1]
                    kernel_devices[dvid] = device
                    del other_devices[dvid]

        # Second pass: offer whatever is still unbound to VPP
        odevices_len = len(other_devices)
        if odevices_len > 0:
            print "\nThese device(s) are still NOT being used ",
            print "by VPP or the OS.\n"
            VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
            question = "\nWould you like use any of these for VPP [y/N]? "
            answer = self._ask_user_yn(question, 'N')
            if answer == 'y':
                # Same two-pass pattern: pick, then rebind/move
                vppd = {}
                for dit in other_devices.items():
                    dvid = dit[0]
                    device = dit[1]
                    question = "Would you like to use device {} ".format(dvid)
                    question += "for VPP [y/N]? "
                    answer = self._ask_user_yn(question, 'n')
                    if answer == 'y':
                        vppd[dvid] = device
                for dit in vppd.items():
                    dvid = dit[0]
                    device = dit[1]
                    if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
                        driver = device['unused'][0]
                        logging.debug('Binding device {} to driver {}'.format(dvid, driver))
                        VppPCIUtil.bind_vpp_device(node, driver, dvid)
                    else:
                        logging.debug('Could not bind device {}'.format(dvid))
                    dpdk_devices[dvid] = device
                    del other_devices[dvid]
1089
1090     def update_interfaces_config(self):
1091         """
1092         Modify the interfaces directly from the config file.
1093
1094         """
1095
1096         for i in self._nodes.items():
1097             node = i[1]
1098             devices = node['devices']
1099             all_devices = devices['other_devices']
1100             all_devices.update(devices['dpdk_devices'])
1101             all_devices.update(devices['kernel_devices'])
1102
1103             current_ifcs = {}
1104             interfaces = {}
1105             if 'interfaces' in node:
1106                 current_ifcs = node['interfaces']
1107             if current_ifcs:
1108                 for ifc in current_ifcs.values():
1109                     dvid = ifc['pci_address']
1110                     if dvid in all_devices:
1111                         VppPCIUtil.vpp_create_interface(interfaces, dvid,
1112                                                         all_devices[dvid])
1113             node['interfaces'] = interfaces
1114
1115         self.updateconfig()
1116
    def modify_devices(self):
        """
        Modify the devices configuration, asking for the user for the values.

        For each node the user is walked through three passes:
        re-assigning unbound devices, moving kernel devices to VPP, and
        giving DPDK devices back to the kernel. node['interfaces'] is then
        rebuilt from the devices left on DPDK.
        """

        for i in self._nodes.items():
            node = i[1]
            devices = node['devices']
            other_devices = devices['other_devices']
            kernel_devices = devices['kernel_devices']
            dpdk_devices = devices['dpdk_devices']

            # Pass 1: devices bound to neither the kernel nor DPDK
            if other_devices:
                self._modify_other_devices(node, other_devices,
                                           kernel_devices, dpdk_devices)

                # Get the devices again for this node
                self._get_device(node)
                devices = node['devices']
                kernel_devices = devices['kernel_devices']
                dpdk_devices = devices['dpdk_devices']

            # Pass 2: offer kernel-bound devices to VPP
            klen = len(kernel_devices)
            if klen > 0:
                print "\nThese devices have kernel interfaces, but",
                print "appear to be safe to use with VPP.\n"
                VppPCIUtil.show_vpp_devices(kernel_devices)
                question = "\nWould you like to use any of these "
                question += "device(s) for VPP [y/N]? "
                answer = self._ask_user_yn(question, 'n')
                if answer == 'y':
                    # Collect the picks first, then move them after the
                    # loop so the dict is not mutated while iterating.
                    vppd = {}
                    for dit in kernel_devices.items():
                        dvid = dit[0]
                        device = dit[1]
                        question = "Would you like to use device {} ". \
                            format(dvid)
                        question += "for VPP [y/N]? "
                        answer = self._ask_user_yn(question, 'n')
                        if answer == 'y':
                            vppd[dvid] = device
                    for dit in vppd.items():
                        dvid = dit[0]
                        device = dit[1]
                        # Bind via the first recorded alternate driver,
                        # if one exists for this device
                        if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
                            driver = device['unused'][0]
                            logging.debug('Binding device {} to driver {}'.format(dvid, driver))
                            VppPCIUtil.bind_vpp_device(node, driver, dvid)
                        else:
                            logging.debug('Could not bind device {}'.format(dvid))
                        dpdk_devices[dvid] = device
                        del kernel_devices[dvid]

            # Pass 3: offer to remove devices currently assigned to VPP
            dlen = len(dpdk_devices)
            if dlen > 0:
                print "\nThese device(s) will be used by VPP.\n"
                VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
                question = "\nWould you like to remove any of "
                question += "these device(s) [y/N]? "
                answer = self._ask_user_yn(question, 'n')
                if answer == 'y':
                    # Same two-pass pattern: pick, then rebind/move
                    vppd = {}
                    for dit in dpdk_devices.items():
                        dvid = dit[0]
                        device = dit[1]
                        question = "Would you like to remove {} [y/N]? ". \
                            format(dvid)
                        answer = self._ask_user_yn(question, 'n')
                        if answer == 'y':
                            vppd[dvid] = device
                    for dit in vppd.items():
                        dvid = dit[0]
                        device = dit[1]
                        if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
                            driver = device['unused'][0]
                            logging.debug('Binding device {} to driver {}'.format(dvid, driver))
                            VppPCIUtil.bind_vpp_device(node, driver, dvid)
                        else:
                            logging.debug('Could not bind device {}'.format(dvid))
                        kernel_devices[dvid] = device
                        del dpdk_devices[dvid]

            # Rebuild the interface list from the final DPDK device set
            interfaces = {}
            for dit in dpdk_devices.items():
                dvid = dit[0]
                device = dit[1]
                VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
            node['interfaces'] = interfaces

            print "\nThese device(s) will be used by VPP, please",
            print "rerun this option if this is incorrect.\n"
            VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)

        self._update_auto_config()
        self.updateconfig()
1213
1214     def modify_huge_pages(self):
1215         """
1216         Modify the huge page configuration, asking for the user for the values.
1217
1218         """
1219
1220         for i in self._nodes.items():
1221             node = i[1]
1222
1223             total = node['hugepages']['actual_total']
1224             free = node['hugepages']['free']
1225             size = node['hugepages']['size']
1226             memfree = node['hugepages']['memfree'].split(' ')[0]
1227             hugesize = int(size.split(' ')[0])
1228             # The max number of huge pages should be no more than
1229             # 70% of total free memory
1230             maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
1231             print "\nThere currently {} {} huge pages free.". \
1232                 format(free, size)
1233             question = "Do you want to reconfigure the number of "
1234             question += "huge pages [y/N]? "
1235             answer = self._ask_user_yn(question, 'n')
1236             if answer == 'n':
1237                 node['hugepages']['total'] = total
1238                 continue
1239
1240             print "\nThere currently a total of {} huge pages.". \
1241                 format(total)
1242             question = "How many huge pages do you want [{} - {}][{}]? ". \
1243                 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1244             answer = self._ask_user_range(question, 1024, maxpages, 1024)
1245             node['hugepages']['total'] = str(answer)
1246
1247         # Update auto-config.yaml
1248         self._update_auto_config()
1249
1250         # Rediscover just the hugepages
1251         self.get_hugepages()
1252
1253     def get_tcp_params(self):
1254         """
1255         Get the tcp configuration
1256
1257         """
1258         # maybe nothing to do here?
1259         self.updateconfig()
1260
1261     def acquire_tcp_params(self):
1262         """
1263         Ask the user for TCP stack configuration parameters
1264
1265         """
1266
1267         for i in self._nodes.items():
1268             node = i[1]
1269
1270             question = "\nHow many active-open / tcp client sessions are expected "
1271             question = question + "[0-10000000][0]? "
1272             answer = self._ask_user_range(question, 0, 10000000, 0)
1273             # Less than 10K is equivalent to 0
1274             if int(answer) < 10000:
1275                 answer = 0
1276             node['tcp']['active_open_sessions'] = answer
1277
1278             question = "How many passive-open / tcp server sessions are expected "
1279             question = question + "[0-10000000][0]? "
1280             answer = self._ask_user_range(question, 0, 10000000, 0)
1281             # Less than 10K is equivalent to 0
1282             if int(answer) < 10000:
1283                 answer = 0
1284             node['tcp']['passive_open_sessions'] = answer
1285
1286         # Update auto-config.yaml
1287         self._update_auto_config()
1288
1289         # Rediscover tcp parameters
1290         self.get_tcp_params()
1291
1292     @staticmethod
1293     def patch_qemu(node):
1294         """
1295         Patch qemu with the correct patches.
1296
1297         :param node: Node dictionary
1298         :type node: dict
1299         """
1300
1301         print '\nWe are patching the node "{}":\n'.format(node['host'])
1302         QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
1303
1304     @staticmethod
1305     def cpu_info(node):
1306         """
1307         print the CPU information
1308
1309         """
1310
1311         cpu = CpuUtils.get_cpu_info_per_node(node)
1312
1313         item = 'Model name'
1314         if item in cpu:
1315             print "{:>20}:    {}".format(item, cpu[item])
1316         item = 'CPU(s)'
1317         if item in cpu:
1318             print "{:>20}:    {}".format(item, cpu[item])
1319         item = 'Thread(s) per core'
1320         if item in cpu:
1321             print "{:>20}:    {}".format(item, cpu[item])
1322         item = 'Core(s) per socket'
1323         if item in cpu:
1324             print "{:>20}:    {}".format(item, cpu[item])
1325         item = 'Socket(s)'
1326         if item in cpu:
1327             print "{:>20}:    {}".format(item, cpu[item])
1328         item = 'NUMA node(s)'
1329         numa_nodes = 0
1330         if item in cpu:
1331             numa_nodes = int(cpu[item])
1332         for i in xrange(0, numa_nodes):
1333             item = "NUMA node{} CPU(s)".format(i)
1334             print "{:>20}:    {}".format(item, cpu[item])
1335         item = 'CPU max MHz'
1336         if item in cpu:
1337             print "{:>20}:    {}".format(item, cpu[item])
1338         item = 'CPU min MHz'
1339         if item in cpu:
1340             print "{:>20}:    {}".format(item, cpu[item])
1341
1342         if node['cpu']['smt_enabled']:
1343             smt = 'Enabled'
1344         else:
1345             smt = 'Disabled'
1346         print "{:>20}:    {}".format('SMT', smt)
1347
1348         # VPP Threads
1349         print "\nVPP Threads: (Name: Cpu Number)"
1350         vpp_processes = cpu['vpp_processes']
1351         for i in vpp_processes.items():
1352             print "  {:10}: {:4}".format(i[0], i[1])
1353
1354     @staticmethod
1355     def device_info(node):
1356         """
1357         Show the device information.
1358
1359         """
1360
1361         if 'cpu' in node and 'total_mbufs' in node['cpu']:
1362             total_mbufs = node['cpu']['total_mbufs']
1363             if total_mbufs is not 0:
1364                 print "Total Number of Buffers: {}".format(total_mbufs)
1365
1366         vpp = VppPCIUtil(node)
1367         vpp.get_all_devices()
1368         linkup_devs = vpp.get_link_up_devices()
1369         if len(linkup_devs):
1370             print ("\nDevices with link up (can not be used with VPP):")
1371             vpp.show_vpp_devices(linkup_devs, show_header=False)
1372             # for dev in linkup_devs:
1373             #    print ("    " + dev)
1374         kernel_devs = vpp.get_kernel_devices()
1375         if len(kernel_devs):
1376             print ("\nDevices bound to kernel drivers:")
1377             vpp.show_vpp_devices(kernel_devs, show_header=False)
1378         else:
1379             print ("\nNo devices bound to kernel drivers")
1380
1381         dpdk_devs = vpp.get_dpdk_devices()
1382         if len(dpdk_devs):
1383             print ("\nDevices bound to DPDK drivers:")
1384             vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1385                                  show_header=False)
1386         else:
1387             print ("\nNo devices bound to DPDK drivers")
1388
1389         vpputl = VPPUtil()
1390         interfaces = vpputl.get_hardware(node)
1391         if interfaces == {}:
1392             return
1393
1394         print ("\nDevices in use by VPP:")
1395
1396         if len(interfaces.items()) < 2:
1397             print ("None")
1398             return
1399
1400         print "{:30} {:6} {:4} {:7} {:4} {:7}". \
1401             format('Name', 'Socket', 'RXQs',
1402                    'RXDescs', 'TXQs', 'TXDescs')
1403         for intf in sorted(interfaces.items()):
1404             name = intf[0]
1405             value = intf[1]
1406             if name == 'local0':
1407                 continue
1408             socket = rx_qs = rx_ds = tx_qs = tx_ds = ''
1409             if 'cpu socket' in value:
1410                 socket = int(value['cpu socket'])
1411             if 'rx queues' in value:
1412                 rx_qs = int(value['rx queues'])
1413             if 'rx descs' in value:
1414                 rx_ds = int(value['rx descs'])
1415             if 'tx queues' in value:
1416                 tx_qs = int(value['tx queues'])
1417             if 'tx descs' in value:
1418                 tx_ds = int(value['tx descs'])
1419
1420             print ("{:30} {:>6} {:>4} {:>7} {:>4} {:>7}".
1421                    format(name, socket, rx_qs, rx_ds, tx_qs, tx_ds))
1422
1423     @staticmethod
1424     def hugepage_info(node):
1425         """
1426         Show the huge page information.
1427
1428         """
1429
1430         hpg = VppHugePageUtil(node)
1431         hpg.show_huge_pages()
1432
1433     @staticmethod
1434     def min_system_resources(node):
1435         """
1436         Check the system for basic minimum resources, return true if
1437         there is enough.
1438
1439         :returns: boolean
1440         :rtype: dict
1441         """
1442
1443         min_sys_res = True
1444
1445         # CPUs
1446         if 'layout' in node['cpu']:
1447             total_cpus = len(node['cpu']['layout'])
1448             if total_cpus < 2:
1449                 print "\nThere is only {} CPU(s) available on this system.".format(total_cpus)
1450                 print "This is not enough to run VPP."
1451                 min_sys_res = False
1452
1453         # System Memory
1454         if 'free' in node['hugepages'] and \
1455                 'memfree' in node['hugepages'] and \
1456                 'size' in node['hugepages']:
1457             free = node['hugepages']['free']
1458             memfree = float(node['hugepages']['memfree'].split(' ')[0])
1459             hugesize = float(node['hugepages']['size'].split(' ')[0])
1460
1461             memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1462             percentmemhugepages = (memhugepages / memfree) * 100
1463             if free is '0' and \
1464                     percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1465                 print "\nThe System has only {} of free memory.".format(int(memfree))
1466                 print "You will not be able to allocate enough Huge Pages for VPP."
1467                 min_sys_res = False
1468
1469         return min_sys_res
1470
1471     def sys_info(self):
1472         """
1473         Print the system information
1474
1475         """
1476
1477         for i in self._nodes.items():
1478             print "\n=============================="
1479             name = i[0]
1480             node = i[1]
1481
1482             print "NODE: {}\n".format(name)
1483
1484             # CPU
1485             print "CPU:"
1486             self.cpu_info(node)
1487
1488             # Grub
1489             print "\nGrub Command Line:"
1490             if 'grub' in node:
1491                 print \
1492                     "  Current: {}".format(
1493                         node['grub']['current_cmdline'])
1494                 print \
1495                     "  Configured: {}".format(
1496                         node['grub']['default_cmdline'])
1497
1498             # Huge Pages
1499             print "\nHuge Pages:"
1500             self.hugepage_info(node)
1501
1502             # Devices
1503             print "\nDevices:"
1504             self.device_info(node)
1505
1506             # Status
1507             print "\nVPP Service Status:"
1508             state, errors = VPPUtil.status(node)
1509             print "  {}".format(state)
1510             for e in errors:
1511                 print "  {}".format(e)
1512
1513             # Minimum system resources
1514             self.min_system_resources(node)
1515
1516             print "\n=============================="
1517
1518     def _ipv4_interface_setup_questions(self, node):
1519         """
1520         Ask the user some questions and get a list of interfaces
1521         and IPv4 addresses associated with those interfaces
1522
1523         :param node: Node dictionary.
1524         :type node: dict
1525         :returns: A list or interfaces with ip addresses
1526         :rtype: dict
1527         """
1528
1529         vpputl = VPPUtil()
1530         interfaces = vpputl.get_hardware(node)
1531         if interfaces == {}:
1532             return
1533
1534         interfaces_with_ip = []
1535         for intf in sorted(interfaces.items()):
1536             name = intf[0]
1537             if name == 'local0':
1538                 continue
1539
1540             question = "Would you like add address to interface {} [Y/n]? ".format(name)
1541             answer = self._ask_user_yn(question, 'y')
1542             if answer == 'y':
1543                 address = {}
1544                 addr = self._ask_user_ipv4()
1545                 address['name'] = name
1546                 address['addr'] = addr
1547                 interfaces_with_ip.append(address)
1548
1549         return interfaces_with_ip
1550
1551     def ipv4_interface_setup(self):
1552         """
1553         After asking the user some questions, get a list of interfaces
1554         and IPv4 addresses associated with those interfaces
1555
1556         """
1557
1558         for i in self._nodes.items():
1559             node = i[1]
1560
1561             # Show the current interfaces with IP addresses
1562             current_ints = VPPUtil.get_int_ip(node)
1563             if current_ints is not {}:
1564                 print ("\nThese are the current interfaces with IP addresses:")
1565                 for items in sorted(current_ints.items()):
1566                     name = items[0]
1567                     value = items[1]
1568                     if 'address' not in value:
1569                         address = 'Not Set'
1570                     else:
1571                         address = value['address']
1572                     print ("{:30} {:20} {:10}".format(name, address, value['state']))
1573                 question = "\nWould you like to keep this configuration [Y/n]? "
1574                 answer = self._ask_user_yn(question, 'y')
1575                 if answer == 'y':
1576                     continue
1577             else:
1578                 print ("\nThere are currently no interfaces with IP addresses.")
1579
1580             # Create a script that add the ip addresses to the interfaces
1581             # and brings the interfaces up
1582             ints_with_addrs = self._ipv4_interface_setup_questions(node)
1583             content = ''
1584             for ints in ints_with_addrs:
1585                 name = ints['name']
1586                 addr = ints['addr']
1587                 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1588                 setintupstr = 'set int state {} up\n'.format(name)
1589                 content += setipstr + setintupstr
1590
1591             # Write the content to the script
1592             rootdir = node['rootdir']
1593             filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1594             with open(filename, 'w+') as sfile:
1595                 sfile.write(content)
1596
1597             # Execute the script
1598             cmd = 'vppctl exec {}'.format(filename)
1599             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1600             if ret != 0:
1601                 logging.debug(stderr)
1602
1603             print("\nA script as been created at {}".format(filename))
1604             print("This script can be run using the following:")
1605             print("vppctl exec {}\n".format(filename))
1606
1607     def _create_vints_questions(self, node):
1608         """
1609         Ask the user some questions and get a list of interfaces
1610         and IPv4 addresses associated with those interfaces
1611
1612         :param node: Node dictionary.
1613         :type node: dict
1614         :returns: A list or interfaces with ip addresses
1615         :rtype: list
1616         """
1617
1618         vpputl = VPPUtil()
1619         interfaces = vpputl.get_hardware(node)
1620         if interfaces == {}:
1621             return []
1622
1623         # First delete all the Virtual interfaces
1624         for intf in sorted(interfaces.items()):
1625             name = intf[0]
1626             if name[:7] == 'Virtual':
1627                 cmd = 'vppctl delete vhost-user {}'.format(name)
1628                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1629                 if ret != 0:
1630                     logging.debug('{} failed on node {} {}'.format(
1631                         cmd, node['host'], stderr))
1632
1633         # Create a virtual interface, for each interface the user wants to use
1634         interfaces = vpputl.get_hardware(node)
1635         if interfaces == {}:
1636             return []
1637         interfaces_with_virtual_interfaces = []
1638         inum = 1
1639         for intf in sorted(interfaces.items()):
1640             name = intf[0]
1641             if name == 'local0':
1642                 continue
1643
1644             question = "Would you like connect this interface {} to the VM [Y/n]? ".format(name)
1645             answer = self._ask_user_yn(question, 'y')
1646             if answer == 'y':
1647                 sockfilename = '/var/run/vpp/{}.sock'.format(name.replace('/', '_'))
1648                 if os.path.exists(sockfilename):
1649                     os.remove(sockfilename)
1650                 cmd = 'vppctl create vhost-user socket {} server'.format(sockfilename)
1651                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1652                 if ret != 0:
1653                     raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
1654                 vintname = stdout.rstrip('\r\n')
1655
1656                 cmd = 'chmod 777 {}'.format(sockfilename)
1657                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1658                 if ret != 0:
1659                     raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
1660
1661                 interface = {'name': name, 'virtualinterface': '{}'.format(vintname),
1662                              'bridge': '{}'.format(inum)}
1663                 inum += 1
1664                 interfaces_with_virtual_interfaces.append(interface)
1665
1666         return interfaces_with_virtual_interfaces
1667
1668     def create_and_bridge_virtual_interfaces(self):
1669         """
1670         After asking the user some questions, create a VM and connect the interfaces
1671         to VPP interfaces
1672
1673         """
1674
1675         for i in self._nodes.items():
1676             node = i[1]
1677
1678             # Show the current bridge and interface configuration
1679             print "\nThis the current bridge configuration:"
1680             VPPUtil.show_bridge(node)
1681             question = "\nWould you like to keep this configuration [Y/n]? "
1682             answer = self._ask_user_yn(question, 'y')
1683             if answer == 'y':
1684                 continue
1685
1686             # Create a script that builds a bridge configuration with physical interfaces
1687             # and virtual interfaces
1688             ints_with_vints = self._create_vints_questions(node)
1689             content = ''
1690             for intf in ints_with_vints:
1691                 vhoststr = 'comment { The following command creates the socket }\n'
1692                 vhoststr += 'comment { and returns a virtual interface }\n'
1693                 vhoststr += 'comment {{ create vhost-user socket /var/run/vpp/sock{}.sock server }}\n'. \
1694                     format(intf['bridge'])
1695
1696                 setintdnstr = 'set interface state {} down\n'.format(intf['name'])
1697
1698                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
1699                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])
1700
1701                 # set interface state VirtualEthernet/0/0/0 up
1702                 setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])
1703
1704                 # set interface state VirtualEthernet/0/0/0 down
1705                 setintupstr = 'set interface state {} up\n'.format(intf['name'])
1706
1707                 content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
1708
1709             # Write the content to the script
1710             rootdir = node['rootdir']
1711             filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1712             with open(filename, 'w+') as sfile:
1713                 sfile.write(content)
1714
1715             # Execute the script
1716             cmd = 'vppctl exec {}'.format(filename)
1717             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1718             if ret != 0:
1719                 logging.debug(stderr)
1720
1721             print("\nA script as been created at {}".format(filename))
1722             print("This script can be run using the following:")
1723             print("vppctl exec {}\n".format(filename))
1724
1725     def _iperf_vm_questions(self, node):
1726         """
1727         Ask the user some questions and get a list of interfaces
1728         and IPv4 addresses associated with those interfaces
1729
1730         :param node: Node dictionary.
1731         :type node: dict
1732         :returns: A list or interfaces with ip addresses
1733         :rtype: list
1734         """
1735
1736         vpputl = VPPUtil()
1737         interfaces = vpputl.get_hardware(node)
1738         if interfaces == {}:
1739             return []
1740
1741         # First delete all the Virtual interfaces
1742         for intf in sorted(interfaces.items()):
1743             name = intf[0]
1744             if name[:7] == 'Virtual':
1745                 cmd = 'vppctl delete vhost-user {}'.format(name)
1746                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1747                 if ret != 0:
1748                     logging.debug('{} failed on node {} {}'.format(
1749                         cmd, node['host'], stderr))
1750
1751         # Create a virtual interface, for each interface the user wants to use
1752         interfaces = vpputl.get_hardware(node)
1753         if interfaces == {}:
1754             return []
1755         interfaces_with_virtual_interfaces = []
1756         inum = 1
1757
1758         while True:
1759             print '\nPlease pick one interface to connect to the iperf VM.'
1760             for intf in sorted(interfaces.items()):
1761                 name = intf[0]
1762                 if name == 'local0':
1763                     continue
1764
1765                 question = "Would you like connect this interface {} to the VM [y/N]? ".format(name)
1766                 answer = self._ask_user_yn(question, 'n')
1767                 if answer == 'y':
1768                     self._sockfilename = '/var/run/vpp/{}.sock'.format(name.replace('/', '_'))
1769                     if os.path.exists(self._sockfilename):
1770                         os.remove(self._sockfilename)
1771                     cmd = 'vppctl create vhost-user socket {} server'.format(self._sockfilename)
1772                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1773                     if ret != 0:
1774                         raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
1775                     vintname = stdout.rstrip('\r\n')
1776
1777                     cmd = 'chmod 777 {}'.format(self._sockfilename)
1778                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1779                     if ret != 0:
1780                         raise RuntimeError("Couldn't execute the command {}, {}.".format(cmd, stderr))
1781
1782                     interface = {'name': name, 'virtualinterface': '{}'.format(vintname),
1783                                  'bridge': '{}'.format(inum)}
1784                     inum += 1
1785                     interfaces_with_virtual_interfaces.append(interface)
1786                     return interfaces_with_virtual_interfaces
1787
1788     def create_and_bridge_iperf_virtual_interface(self):
1789         """
1790         After asking the user some questions, and create and bridge a virtual interface
1791         to be used with iperf VM
1792
1793         """
1794
1795         for i in self._nodes.items():
1796             node = i[1]
1797
1798             # Show the current bridge and interface configuration
1799             print "\nThis the current bridge configuration:"
1800             ifaces = VPPUtil.show_bridge(node)
1801             question = "\nWould you like to keep this configuration [Y/n]? "
1802             answer = self._ask_user_yn(question, 'y')
1803             if answer == 'y':
1804                 self._sockfilename = '/var/run/vpp/{}.sock'.format(ifaces[0]['name'].replace('/', '_'))
1805                 if os.path.exists(self._sockfilename):
1806                     continue
1807
1808             # Create a script that builds a bridge configuration with physical interfaces
1809             # and virtual interfaces
1810             ints_with_vints = self._iperf_vm_questions(node)
1811             content = ''
1812             for intf in ints_with_vints:
1813                 vhoststr = 'comment { The following command creates the socket }\n'
1814                 vhoststr += 'comment { and returns a virtual interface }\n'
1815                 vhoststr += 'comment {{ create vhost-user socket /var/run/vpp/sock{}.sock server }}\n'. \
1816                     format(intf['bridge'])
1817
1818                 setintdnstr = 'set interface state {} down\n'.format(intf['name'])
1819
1820                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
1821                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])
1822
1823                 # set interface state VirtualEthernet/0/0/0 up
1824                 setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])
1825
1826                 # set interface state VirtualEthernet/0/0/0 down
1827                 setintupstr = 'set interface state {} up\n'.format(intf['name'])
1828
1829                 content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
1830
1831             # Write the content to the script
1832             rootdir = node['rootdir']
1833             filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
1834             with open(filename, 'w+') as sfile:
1835                 sfile.write(content)
1836
1837             # Execute the script
1838             cmd = 'vppctl exec {}'.format(filename)
1839             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1840             if ret != 0:
1841                 logging.debug(stderr)
1842
1843             print("\nA script as been created at {}".format(filename))
1844             print("This script can be run using the following:")
1845             print("vppctl exec {}\n".format(filename))
1846
1847     @staticmethod
1848     def destroy_iperf_vm(name):
1849         """
1850         After asking the user some questions, create a VM and connect the interfaces
1851         to VPP interfaces
1852
1853         :param name: The name of the VM to be be destroyed
1854         :type name: str
1855         """
1856
1857         cmd = 'virsh list'
1858         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1859         if ret != 0:
1860             logging.debug(stderr)
1861             raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))
1862
1863         if re.findall(name, stdout):
1864             cmd = 'virsh destroy {}'.format(name)
1865             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1866             if ret != 0:
1867                 logging.debug(stderr)
1868                 raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))
1869
1870     def create_iperf_vm(self, vmname):
1871         """
1872         After asking the user some questions, create a VM and connect the interfaces
1873         to VPP interfaces
1874
1875         """
1876
1877         # Read the iperf VM template file
1878         distro = VPPUtil.get_linux_distro()
1879         if distro[0] == 'Ubuntu':
1880             tfilename = '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(self._rootdir)
1881         else:
1882             tfilename = '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(self._rootdir)
1883
1884         with open(tfilename, 'r') as tfile:
1885             tcontents = tfile.read()
1886         tfile.close()
1887
1888         # Add the variables
1889         imagename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_IMAGE)
1890         isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
1891         tcontents = tcontents.format(vmname=vmname, imagename=imagename, isoname=isoname,
1892                                      vhostsocketname=self._sockfilename)
1893
1894         # Write the xml
1895         ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
1896         with open(ifilename, 'w+') as ifile:
1897             ifile.write(tcontents)
1898         ifile.close()
1899
1900         cmd = 'virsh create {}'.format(ifilename)
1901         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1902         if ret != 0:
1903             logging.debug(stderr)
1904             raise RuntimeError("Couldn't execute the command {} : {}".format(cmd, stderr))