[vpp.git] / extras / vpp_config / vpplib / AutoConfig.py
1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Library that supports Auto Configuration."""
15
16 import logging
17 import os
18 import re
19 import yaml
20 from netaddr import IPAddress
21
22 from vpplib.VPPUtil import VPPUtil
23 from vpplib.VppPCIUtil import VppPCIUtil
24 from vpplib.VppHugePageUtil import VppHugePageUtil
25 from vpplib.CpuUtils import CpuUtils
26 from vpplib.VppGrubUtil import VppGrubUtil
27 from vpplib.QemuUtils import QemuUtils
28
29 __all__ = ["AutoConfig"]
30
31 # Constants
32 MIN_SYSTEM_CPUS = 2
33 MIN_TOTAL_HUGE_PAGES = 1024
34 MAX_PERCENT_FOR_HUGE_PAGES = 70
35
36
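# Illustrative usage from another module (a minimal sketch; the root
# directory and file name below are assumptions, not values taken from
# this module):
#
#   from vpplib.AutoConfig import AutoConfig
#
#   acfg = AutoConfig('/usr/local/vpp/vpp-config/', 'configs/auto-config.yaml')
#   acfg.discover()                  # read hugepages, devices, cpu and grub state
#   acfg.calculate_cpu_parameters()  # derive workers, main core and mbuf counts
#   acfg.apply_vpp_startup()         # render each node's startup config template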
37 class AutoConfig(object):
38     """Auto Configuration Tools"""
39
40     def __init__(self, rootdir, filename, clean=False):
41         """
42         The Auto Configure class.
43
44         :param rootdir: The root directory for all the auto configuration files
45         :param filename: The autoconfiguration file
46         :param clean: When set initialize the nodes from the auto-config file
47         :type rootdir: str
48         :type filename: str
49         :type clean: bool
50         """
51         self._autoconfig_filename = rootdir + filename
52         self._rootdir = rootdir
53         self._metadata = {}
54         self._nodes = {}
55         self._vpp_devices_node = {}
56         self._hugepage_config = ""
57         self._clean = clean
58         self._loadconfig()
59
60     def get_nodes(self):
61         """
62         Returns the nodes dictionary.
63
64         :returns: The nodes
65         :rtype: dictionary
66         """
67
68         return self._nodes
69
70     @staticmethod
71     def _autoconfig_backup_file(filename):
72         """
73         Create a backup file.
74
75         :param filename: The file to backup
76         :type filename: str
77         """
78
79         # If a copy of the file does not already exist, create one
80         ofile = filename + '.orig'
81         (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
82         if ret != 0:
83             logging.debug(stderr)
84             if stdout.strip('\n') != ofile:
85                 cmd = 'sudo cp {} {}'.format(filename, ofile)
86                 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
87                 if ret != 0:
88                     logging.debug(stderr)
89
90     # noinspection PyBroadException
91     @staticmethod
92     def _ask_user_ipv4():
93         """
94         Ask the user for an IPv4 address and prefix length.
95         If no prefix length is given, ask for the netmask instead.
96
97         :returns: IP address with cidr
98         :rtype: str
99         """
100
101         while True:
102             answer = raw_input("Please enter the IPv4 Address [n.n.n.n/n]: ")
103             try:
104                 ipinput = answer.split('/')
105                 ipaddr = IPAddress(ipinput[0])
106                 if len(ipinput) > 1:
107                     plen = answer.split('/')[1]
108                 else:
109                     answer = raw_input("Please enter the netmask [n.n.n.n]: ")
110                     plen = IPAddress(answer).netmask_bits()
111                 return '{}/{}'.format(ipaddr, plen)
112             except Exception:
113                 print "Please enter a valid IPv4 address."
114
115     @staticmethod
116     def _ask_user_range(question, first, last, default):
117         """
118         Asks the user for a number within a range.
119         default is returned if return is entered.
120
121         :param question: Text of a question.
122         :param first: First number in the range
123         :param last: Last number in the range
124         :param default: The value returned when return is entered
125         :type question: string
126         :type first: int
127         :type last: int
128         :type default: int
129         :returns: The answer to the question
130         :rtype: int
131         """
132
133         while True:
134             answer = raw_input(question)
135             if answer == '':
136                 answer = default
137                 break
138             if re.findall(r'^[0-9]+$', answer):
139                 if int(answer) in range(first, last + 1):
140                     break
141                 else:
142                     print "Please enter a value between {} and {} or Return.". \
143                         format(first, last)
144             else:
145                 print "Please enter a number between {} and {} or Return.". \
146                     format(first, last)
147
148         return int(answer)
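    # Example (illustrative): _ask_user_range("Huge pages [1024-4096][1024]? ",
    # 1024, 4096, 1024) returns 1024 when the user just presses Return.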
149
150     @staticmethod
151     def _ask_user_yn(question, default):
152         """
153         Asks the user for a yes or no question.
154
155         :param question: Text of a question.
156         :param default: The value returned when return is entered
157         :type question: string
158         :type default: string
159         :returns: The answer to the question
160         :rtype: string
161         """
162
163         input_valid = False
164         default = default.lower()
165         answer = ''
166         while not input_valid:
167             answer = raw_input(question)
168             if answer == '':
169                 answer = default
170             if re.findall(r'[YyNn]', answer):
171                 input_valid = True
172                 answer = answer[0].lower()
173             else:
174                 print "Please answer Y, N or Return."
175
176         return answer
177
178     def _loadconfig(self):
179         """
180         Load the testbed configuration, given the auto configuration file.
181
182         """
183
184         # Get the Topology, from the topology layout file
185         topo = {}
186         with open(self._autoconfig_filename, 'r') as stream:
187             try:
188                 topo = yaml.load(stream)
189                 if 'metadata' in topo:
190                     self._metadata = topo['metadata']
191             except yaml.YAMLError as exc:
192                 raise RuntimeError("Couldn't read the Auto config file {}: {}".format(self._autoconfig_filename, exc))
193
194         systemfile = self._rootdir + self._metadata['system_config_file']
195         if self._clean is False and os.path.isfile(systemfile):
196             with open(systemfile, 'r') as sysstream:
197                 try:
198                     systopo = yaml.load(sysstream)
199                     if 'nodes' in systopo:
200                         self._nodes = systopo['nodes']
201                 except yaml.YAMLError as sysexc:
202                     raise RuntimeError("Couldn't read the System config file {}: {}".format(systemfile, sysexc))
203         else:
204             # Get the nodes from Auto Config
205             if 'nodes' in topo:
206                 self._nodes = topo['nodes']
207
208         # Set the root directory in all the nodes
209         for i in self._nodes.items():
210             node = i[1]
211             node['rootdir'] = self._rootdir
212
213     def updateconfig(self):
214         """
215         Update the testbed configuration, given the auto configuration file.
216         We will write the system configuration file with the current node
217         information
218
219         """
220
221         # Initialize the yaml data
222         ydata = {'metadata': self._metadata, 'nodes': self._nodes}
223
224         # Write the system config file
225         filename = self._rootdir + self._metadata['system_config_file']
226         with open(filename, 'w') as yamlfile:
227             yaml.dump(ydata, yamlfile)
228
229     def _update_auto_config(self):
230         """
231         Write the auto configuration file with the new configuration data,
232         input from the user.
233
234         """
235
236         # Initialize the yaml data
237         nodes = {}
238         with open(self._autoconfig_filename, 'r') as stream:
239             try:
240                 ydata = yaml.load(stream)
241                 if 'nodes' in ydata:
242                     nodes = ydata['nodes']
243             except yaml.YAMLError as exc:
244                 print exc
245                 return
246
247         for i in nodes.items():
248             key = i[0]
249             node = i[1]
250
251             # Interfaces
252             node['interfaces'] = {}
253             for item in self._nodes[key]['interfaces'].items():
254                 port = item[0]
255                 interface = item[1]
256
257                 node['interfaces'][port] = {}
258                 addr = '{}'.format(interface['pci_address'])
259                 node['interfaces'][port]['pci_address'] = addr
260                 if 'mac_address' in interface:
261                     node['interfaces'][port]['mac_address'] = \
262                         interface['mac_address']
263
264             if 'total_other_cpus' in self._nodes[key]['cpu']:
265                 node['cpu']['total_other_cpus'] = \
266                     self._nodes[key]['cpu']['total_other_cpus']
267             if 'total_vpp_cpus' in self._nodes[key]['cpu']:
268                 node['cpu']['total_vpp_cpus'] = \
269                     self._nodes[key]['cpu']['total_vpp_cpus']
270             if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
271                 node['cpu']['reserve_vpp_main_core'] = \
272                     self._nodes[key]['cpu']['reserve_vpp_main_core']
273
274             # TCP
275             if 'active_open_sessions' in self._nodes[key]['tcp']:
276                 node['tcp']['active_open_sessions'] = \
277                     self._nodes[key]['tcp']['active_open_sessions']
278             if 'passive_open_sessions' in self._nodes[key]['tcp']:
279                 node['tcp']['passive_open_sessions'] = \
280                     self._nodes[key]['tcp']['passive_open_sessions']
281
282             # Huge pages
283             node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
284
285         # Write the auto config config file
286         with open(self._autoconfig_filename, 'w') as yamlfile:
287             yaml.dump(ydata, yamlfile)
288
289     def apply_huge_pages(self):
290         """
291         Apply the huge page config
292
293         """
294
295         for i in self._nodes.items():
296             node = i[1]
297
298             hpg = VppHugePageUtil(node)
299             hpg.hugepages_dryrun_apply()
300
301     @staticmethod
302     def _apply_vpp_unix(node):
303         """
304         Apply the VPP Unix config
305
306         :param node: Node dictionary with cpuinfo.
307         :type node: dict
308         """
309
310         unix = '  nodaemon\n'
311         if 'unix' not in node['vpp']:
312             return ''
313
314         unixv = node['vpp']['unix']
315         if 'interactive' in unixv:
316             interactive = unixv['interactive']
317             if interactive is True:
318                 unix = '  interactive\n'
319
320         return unix.rstrip('\n')
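    # The fragment returned above is either "  nodaemon" or "  interactive"
    # (or an empty string when the node has no vpp.unix section); it is
    # substituted into the {unix} placeholder of the startup template by
    # apply_vpp_startup().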
321
322     @staticmethod
323     def _apply_vpp_cpu(node):
324         """
325         Apply the VPP cpu config
326
327         :param node: Node dictionary with cpuinfo.
328         :type node: dict
329         """
330
331         # Get main core
332         cpu = '\n'
333         if 'vpp_main_core' in node['cpu']:
334             vpp_main_core = node['cpu']['vpp_main_core']
335         else:
336             vpp_main_core = 0
337         if vpp_main_core != 0:
338             cpu += '  main-core {}\n'.format(vpp_main_core)
339
340         # Get workers
341         vpp_workers = node['cpu']['vpp_workers']
342         vpp_worker_len = len(vpp_workers)
343         if vpp_worker_len > 0:
344             vpp_worker_str = ''
345             for i, worker in enumerate(vpp_workers):
346                 if i > 0:
347                     vpp_worker_str += ','
348                 if worker[0] == worker[1]:
349                     vpp_worker_str += "{}".format(worker[0])
350                 else:
351                     vpp_worker_str += "{}-{}".format(worker[0], worker[1])
352
353             cpu += '  corelist-workers {}\n'.format(vpp_worker_str)
354
355         return cpu
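    # Worked example (illustrative): with vpp_main_core = 1 and
    # vpp_workers = [(2, 3)] the returned string contains the lines
    # "  main-core 1" and "  corelist-workers 2-3".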
356
357     @staticmethod
358     def _apply_vpp_devices(node):
359         """
360         Apply VPP PCI Device configuration to vpp startup.
361
362         :param node: Node dictionary with cpuinfo.
363         :type node: dict
364         """
365
366         devices = ''
367         ports_per_numa = node['cpu']['ports_per_numa']
368         total_mbufs = node['cpu']['total_mbufs']
369
370         for item in ports_per_numa.items():
371             value = item[1]
372             interfaces = value['interfaces']
373
374             # if 0 was specified for the number of vpp workers, use 1 queue
375             num_rx_queues = None
376             num_tx_queues = None
377             if 'rx_queues' in value:
378                 num_rx_queues = value['rx_queues']
379             if 'tx_queues' in value:
380                 num_tx_queues = value['tx_queues']
381
382             num_rx_desc = None
383             num_tx_desc = None
384
385             # Create the devices string
386             for interface in interfaces:
387                 pci_address = interface['pci_address']
388                 pci_address = pci_address.lstrip("'").rstrip("'")
389                 devices += '\n'
390                 devices += '  dev {} {{ \n'.format(pci_address)
391                 if num_rx_queues:
392                     devices += '    num-rx-queues {}\n'.format(num_rx_queues)
393                 else:
394                     devices += '    num-rx-queues {}\n'.format(1)
395                 if num_tx_queues:
396                     devices += '    num-tx-queues {}\n'.format(num_tx_queues)
397                 if num_rx_desc:
398                     devices += '    num-rx-desc {}\n'.format(num_rx_desc)
399                 if num_tx_desc:
400                     devices += '    num-tx-desc {}\n'.format(num_tx_desc)
401                 devices += '  }'
402
403         # Set num-mbufs only when the computed total exceeds the default (16384)
404         logging.debug("Total mbufs: {}".format(total_mbufs))
405         if total_mbufs > 16384:
406             devices += '\n  num-mbufs {}'.format(total_mbufs)
407
408         return devices
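    # Worked example (illustrative): one interface at 0000:02:00.0 with one
    # rx queue and no explicit descriptor counts produces the stanza:
    #
    #   dev 0000:02:00.0 {
    #     num-rx-queues 1
    #   }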
409
410     @staticmethod
411     def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end, total_vpp_workers,
412                           reserve_vpp_main_core):
413         """
414         Calculate the VPP worker information
415
416         :param node: Node dictionary
417         :param vpp_workers: List of VPP workers
418         :param numa_node: Numa node
419         :param other_cpus_end: The end of the cpus allocated for cores
420         other than vpp
421         :param total_vpp_workers: The number of vpp workers needed
422         :param reserve_vpp_main_core: Is there a core needed for
423         the vpp main core
424         :type node: dict
425         :type numa_node: int
426         :type other_cpus_end: int
427         :type total_vpp_workers: int
428         :type reserve_vpp_main_core: bool
429         :returns: Is a core still needed for the vpp main core
430         :rtype: bool
431         """
432
433         # Can we fit the workers in one of these slices
434         cpus = node['cpu']['cpus_per_node'][numa_node]
435         for cpu in cpus:
436             start = cpu[0]
437             end = cpu[1]
438             if start <= other_cpus_end:
439                 start = other_cpus_end + 1
440
441             if reserve_vpp_main_core:
442                 start += 1
443
444             workers_end = start + total_vpp_workers - 1
445
446             if workers_end <= end:
447                 if reserve_vpp_main_core:
448                     node['cpu']['vpp_main_core'] = start - 1
449                 reserve_vpp_main_core = False
450                 if total_vpp_workers:
451                     vpp_workers.append((start, workers_end))
452                 break
453
454         # We still need to reserve the main core
455         if reserve_vpp_main_core:
456             node['cpu']['vpp_main_core'] = other_cpus_end + 1
457
458         return reserve_vpp_main_core
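    # Worked example (illustrative): with cpus_per_node[numa_node] = [(0, 7)],
    # other_cpus_end = 1, total_vpp_workers = 2 and reserve_vpp_main_core set,
    # the slice yields vpp_main_core = 2 and appends the worker range (3, 4).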
459
460     @staticmethod
461     def _calc_desc_and_queues(total_numa_nodes,
462                               total_ports_per_numa,
463                               total_rx_queues,
464                               ports_per_numa_value):
465         """
466         Calculate the number of descriptors and queues
467
468         :param total_numa_nodes: The total number of numa nodes
469         :param total_ports_per_numa: The total number of ports for this
470         numa node
471         :param total_rx_queues: The total number of rx queues / port
472         :param ports_per_numa_value: The value from the ports_per_numa
473         dictionary
474         :type total_numa_nodes: int
475         :type total_ports_per_numa: int
476         :type total_rx_queues: int
477         :type ports_per_numa_value: dict
478         :returns: The total number of message buffers
479         :rtype: int
480         """
481
482         # Get the number of rx queues
483         rx_queues = max(1, total_rx_queues)
484         tx_queues = rx_queues * total_numa_nodes + 1
485
486         # Get the descriptor entries
487         desc_entries = 1024
488         ports_per_numa_value['rx_queues'] = rx_queues
489         total_mbufs = (((rx_queues * desc_entries) +
490                         (tx_queues * desc_entries)) *
491                        total_ports_per_numa)
493
494         return total_mbufs
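    # Worked example (illustrative): 1 numa node, 2 ports on the node and
    # 2 rx queues per port gives rx_queues = 2, tx_queues = 2 * 1 + 1 = 3 and
    # total_mbufs = ((2 + 3) * 1024) * 2 = 10240, before the 2.5x scaling
    # applied later in calculate_cpu_parameters().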
495
496     @staticmethod
497     def _create_ports_per_numa(node, interfaces):
498         """
499         Create a dictionary of ports per numa node.
500         :param node: Node dictionary
501         :param interfaces: All the interfaces to be used by vpp
502         :type node: dict
503         :type interfaces: dict
504         :returns: The ports per numa dictionary
505         :rtype: dict
506         """
507
508         # Make a list of ports by numa node
509         ports_per_numa = {}
510         for item in interfaces.items():
511             i = item[1]
512             if i['numa_node'] not in ports_per_numa:
513                 ports_per_numa[i['numa_node']] = {'interfaces': []}
514                 ports_per_numa[i['numa_node']]['interfaces'].append(i)
515             else:
516                 ports_per_numa[i['numa_node']]['interfaces'].append(i)
517         node['cpu']['ports_per_numa'] = ports_per_numa
518
519         return ports_per_numa
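    # The returned dictionary is keyed by numa node, for example
    # (illustrative): {0: {'interfaces': [intf_a, intf_b]},
    #                  1: {'interfaces': [intf_c]}}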
520
521     def calculate_cpu_parameters(self):
522         """
523         Calculate the cpu configuration.
524
525         """
526
527         # Calculate the cpu parameters, needed for the
528         # vpp_startup and grub configuration
529         for i in self._nodes.items():
530             node = i[1]
531
532             # get total number of nic ports
533             interfaces = node['interfaces']
534
535             # Make a list of ports by numa node
536             ports_per_numa = self._create_ports_per_numa(node, interfaces)
537
538             # Get the number of cpus to skip, we never use the first cpu
539             other_cpus_start = 1
540             other_cpus_end = other_cpus_start + node['cpu']['total_other_cpus'] - 1
541             other_workers = None
542             if other_cpus_end != 0:
543                 other_workers = (other_cpus_start, other_cpus_end)
544             node['cpu']['other_workers'] = other_workers
545
546             # Allocate the VPP main core and workers
547             vpp_workers = []
548             reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
549             total_vpp_cpus = node['cpu']['total_vpp_cpus']
550             total_rx_queues = node['cpu']['total_rx_queues']
551
552             # If total_vpp_cpus is 0 or is less than the numa nodes with ports
553             #  then we shouldn't get workers
554             total_workers_node = total_vpp_cpus / len(ports_per_numa)
555             total_main = 0
556             if reserve_vpp_main_core:
557                 total_main = 1
558             total_mbufs = 0
559             if total_main + total_workers_node != 0:
560                 for item in ports_per_numa.items():
561                     numa_node = item[0]
562                     value = item[1]
563
564                     # Get the number of descriptors and queues
565                     mbufs = self._calc_desc_and_queues(len(ports_per_numa),
566                                                        len(value['interfaces']), total_rx_queues, value)
567                     total_mbufs += mbufs
568
569                     # Get the VPP workers
570                     reserve_vpp_main_core = self._calc_vpp_workers(node, vpp_workers, numa_node,
571                                                                    other_cpus_end, total_workers_node,
572                                                                    reserve_vpp_main_core)
573
574                 total_mbufs *= 2.5
575                 total_mbufs = int(total_mbufs)
576             else:
577                 total_mbufs = 0
578
579             # Save the info
580             node['cpu']['vpp_workers'] = vpp_workers
581             node['cpu']['total_mbufs'] = total_mbufs
582
583         # Write the config
584         self.updateconfig()
585
586     @staticmethod
587     def _apply_vpp_tcp(node):
588         """
589         Apply the VPP TCP config
590
591         :param node: Node dictionary with cpuinfo.
592         :type node: dict
593         """
594
595         active_open_sessions = node['tcp']['active_open_sessions']
596         aos = int(active_open_sessions)
597
598         passive_open_sessions = node['tcp']['passive_open_sessions']
599         pos = int(passive_open_sessions)
600
601         # Always generate the api-segment section with gid vpp
602         if (aos + pos) == 0:
603             tcp = "api-segment {\n"
604             tcp = tcp + "  gid vpp\n"
605             tcp = tcp + "}\n"
606             return tcp.rstrip('\n')
607
608         tcp = "# TCP stack-related configuration parameters\n"
609         tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos)
610         tcp = tcp + "heapsize 4g\n\n"
611         tcp = tcp + "api-segment {\n"
612         tcp = tcp + "  global-size 2000M\n"
613         tcp = tcp + "  api-size 1G\n"
614         tcp = tcp + "}\n\n"
615
616         tcp = tcp + "session {\n"
617         tcp = tcp + "  event-queue-length " + "{:d}".format(aos + pos) + "\n"
618         tcp = tcp + "  preallocated-sessions " + "{:d}".format(aos + pos) + "\n"
619         tcp = tcp + "  v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
620         tcp = tcp + "  v4-session-table-memory 3g\n"
621         if aos > 0:
622             tcp = tcp + "  v4-halfopen-table-buckets " + \
623                   "{:d}".format((aos + pos) / 4) + "\n"
624             tcp = tcp + "  v4-halfopen-table-memory 3g\n"
625         tcp = tcp + "}\n\n"
626
627         tcp = tcp + "tcp {\n"
628         tcp = tcp + "  preallocated-connections " + "{:d}".format(aos + pos) + "\n"
629         if aos > 0:
630             tcp = tcp + "  preallocated-half-open-connections " + "{:d}".format(aos) + "\n"
631             tcp = tcp + "  local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n"
632             tcp = tcp + "  local-endpoints-table-memory 3g\n"
633         tcp = tcp + "}\n\n"
634
635         return tcp.rstrip('\n')
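    # When no active or passive sessions are requested this returns only:
    #
    #   api-segment {
    #     gid vpp
    #   }
    #
    # Otherwise it also emits heapsize, api-segment sizing, session and tcp
    # sections scaled by the expected number of sessions.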
636
637     def apply_vpp_startup(self):
638         """
639         Apply the vpp startup configuration
640
641         """
642
643         # Apply the VPP startup configuration
644         for i in self._nodes.items():
645             node = i[1]
646
647             # Get the startup file
648             rootdir = node['rootdir']
649             sfile = rootdir + node['vpp']['startup_config_file']
650
651             # Get the devices
652             devices = self._apply_vpp_devices(node)
653
654             # Get the CPU config
655             cpu = self._apply_vpp_cpu(node)
656
657             # Get the unix config
658             unix = self._apply_vpp_unix(node)
659
660             # Get the TCP configuration, if any
661             tcp = self._apply_vpp_tcp(node)
662
663             # Make a backup if needed
664             self._autoconfig_backup_file(sfile)
665
666             # Get the template
667             tfile = sfile + '.template'
668             (ret, stdout, stderr) = \
669                 VPPUtil.exec_command('cat {}'.format(tfile))
670             if ret != 0:
671                 raise RuntimeError('Executing cat command failed on node {}'.
672                                    format(node['host']))
673             startup = stdout.format(unix=unix,
674                                     cpu=cpu,
675                                     devices=devices,
676                                     tcp=tcp)
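            # Note (an assumption about the template, which is not shown in
            # this file): for the format() call above to work, the startup
            # template must contain {unix}, {cpu}, {devices} and {tcp}
            # placeholders, with any literal braces escaped as '{{' and '}}'.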
677
678             (ret, stdout, stderr) = \
679                 VPPUtil.exec_command('rm {}'.format(sfile))
680             if ret != 0:
681                 logging.debug(stderr)
682
683             cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
684             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
685             if ret != 0:
686                 raise RuntimeError('Writing config failed on node {}'.
687                                    format(node['host']))
688
689     def apply_grub_cmdline(self):
690         """
691         Apply the grub cmdline
692
693         """
694
695         for i in self._nodes.items():
696             node = i[1]
697
698             # Get the isolated CPUs
699             other_workers = node['cpu']['other_workers']
700             vpp_workers = node['cpu']['vpp_workers']
701             if 'vpp_main_core' in node['cpu']:
702                 vpp_main_core = node['cpu']['vpp_main_core']
703             else:
704                 vpp_main_core = 0
705             all_workers = []
706             if other_workers is not None:
707                 all_workers = [other_workers]
708             if vpp_main_core != 0:
709                 all_workers += [(vpp_main_core, vpp_main_core)]
710             all_workers += vpp_workers
711             isolated_cpus = ''
712             for idx, worker in enumerate(all_workers):
713                 if worker is None:
714                     continue
715                 if idx > 0:
716                     isolated_cpus += ','
717                 if worker[0] == worker[1]:
718                     isolated_cpus += "{}".format(worker[0])
719                 else:
720                     isolated_cpus += "{}-{}".format(worker[0], worker[1])
721
722             vppgrb = VppGrubUtil(node)
723             current_cmdline = vppgrb.get_current_cmdline()
724             if 'grub' not in node:
725                 node['grub'] = {}
726             node['grub']['current_cmdline'] = current_cmdline
727             node['grub']['default_cmdline'] = \
728                 vppgrb.apply_cmdline(node, isolated_cpus)
729
730         self.updateconfig()
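    # Worked example (illustrative): other_workers = (1, 2), vpp_main_core = 3
    # and vpp_workers = [(4, 5)] produce isolated_cpus = "1-2,3,4-5", which is
    # handed to VppGrubUtil.apply_cmdline() for the grub command line.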
731
732     def get_hugepages(self):
733         """
734         Get the hugepage configuration
735
736         """
737
738         for i in self._nodes.items():
739             node = i[1]
740
741             hpg = VppHugePageUtil(node)
742             max_map_count, shmmax = hpg.get_huge_page_config()
743             node['hugepages']['max_map_count'] = max_map_count
744             node['hugepages']['shmax'] = shmmax
745             total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
746             node['hugepages']['actual_total'] = total
747             node['hugepages']['free'] = free
748             node['hugepages']['size'] = size
749             node['hugepages']['memtotal'] = memtotal
750             node['hugepages']['memfree'] = memfree
751
752         self.updateconfig()
753
754     def get_grub(self):
755         """
756         Get the grub configuration
757
758         """
759
760         for i in self._nodes.items():
761             node = i[1]
762
763             vppgrb = VppGrubUtil(node)
764             current_cmdline = vppgrb.get_current_cmdline()
765             default_cmdline = vppgrb.get_default_cmdline()
766
767             # Get the total number of isolated CPUs
768             current_iso_cpus = 0
769             iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
770             iso_cpurl = len(iso_cpur)
771             if iso_cpurl > 0:
772                 iso_cpu_str = iso_cpur[0]
773                 iso_cpu_str = iso_cpu_str.split('=')[1]
774                 iso_cpul = iso_cpu_str.split(',')
775                 for iso_cpu in iso_cpul:
776                     isocpuspl = iso_cpu.split('-')
777                     if len(isocpuspl) == 1:
778                         current_iso_cpus += 1
779                     else:
780                         first = int(isocpuspl[0])
781                         second = int(isocpuspl[1])
782                         if first == second:
783                             current_iso_cpus += 1
784                         else:
785                             current_iso_cpus += second - first + 1
786
787             if 'grub' not in node:
788                 node['grub'] = {}
789             node['grub']['current_cmdline'] = current_cmdline
790             node['grub']['default_cmdline'] = default_cmdline
791             node['grub']['current_iso_cpus'] = current_iso_cpus
792
793         self.updateconfig()
794
795     @staticmethod
796     def _get_device(node):
797         """
798         Get the device configuration for a single node
799
800         :param node: Node dictionary with cpuinfo.
801         :type node: dict
802
803         """
804
805         vpp = VppPCIUtil(node)
806         vpp.get_all_devices()
807
808         # Save the device information
809         node['devices'] = {}
810         node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
811         node['devices']['kernel_devices'] = vpp.get_kernel_devices()
812         node['devices']['other_devices'] = vpp.get_other_devices()
813         node['devices']['linkup_devices'] = vpp.get_link_up_devices()
814
815     def get_devices_per_node(self):
816         """
817         Get the device configuration for all the nodes
818
819         """
820
821         for i in self._nodes.items():
822             node = i[1]
823             # Update the interface data
824
825             self._get_device(node)
826
827         self.updateconfig()
828
829     @staticmethod
830     def get_cpu_layout(node):
831         """
832         Get the cpu layout
833
834         using lscpu -p get the cpu layout.
835         Returns a list with each item representing a single cpu.
836
837         :param node: Node dictionary.
838         :type node: dict
839         :returns: The cpu layout
840         :rtype: list
841         """
842
843         cmd = 'lscpu -p'
844         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
845         if ret != 0:
846             raise RuntimeError('{} failed on node {} {}'.
847                                format(cmd, node['host'], stderr))
848
849         pcpus = []
850         lines = stdout.split('\n')
851         for line in lines:
852             if line == '' or line[0] == '#':
853                 continue
854             linesplit = line.split(',')
855             layout = {'cpu': linesplit[0], 'core': linesplit[1],
856                       'socket': linesplit[2], 'node': linesplit[3]}
857
858             # cpu, core, socket, node
859             pcpus.append(layout)
860
861         return pcpus
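    # lscpu -p prints comma separated lines such as "0,0,0,0,,0,0,0,0"; only
    # the first four fields are kept here, so the example line becomes
    # {'cpu': '0', 'core': '0', 'socket': '0', 'node': '0'}.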
862
863     def get_cpu(self):
864         """
865         Get the cpu configuration
866
867         """
868
869         # Get the CPU layout
870         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
871
872         for i in self._nodes.items():
873             node = i[1]
874
875             # Get the cpu layout
876             layout = self.get_cpu_layout(node)
877             node['cpu']['layout'] = layout
878
879             cpuinfo = node['cpuinfo']
880             smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
881             node['cpu']['smt_enabled'] = smt_enabled
882
883             # We don't want to write the cpuinfo
884             node['cpuinfo'] = ""
885
886         # Write the config
887         self.updateconfig()
888
889     def discover(self):
890         """
891         Get the current system configuration.
892
893         """
894
895         # Get the Huge Page configuration
896         self.get_hugepages()
897
898         # Get the device configuration
899         self.get_devices_per_node()
900
901         # Get the CPU configuration
902         self.get_cpu()
903
904         # Get the current grub cmdline
905         self.get_grub()
906
907     def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
908         """
909         Ask the user questions related to the cpu configuration.
910
911         :param node: Node dictionary
912         :param total_cpus: The total number of cpus in the system
913         :param numa_nodes: The list of numa nodes in the system
914         :type node: dict
915         :type total_cpus: int
916         :type numa_nodes: list
917         """
918
919         print "\nYour system has {} core(s) and {} Numa Nodes.". \
920             format(total_cpus, len(numa_nodes))
921         print "To begin, we suggest not reserving any cores for VPP or other processes."
922         print "Then, to improve performance, start reserving cores and adding queues as needed."
923
924         max_vpp_cpus = 4
925         total_vpp_cpus = 0
926         if max_vpp_cpus > 0:
927             question = "\nHow many core(s) shall we reserve for VPP [0-{}][0]? ".format(max_vpp_cpus)
928             total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
929             node['cpu']['total_vpp_cpus'] = total_vpp_cpus
930
931         max_other_cores = (total_cpus - total_vpp_cpus) / 2
932         question = 'How many core(s) do you want to reserve for processes other than VPP [0-{}][0]? '. \
933             format(str(max_other_cores))
934         total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
935         node['cpu']['total_other_cpus'] = total_other_cpus
936
937         max_main_cpus = max_vpp_cpus + 1 - total_vpp_cpus
938         reserve_vpp_main_core = False
939         if max_main_cpus > 0:
940             question = "Should we reserve 1 core for the VPP Main thread? "
941             question += "[y/N]? "
942             answer = self._ask_user_yn(question, 'n')
943             if answer == 'y':
944                 reserve_vpp_main_core = True
945             node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
946             node['cpu']['vpp_main_core'] = 0
947
948         question = "How many RX queues per port shall we use " \
949                    "for VPP [1-4][1]? "
950         total_rx_queues = self._ask_user_range(question, 1, 4, 1)
951         node['cpu']['total_rx_queues'] = total_rx_queues
952
953     def modify_cpu(self, ask_questions=True):
954         """
955         Modify the cpu configuration, asking for the user for the values.
956
957         :param ask_questions: When true ask the user for config parameters
958
959         """
960
961         # Get the CPU layout
962         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
963
964         for i in self._nodes.items():
965             node = i[1]
966             total_cpus = 0
967             total_cpus_per_slice = 0
968             cpus_per_node = {}
969             numa_nodes = []
970             cores = []
971             cpu_layout = self.get_cpu_layout(node)
972
973             # Assume the number of cpus per slice is always the same as the
974             # first slice
975             first_node = '0'
976             for cpu in cpu_layout:
977                 if cpu['node'] != first_node:
978                     break
979                 total_cpus_per_slice += 1
980
981             # Get the total number of cpus, cores, and numa nodes from the
982             # cpu layout
983             for cpul in cpu_layout:
984                 numa_node = cpul['node']
985                 core = cpul['core']
986                 cpu = cpul['cpu']
987                 total_cpus += 1
988
989                 if numa_node not in cpus_per_node:
990                     cpus_per_node[numa_node] = []
991                 cpuperslice = int(cpu) % total_cpus_per_slice
992                 if cpuperslice == 0:
993                     cpus_per_node[numa_node].append((int(cpu), int(cpu) +
994                                                      total_cpus_per_slice - 1))
995                 if numa_node not in numa_nodes:
996                     numa_nodes.append(numa_node)
997                 if core not in cores:
998                     cores.append(core)
999             node['cpu']['cpus_per_node'] = cpus_per_node
1000
1001             # Ask the user some questions
1002             if ask_questions and total_cpus >= 8:
1003                 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1004
1005             # Populate the interfaces with the numa node
1006             if 'interfaces' in node:
1007                 ikeys = node['interfaces'].keys()
1008                 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1009
1010             # We don't want to write the cpuinfo
1011             node['cpuinfo'] = ""
1012
1013         # Write the configs
1014         self._update_auto_config()
1015         self.updateconfig()
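    # Illustrative result: on a single socket host with 8 cpus that lscpu
    # reports all on node '0', total_cpus_per_slice is 8 and
    # node['cpu']['cpus_per_node'] becomes {'0': [(0, 7)]}.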
1016
1017     def _modify_other_devices(self, node,
1018                               other_devices, kernel_devices, dpdk_devices):
1019         """
1020         Modify the devices configuration, asking for the user for the values.
1021
1022         """
1023
1024         odevices_len = len(other_devices)
1025         if odevices_len > 0:
1026             print "\nThese device(s) are currently NOT being used",
1027             print "by VPP or the OS.\n"
1028             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1029             question = "\nWould you like to give any of these devices"
1030             question += " back to the OS [Y/n]? "
1031             answer = self._ask_user_yn(question, 'Y')
1032             if answer == 'y':
1033                 vppd = {}
1034                 for dit in other_devices.items():
1035                     dvid = dit[0]
1036                     device = dit[1]
1037                     question = "Would you like to use device {} for". \
1038                         format(dvid)
1039                     question += " the OS [y/N]? "
1040                     answer = self._ask_user_yn(question, 'n')
1041                     if answer == 'y':
1042                         if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1043                             driver = device['unused'][0]
1044                             VppPCIUtil.bind_vpp_device(node, driver, dvid)
1045                         else:
1046                             logging.debug('Could not bind device {}'.format(dvid))
1047                         vppd[dvid] = device
1048                 for dit in vppd.items():
1049                     dvid = dit[0]
1050                     device = dit[1]
1051                     kernel_devices[dvid] = device
1052                     del other_devices[dvid]
1053
1054         odevices_len = len(other_devices)
1055         if odevices_len > 0:
1056             print "\nThese device(s) are still NOT being used ",
1057             print "by VPP or the OS.\n"
1058             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1059             question = "\nWould you like to use any of these for VPP [y/N]? "
1060             answer = self._ask_user_yn(question, 'N')
1061             if answer == 'y':
1062                 vppd = {}
1063                 for dit in other_devices.items():
1064                     dvid = dit[0]
1065                     device = dit[1]
1066                     question = "Would you like to use device {} ".format(dvid)
1067                     question += "for VPP [y/N]? "
1068                     answer = self._ask_user_yn(question, 'n')
1069                     if answer == 'y':
1070                         vppd[dvid] = device
1071                 for dit in vppd.items():
1072                     dvid = dit[0]
1073                     device = dit[1]
1074                     if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1075                         driver = device['unused'][0]
1076                         logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1077                         VppPCIUtil.bind_vpp_device(node, driver, dvid)
1078                     else:
1079                         logging.debug('Could not bind device {}'.format(dvid))
1080                     dpdk_devices[dvid] = device
1081                     del other_devices[dvid]
1082
1083     def update_interfaces_config(self):
1084         """
1085         Modify the interfaces directly from the config file.
1086
1087         """
1088
1089         for i in self._nodes.items():
1090             node = i[1]
1091             devices = node['devices']
1092             all_devices = devices['other_devices']
1093             all_devices.update(devices['dpdk_devices'])
1094             all_devices.update(devices['kernel_devices'])
1095
1096             current_ifcs = {}
1097             interfaces = {}
1098             if 'interfaces' in node:
1099                 current_ifcs = node['interfaces']
1100             if current_ifcs:
1101                 for ifc in current_ifcs.values():
1102                     dvid = ifc['pci_address']
1103                     if dvid in all_devices:
1104                         VppPCIUtil.vpp_create_interface(interfaces, dvid,
1105                                                         all_devices[dvid])
1106             node['interfaces'] = interfaces
1107
1108         self.updateconfig()
1109
1110     def modify_devices(self):
1111         """
1112         Modify the devices configuration, asking for the user for the values.
1113
1114         """
1115
1116         for i in self._nodes.items():
1117             node = i[1]
1118             devices = node['devices']
1119             other_devices = devices['other_devices']
1120             kernel_devices = devices['kernel_devices']
1121             dpdk_devices = devices['dpdk_devices']
1122
1123             if other_devices:
1124                 self._modify_other_devices(node, other_devices,
1125                                            kernel_devices, dpdk_devices)
1126
1127                 # Get the devices again for this node
1128                 self._get_device(node)
1129                 devices = node['devices']
1130                 kernel_devices = devices['kernel_devices']
1131                 dpdk_devices = devices['dpdk_devices']
1132
1133             klen = len(kernel_devices)
1134             if klen > 0:
1135                 print "\nThese devices have kernel interfaces, but",
1136                 print "appear to be safe to use with VPP.\n"
1137                 VppPCIUtil.show_vpp_devices(kernel_devices)
1138                 question = "\nWould you like to use any of these "
1139                 question += "device(s) for VPP [y/N]? "
1140                 answer = self._ask_user_yn(question, 'n')
1141                 if answer == 'y':
1142                     vppd = {}
1143                     for dit in kernel_devices.items():
1144                         dvid = dit[0]
1145                         device = dit[1]
1146                         question = "Would you like to use device {} ". \
1147                             format(dvid)
1148                         question += "for VPP [y/N]? "
1149                         answer = self._ask_user_yn(question, 'n')
1150                         if answer == 'y':
1151                             vppd[dvid] = device
1152                     for dit in vppd.items():
1153                         dvid = dit[0]
1154                         device = dit[1]
1155                         if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1156                             driver = device['unused'][0]
1157                             logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1158                             VppPCIUtil.bind_vpp_device(node, driver, dvid)
1159                         else:
1160                             logging.debug('Could not bind device {}'.format(dvid))
1161                         dpdk_devices[dvid] = device
1162                         del kernel_devices[dvid]
1163
1164             dlen = len(dpdk_devices)
1165             if dlen > 0:
1166                 print "\nThese device(s) will be used by VPP.\n"
1167                 VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1168                 question = "\nWould you like to remove any of "
1169                 question += "these device(s) [y/N]? "
1170                 answer = self._ask_user_yn(question, 'n')
1171                 if answer == 'y':
1172                     vppd = {}
1173                     for dit in dpdk_devices.items():
1174                         dvid = dit[0]
1175                         device = dit[1]
1176                         question = "Would you like to remove {} [y/N]? ". \
1177                             format(dvid)
1178                         answer = self._ask_user_yn(question, 'n')
1179                         if answer == 'y':
1180                             vppd[dvid] = device
1181                     for dit in vppd.items():
1182                         dvid = dit[0]
1183                         device = dit[1]
1184                         if 'unused' in device and len(device['unused']) != 0 and device['unused'][0] != '':
1185                             driver = device['unused'][0]
1186                             logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1187                             VppPCIUtil.bind_vpp_device(node, driver, dvid)
1188                         else:
1189                             logging.debug('Could not bind device {}'.format(dvid))
1190                         kernel_devices[dvid] = device
1191                         del dpdk_devices[dvid]
1192
1193             interfaces = {}
1194             for dit in dpdk_devices.items():
1195                 dvid = dit[0]
1196                 device = dit[1]
1197                 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1198             node['interfaces'] = interfaces
1199
1200             print "\nThese device(s) will be used by VPP, please",
1201             print "rerun this option if this is incorrect.\n"
1202             VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
1203
1204         self._update_auto_config()
1205         self.updateconfig()
1206
1207     def modify_huge_pages(self):
1208         """
1209         Modify the huge page configuration, asking for the user for the values.
1210
1211         """
1212
1213         for i in self._nodes.items():
1214             node = i[1]
1215
1216             total = node['hugepages']['actual_total']
1217             free = node['hugepages']['free']
1218             size = node['hugepages']['size']
1219             memfree = node['hugepages']['memfree'].split(' ')[0]
1220             hugesize = int(size.split(' ')[0])
1221             # The max number of huge pages should be no more than
1222             # 70% of total free memory
1223             maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize
1224             print "\nThere are currently {} {} huge pages free.". \
1225                 format(free, size)
1226             question = "Do you want to reconfigure the number of "
1227             question += "huge pages [y/N]? "
1228             answer = self._ask_user_yn(question, 'n')
1229             if answer == 'n':
1230                 node['hugepages']['total'] = total
1231                 continue
1232
1233             print "\nThere is currently a total of {} huge pages.". \
1234                 format(total)
1235             question = "How many huge pages do you want [{} - {}][{}]? ". \
1236                 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1237             answer = self._ask_user_range(question, MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1238             node['hugepages']['total'] = str(answer)
1239
1240         # Update auto-config.yaml
1241         self._update_auto_config()
1242
1243         # Rediscover just the hugepages
1244         self.get_hugepages()
1245
1246     def get_tcp_params(self):
1247         """
1248         Get the tcp configuration
1249
1250         """
1251         # maybe nothing to do here?
1252         self.updateconfig()
1253
1254     def acquire_tcp_params(self):
1255         """
1256         Ask the user for TCP stack configuration parameters
1257
1258         """
1259
1260         for i in self._nodes.items():
1261             node = i[1]
1262
1263             question = "\nHow many active-open / tcp client sessions are expected "
1264             question = question + "[0-10000000][0]? "
1265             answer = self._ask_user_range(question, 0, 10000000, 0)
1266             # Less than 10K is equivalent to 0
1267             if int(answer) < 10000:
1268                 answer = 0
1269             node['tcp']['active_open_sessions'] = answer
1270
1271             question = "How many passive-open / tcp server sessions are expected "
1272             question = question + "[0-10000000][0]? "
1273             answer = self._ask_user_range(question, 0, 10000000, 0)
1274             # Less than 10K is equivalent to 0
1275             if int(answer) < 10000:
1276                 answer = 0
1277             node['tcp']['passive_open_sessions'] = answer
1278
1279         # Update auto-config.yaml
1280         self._update_auto_config()
1281
1282         # Rediscover tcp parameters
1283         self.get_tcp_params()
1284
1285     @staticmethod
1286     def patch_qemu(node):
1287         """
1288         Patch qemu with the correct patches.
1289
1290         :param node: Node dictionary
1291         :type node: dict
1292         """
1293
1294         print '\nWe are patching the node "{}":\n'.format(node['host'])
1295         QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
1296
1297     @staticmethod
1298     def cpu_info(node):
1299         """
1300         print the CPU information
1301
1302         """
1303
1304         cpu = CpuUtils.get_cpu_info_per_node(node)
1305
1306         item = 'Model name'
1307         if item in cpu:
1308             print "{:>20}:    {}".format(item, cpu[item])
1309         item = 'CPU(s)'
1310         if item in cpu:
1311             print "{:>20}:    {}".format(item, cpu[item])
1312         item = 'Thread(s) per core'
1313         if item in cpu:
1314             print "{:>20}:    {}".format(item, cpu[item])
1315         item = 'Core(s) per socket'
1316         if item in cpu:
1317             print "{:>20}:    {}".format(item, cpu[item])
1318         item = 'Socket(s)'
1319         if item in cpu:
1320             print "{:>20}:    {}".format(item, cpu[item])
1321         item = 'NUMA node(s)'
1322         numa_nodes = 0
1323         if item in cpu:
1324             numa_nodes = int(cpu[item])
1325         for i in xrange(0, numa_nodes):
1326             item = "NUMA node{} CPU(s)".format(i)
1327             print "{:>20}:    {}".format(item, cpu[item])
1328         item = 'CPU max MHz'
1329         if item in cpu:
1330             print "{:>20}:    {}".format(item, cpu[item])
1331         item = 'CPU min MHz'
1332         if item in cpu:
1333             print "{:>20}:    {}".format(item, cpu[item])
1334
1335         if node['cpu']['smt_enabled']:
1336             smt = 'Enabled'
1337         else:
1338             smt = 'Disabled'
1339         print "{:>20}:    {}".format('SMT', smt)
1340
1341         # VPP Threads
1342         print "\nVPP Threads: (Name: Cpu Number)"
1343         vpp_processes = cpu['vpp_processes']
1344         for i in vpp_processes.items():
1345             print "  {:10}: {:4}".format(i[0], i[1])
1346
1347     @staticmethod
1348     def device_info(node):
1349         """
1350         Show the device information.
1351
1352         """
1353
1354         if 'cpu' in node and 'total_mbufs' in node['cpu']:
1355             total_mbufs = node['cpu']['total_mbufs']
1356             if total_mbufs is not 0:
1357                 print "Total Number of Buffers: {}".format(total_mbufs)
1358
1359         vpp = VppPCIUtil(node)
1360         vpp.get_all_devices()
1361         linkup_devs = vpp.get_link_up_devices()
1362         if len(linkup_devs):
1363             print ("\nDevices with link up (can not be used with VPP):")
1364             vpp.show_vpp_devices(linkup_devs, show_header=False)
1365             # for dev in linkup_devs:
1366             #    print ("    " + dev)
1367         kernel_devs = vpp.get_kernel_devices()
1368         if len(kernel_devs):
1369             print ("\nDevices bound to kernel drivers:")
1370             vpp.show_vpp_devices(kernel_devs, show_header=False)
1371         else:
1372             print ("\nNo devices bound to kernel drivers")
1373
1374         dpdk_devs = vpp.get_dpdk_devices()
1375         if len(dpdk_devs):
1376             print ("\nDevices bound to DPDK drivers:")
1377             vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1378                                  show_header=False)
1379         else:
1380             print ("\nNo devices bound to DPDK drivers")
1381
1382         vpputl = VPPUtil()
1383         interfaces = vpputl.get_hardware(node)
1384         if interfaces == {}:
1385             return
1386
1387         print ("\nDevices in use by VPP:")
1388
1389         if len(interfaces.items()) < 2:
1390             print ("None")
1391             return
1392
1393         print "{:30} {:6} {:4} {:7} {:4} {:7}". \
1394             format('Name', 'Socket', 'RXQs',
1395                    'RXDescs', 'TXQs', 'TXDescs')
1396         for intf in sorted(interfaces.items()):
1397             name = intf[0]
1398             value = intf[1]
1399             if name == 'local0':
1400                 continue
1401             socket = rx_qs = rx_ds = tx_qs = tx_ds = ''
1402             if 'cpu socket' in value:
1403                 socket = int(value['cpu socket'])
1404             if 'rx queues' in value:
1405                 rx_qs = int(value['rx queues'])
1406             if 'rx descs' in value:
1407                 rx_ds = int(value['rx descs'])
1408             if 'tx queues' in value:
1409                 tx_qs = int(value['tx queues'])
1410             if 'tx descs' in value:
1411                 tx_ds = int(value['tx descs'])
1412
1413             print ("{:30} {:>6} {:>4} {:>7} {:>4} {:>7}".
1414                    format(name, socket, rx_qs, rx_ds, tx_qs, tx_ds))
1415
1416     @staticmethod
1417     def hugepage_info(node):
1418         """
1419         Show the huge page information.
1420
1421         """
1422
1423         hpg = VppHugePageUtil(node)
1424         hpg.show_huge_pages()
1425
1426     @staticmethod
1427     def min_system_resources(node):
1428         """
1429         Check the system for the basic minimum resources; return True if
1430         there are enough.
1431
1432         :returns: True if the system has enough resources, False otherwise
1433         :rtype: bool
1434         """
1435
1436         min_sys_res = True
1437
1438         # CPUs
1439         if 'layout' in node['cpu']:
1440             total_cpus = len(node['cpu']['layout'])
1441             if total_cpus < 2:
1442                 print "\nOnly {} CPU(s) are available on this system.".format(total_cpus)
1443                 print "This is not enough to run VPP."
1444                 min_sys_res = False
1445
1446         # System Memory
1447         if 'free' in node['hugepages'] and \
1448                 'memfree' in node['hugepages'] and \
1449                 'size' in node['hugepages']:
1450             free = node['hugepages']['free']
1451             memfree = float(node['hugepages']['memfree'].split(' ')[0])
1452             hugesize = float(node['hugepages']['size'].split(' ')[0])
1453
1454             memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1455             percentmemhugepages = (memhugepages / memfree) * 100
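            # Rough sanity check: with 2048 kB huge pages the 1024-page minimum
            # needs ~2 GB, e.g. about 25% of ~8 GB (8388608 kB) of free memory
            # (illustrative numbers, assuming both values are reported in kB).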
1456             if free == '0' and \
1457                     percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1458                 print "\nThe system has only {} of free memory.".format(int(memfree))
1459                 print "You will not be able to allocate enough Huge Pages for VPP."
1460                 min_sys_res = False
1461
1462         return min_sys_res
1463
1464     def sys_info(self):
1465         """
1466         Print the system information
1467
1468         """
1469
1470         for i in self._nodes.items():
1471             print "\n=============================="
1472             name = i[0]
1473             node = i[1]
1474
1475             print "NODE: {}\n".format(name)
1476
1477             # CPU
1478             print "CPU:"
1479             self.cpu_info(node)
1480
1481             # Grub
1482             print "\nGrub Command Line:"
1483             if 'grub' in node:
1484                 print \
1485                     "  Current: {}".format(
1486                         node['grub']['current_cmdline'])
1487                 print \
1488                     "  Configured: {}".format(
1489                         node['grub']['default_cmdline'])
1490
1491             # Huge Pages
1492             print "\nHuge Pages:"
1493             self.hugepage_info(node)
1494
1495             # Devices
1496             print "\nDevices:"
1497             self.device_info(node)
1498
1499             # Status
1500             print "\nVPP Service Status:"
1501             state, errors = VPPUtil.status(node)
1502             print "  {}".format(state)
1503             for e in errors:
1504                 print "  {}".format(e)
1505
1506             # Minimum system resources
1507             self.min_system_resources(node)
1508
1509             print "\n=============================="
1510
1511     def _ipv4_interface_setup_questions(self, node):
1512         """
1513         Ask the user some questions and get a list of interfaces
1514         and IPv4 addresses associated with those interfaces
1515
1516         :param node: Node dictionary.
1517         :type node: dict
1518         :returns: A list of interfaces with IPv4 addresses
1519         :rtype: list
1520         """
1521
1522         vpputl = VPPUtil()
1523         interfaces = vpputl.get_hardware(node)
1524         if interfaces == {}:
1525             return []
1526
1527         interfaces_with_ip = []
1528         for intf in sorted(interfaces.items()):
1529             name = intf[0]
1530             if name == 'local0':
1531                 continue
1532
1533             question = "Would you like to add an address to interface {} [Y/n]? ".format(name)
1534             answer = self._ask_user_yn(question, 'y')
1535             if answer == 'y':
1536                 address = {}
1537                 addr = self._ask_user_ipv4()
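                # _ask_user_ipv4() returns the address in CIDR form,
                # e.g. 192.168.1.1/24 (example value only).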
1538                 address['name'] = name
1539                 address['addr'] = addr
1540                 interfaces_with_ip.append(address)
1541
1542         return interfaces_with_ip
1543
1544     def ipv4_interface_setup(self):
1545         """
1546         After asking the user some questions, get a list of interfaces
1547         and IPv4 addresses associated with those interfaces
1548
1549         """
1550
1551         for i in self._nodes.items():
1552             node = i[1]
1553
1554             # Show the current interfaces with IP addresses
1555             current_ints = VPPUtil.get_int_ip(node)
1556             if current_ints != {}:
1557                 print ("\nThese are the current interfaces with IP addresses:")
1558                 for items in sorted(current_ints.items()):
1559                     name = items[0]
1560                     value = items[1]
1561                     if 'address' not in value:
1562                         address = 'Not Set'
1563                     else:
1564                         address = value['address']
1565                     print ("{:30} {:20} {:10}".format(name, address, value['state']))
1566                 question = "\nWould you like to keep this configuration [Y/n]? "
1567                 answer = self._ask_user_yn(question, 'y')
1568                 if answer == 'y':
1569                     continue
1570             else:
1571                 print ("\nThere are currently no interfaces with IP addresses.")
1572
1573             # Create a script that adds the IP addresses to the interfaces
1574             # and brings the interfaces up
1575             ints_with_addrs = self._ipv4_interface_setup_questions(node)
1576             content = ''
1577             for ints in ints_with_addrs:
1578                 name = ints['name']
1579                 addr = ints['addr']
1580                 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1581                 setintupstr = 'set int state {} up\n'.format(name)
1582                 content += setipstr + setintupstr
1583
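            # The script uses plain vppctl CLI syntax; for one interface it
            # would look roughly like this (names/addresses illustrative only):
            #   set int ip address GigabitEthernet0/8/0 192.168.1.1/24
            #   set int state GigabitEthernet0/8/0 up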
1584             # Write the content to the script
1585             rootdir = node['rootdir']
1586             filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1587             with open(filename, 'w+') as sfile:
1588                 sfile.write(content)
1589
1590             # Execute the script
1591             cmd = 'vppctl exec {}'.format(filename)
1592             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1593             if ret != 0:
1594                 logging.debug(stderr)
1595
1596             print("\nA script has been created at {}".format(filename))
1597             print("This script can be run using the following:")
1598             print("vppctl exec {}\n".format(filename))
1599
1600     def _create_vints_questions(self, node):
1601         """
1602         Ask the user some questions and get a list of interfaces
1603         and the virtual (vhost-user) interfaces created for them
1604
1605         :param node: Node dictionary.
1606         :type node: dict
1607         :returns: A list of interfaces with their virtual interfaces and bridge ids
1608         :rtype: list
1609         """
1610
1611         vpputl = VPPUtil()
1612         interfaces = vpputl.get_hardware(node)
1613         if interfaces == {}:
1614             return []
1615
1616         # First delete all the Virtual interfaces
1617         for intf in sorted(interfaces.items()):
1618             name = intf[0]
1619             if name[:7] == 'Virtual':
1620                 cmd = 'vppctl delete vhost-user {}'.format(name)
1621                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1622                 if ret != 0:
1623                     logging.debug('{} failed on node {} {}'.format(
1624                         cmd, node['host'], stderr))
1625
1626         # Create a virtual interface for each interface the user wants to use
1627         interfaces = vpputl.get_hardware(node)
1628         if interfaces == {}:
1629             return []
1630         interfaces_with_virtual_interfaces = []
1631         inum = 1
1632         for intf in sorted(interfaces.items()):
1633             name = intf[0]
1634             if name == 'local0':
1635                 continue
1636
1637             question = "Would you like to connect this interface {} to the VM [Y/n]? ".format(name)
1638             answer = self._ask_user_yn(question, 'y')
1639             if answer == 'y':
1640                 sockfilename = '/var/run/vpp/sock{}.sock'.format(inum)
1641                 if os.path.exists(sockfilename):
1642                     os.remove(sockfilename)
1643                 cmd = 'vppctl create vhost-user socket {} server'.format(sockfilename)
1644                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1645                 if ret != 0:
1646                     raise RuntimeError("Create vhost failed on node {} {}."
1647                                        .format(node['host'], stderr))
1648                 vintname = stdout.rstrip('\r\n')
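                # vppctl prints the name of the new vhost-user interface on
                # stdout, typically of the form VirtualEthernet0/0/0.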
1649
1650                 interface = {'name': name, 'virtualinterface': '{}'.format(vintname),
1651                              'bridge': '{}'.format(inum)}
1652                 inum += 1
1653                 interfaces_with_virtual_interfaces.append(interface)
1654
1655         return interfaces_with_virtual_interfaces
1656
1657     def create_and_bridge_virtual_interfaces(self):
1658         """
1659         After asking the user some questions, create vhost-user (virtual)
1660         interfaces and bridge them with the physical VPP interfaces
1661
1662         """
1663
1664         for i in self._nodes.items():
1665             node = i[1]
1666
1667             # Show the current bridge and interface configuration
1668             print "\nThis is the current bridge configuration:"
1669             VPPUtil.show_bridge(node)
1670             question = "\nWould you like to keep this configuration [Y/n]? "
1671             answer = self._ask_user_yn(question, 'y')
1672             if answer == 'y':
1673                 continue
1674
1675             # Create a script that builds a bridge configuration with physical interfaces
1676             # and virtual interfaces
1677             ints_with_vints = self._create_vints_questions(node)
1678             content = ''
1679             for intf in ints_with_vints:
1680                 vhoststr = 'comment { The following command creates the socket }\n'
1681                 vhoststr += 'comment { and returns a virtual interface }\n'
1682                 vhoststr += 'comment {{ create vhost-user socket /var/run/vpp/sock{}.sock server }}\n'. \
1683                     format(intf['bridge'])
1684
1685                 setintdnstr = 'set interface state {} down\n'.format(intf['name'])
1686
1687                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge'])
1688                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(intf['virtualinterface'], intf['bridge'])
1689
1690                 # bring the virtual interface (e.g. VirtualEthernet0/0/0) up
1691                 setintvststr = 'set interface state {} up\n'.format(intf['virtualinterface'])
1692
1693                 # bring the physical interface back up
1694                 setintupstr = 'set interface state {} up\n'.format(intf['name'])
1695
1696                 content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
1697
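            # For each selected interface the generated script looks roughly
            # like this (interface names and bridge id illustrative only):
            #   comment { create vhost-user socket /var/run/vpp/sock1.sock server }
            #   set interface state GigabitEthernet0/8/0 down
            #   set interface l2 bridge GigabitEthernet0/8/0 1
            #   set interface l2 bridge VirtualEthernet0/0/0 1
            #   set interface state VirtualEthernet0/0/0 up
            #   set interface state GigabitEthernet0/8/0 up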
1698             # Write the content to the script
1699             rootdir = node['rootdir']
1700             filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1701             with open(filename, 'w+') as sfile:
1702                 sfile.write(content)
1703
1704             # Execute the script
1705             cmd = 'vppctl exec {}'.format(filename)
1706             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1707             if ret != 0:
1708                 logging.debug(stderr)
1709
1710             print("\nA script has been created at {}".format(filename))
1711             print("This script can be run using the following:")
1712             print("vppctl exec {}\n".format(filename))