vpp_config: Rework for Python2/3 compatibility.
[vpp.git] / extras / vpp_config / vpplib / AutoConfig.py
1 # Copyright (c) 2016 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Library that supports Auto Configuration."""
15 from __future__ import absolute_import, division, print_function
16
17 import logging
18 import os
19 import re
20 from ipaddress import ip_address
21
22 import yaml
23
24 from vpplib.VPPUtil import VPPUtil
25 from vpplib.VppPCIUtil import VppPCIUtil
26 from vpplib.VppHugePageUtil import VppHugePageUtil
27 from vpplib.CpuUtils import CpuUtils
28 from vpplib.VppGrubUtil import VppGrubUtil
29 from vpplib.QemuUtils import QemuUtils
30
31 #  Python2/3 compatible
32 try:
33     input = raw_input  # noqa
34 except NameError:
35     pass
36
37 __all__ = ["AutoConfig"]
38
39 # Constants
40 MIN_SYSTEM_CPUS = 2
41 MIN_TOTAL_HUGE_PAGES = 1024
42 MAX_PERCENT_FOR_HUGE_PAGES = 70
43
44 IPERFVM_XML = 'configs/iperf-vm.xml'
45 IPERFVM_IMAGE = 'images/xenial-mod.img'
46 IPERFVM_ISO = 'configs/cloud-config.iso'
47
48
49 class AutoConfig(object):
50     """Auto Configuration Tools"""
51
52     def __init__(self, rootdir, filename, clean=False):
53         """
54         The Auto Configure class.
55
56         :param rootdir: The root directory for all the auto configuration files
57         :param filename: The autoconfiguration file
58         :param clean: When set initialize the nodes from the auto-config file
59         :type rootdir: str
60         :type filename: str
61         :type clean: bool
62         """
63         self._autoconfig_filename = rootdir + filename
64         self._rootdir = rootdir
65         self._metadata = {}
66         self._nodes = {}
67         self._vpp_devices_node = {}
68         self._hugepage_config = ""
69         self._clean = clean
70         self._loadconfig()
71         self._sockfilename = ""
72
73     def get_nodes(self):
74         """
75         Returns the nodes dictionary.
76
77         :returns: The nodes
78         :rtype: dictionary
79         """
80
81         return self._nodes
82
83     @staticmethod
84     def _autoconfig_backup_file(filename):
85         """
86         Create a backup file.
87
88         :param filename: The file to backup
89         :type filename: str
90         """
91
92         # Does a copy of the file exist, if not create one
93         ofile = filename + '.orig'
94         (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
95         if ret != 0:
96             logging.debug(stderr)
97             if stdout.strip('\n') != ofile:
98                 cmd = 'sudo cp {} {}'.format(filename, ofile)
99                 (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
100                 if ret != 0:
101                     logging.debug(stderr)
102
103     # noinspection PyBroadException
104     @staticmethod
105     def _ask_user_ipv4():
106         """
107         Asks the user for a number within a range.
108         default is returned if return is entered.
109
110         :returns: IP address with cidr
111         :rtype: str
112         """
113
114         while True:
115             answer = input("Please enter the IPv4 Address [n.n.n.n/n]: ")
116             try:
117                 ipinput = answer.split('/')
118                 ipaddr = ip_address(ipinput[0])
119                 if len(ipinput) > 1:
120                     plen = answer.split('/')[1]
121                 else:
122                     answer = input("Please enter the netmask [n.n.n.n]: ")
123                     plen = ip_address(answer).netmask_bits()
124                 return '{}/{}'.format(ipaddr, plen)
125             except None:
126                 print("Please enter a valid IPv4 address.")
127
128     @staticmethod
129     def _ask_user_range(question, first, last, default):
130         """
131         Asks the user for a number within a range.
132         default is returned if return is entered.
133
134         :param question: Text of a question.
135         :param first: First number in the range
136         :param last: Last number in the range
137         :param default: The value returned when return is entered
138         :type question: string
139         :type first: int
140         :type last: int
141         :type default: int
142         :returns: The answer to the question
143         :rtype: int
144         """
145
146         while True:
147             answer = input(question)
148             if answer == '':
149                 answer = default
150                 break
151             if re.findall(r'[0-9+]', answer):
152                 if int(answer) in range(first, last + 1):
153                     break
154                 else:
155                     print("Please a value between {} and {} or Return.".
156                           format(first, last))
157             else:
158                 print("Please a number between {} and {} or Return.".
159                       format(first, last))
160
161         return int(answer)
162
163     @staticmethod
164     def _ask_user_yn(question, default):
165         """
166         Asks the user for a yes or no question.
167
168         :param question: Text of a question.
169         :param default: The value returned when return is entered
170         :type question: string
171         :type default: string
172         :returns: The answer to the question
173         :rtype: string
174         """
175
176         input_valid = False
177         default = default.lower()
178         answer = ''
179         while not input_valid:
180             answer = input(question)
181             if answer == '':
182                 answer = default
183             if re.findall(r'[YyNn]', answer):
184                 input_valid = True
185                 answer = answer[0].lower()
186             else:
187                 print("Please answer Y, N or Return.")
188
189         return answer
190
191     def _loadconfig(self):
192         """
193         Load the testbed configuration, given the auto configuration file.
194
195         """
196
197         # Get the Topology, from the topology layout file
198         topo = {}
199         with open(self._autoconfig_filename, 'r') as stream:
200             try:
201                 topo = yaml.load(stream)
202                 if 'metadata' in topo:
203                     self._metadata = topo['metadata']
204             except yaml.YAMLError as exc:
205                 raise RuntimeError(
206                     "Couldn't read the Auto config file {}.".format(
207                         self._autoconfig_filename, exc))
208
209         systemfile = self._rootdir + self._metadata['system_config_file']
210         if self._clean is False and os.path.isfile(systemfile):
211             with open(systemfile, 'r') as sysstream:
212                 try:
213                     systopo = yaml.load(sysstream)
214                     if 'nodes' in systopo:
215                         self._nodes = systopo['nodes']
216                 except yaml.YAMLError as sysexc:
217                     raise RuntimeError(
218                         "Couldn't read the System config file {}.".format(
219                             systemfile, sysexc))
220         else:
221             # Get the nodes from Auto Config
222             if 'nodes' in topo:
223                 self._nodes = topo['nodes']
224
225         # Set the root directory in all the nodes
226         for i in self._nodes.items():
227             node = i[1]
228             node['rootdir'] = self._rootdir
229
230     def updateconfig(self):
231         """
232         Update the testbed configuration, given the auto configuration file.
233         We will write the system configuration file with the current node
234         information
235
236         """
237
238         # Initialize the yaml data
239         ydata = {'metadata': self._metadata, 'nodes': self._nodes}
240
241         # Write the system config file
242         filename = self._rootdir + self._metadata['system_config_file']
243         with open(filename, 'w') as yamlfile:
244             yaml.dump(ydata, yamlfile)
245
246     def _update_auto_config(self):
247         """
248         Write the auto configuration file with the new configuration data,
249         input from the user.
250
251         """
252
253         # Initialize the yaml data
254         nodes = {}
255         with open(self._autoconfig_filename, 'r') as stream:
256             try:
257                 ydata = yaml.load(stream)
258                 if 'nodes' in ydata:
259                     nodes = ydata['nodes']
260             except yaml.YAMLError as exc:
261                 print(exc)
262                 return
263
264         for i in nodes.items():
265             key = i[0]
266             node = i[1]
267
268             # Interfaces
269             node['interfaces'] = {}
270             for item in self._nodes[key]['interfaces'].items():
271                 port = item[0]
272                 interface = item[1]
273
274                 node['interfaces'][port] = {}
275                 addr = '{}'.format(interface['pci_address'])
276                 node['interfaces'][port]['pci_address'] = addr
277                 if 'mac_address' in interface:
278                     node['interfaces'][port]['mac_address'] = \
279                         interface['mac_address']
280
281             if 'total_other_cpus' in self._nodes[key]['cpu']:
282                 node['cpu']['total_other_cpus'] = \
283                     self._nodes[key]['cpu']['total_other_cpus']
284             if 'total_vpp_cpus' in self._nodes[key]['cpu']:
285                 node['cpu']['total_vpp_cpus'] = \
286                     self._nodes[key]['cpu']['total_vpp_cpus']
287             if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
288                 node['cpu']['reserve_vpp_main_core'] = \
289                     self._nodes[key]['cpu']['reserve_vpp_main_core']
290
291             # TCP
292             if 'active_open_sessions' in self._nodes[key]['tcp']:
293                 node['tcp']['active_open_sessions'] = \
294                     self._nodes[key]['tcp']['active_open_sessions']
295             if 'passive_open_sessions' in self._nodes[key]['tcp']:
296                 node['tcp']['passive_open_sessions'] = \
297                     self._nodes[key]['tcp']['passive_open_sessions']
298
299             # Huge pages
300             node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
301
302         # Write the auto config config file
303         with open(self._autoconfig_filename, 'w') as yamlfile:
304             yaml.dump(ydata, yamlfile)
305
306     def apply_huge_pages(self):
307         """
308         Apply the huge page config
309
310         """
311
312         for i in self._nodes.items():
313             node = i[1]
314
315             hpg = VppHugePageUtil(node)
316             hpg.hugepages_dryrun_apply()
317
318     @staticmethod
319     def _apply_vpp_unix(node):
320         """
321         Apply the VPP Unix config
322
323         :param node: Node dictionary with cpuinfo.
324         :type node: dict
325         """
326
327         unix = '  nodaemon\n'
328         if 'unix' not in node['vpp']:
329             return ''
330
331         unixv = node['vpp']['unix']
332         if 'interactive' in unixv:
333             interactive = unixv['interactive']
334             if interactive is True:
335                 unix = '  interactive\n'
336
337         return unix.rstrip('\n')
338
339     @staticmethod
340     def _apply_vpp_cpu(node):
341         """
342         Apply the VPP cpu config
343
344         :param node: Node dictionary with cpuinfo.
345         :type node: dict
346         """
347
348         # Get main core
349         cpu = '\n'
350         if 'vpp_main_core' in node['cpu']:
351             vpp_main_core = node['cpu']['vpp_main_core']
352         else:
353             vpp_main_core = 0
354         if vpp_main_core is not 0:
355             cpu += '  main-core {}\n'.format(vpp_main_core)
356
357         # Get workers
358         vpp_workers = node['cpu']['vpp_workers']
359         vpp_worker_len = len(vpp_workers)
360         if vpp_worker_len > 0:
361             vpp_worker_str = ''
362             for i, worker in enumerate(vpp_workers):
363                 if i > 0:
364                     vpp_worker_str += ','
365                 if worker[0] == worker[1]:
366                     vpp_worker_str += "{}".format(worker[0])
367                 else:
368                     vpp_worker_str += "{}-{}".format(worker[0], worker[1])
369
370             cpu += '  corelist-workers {}\n'.format(vpp_worker_str)
371
372         return cpu
373
374     @staticmethod
375     def _apply_vpp_devices(node):
376         """
377         Apply VPP PCI Device configuration to vpp startup.
378
379         :param node: Node dictionary with cpuinfo.
380         :type node: dict
381         """
382
383         devices = ''
384         ports_per_numa = node['cpu']['ports_per_numa']
385         total_mbufs = node['cpu']['total_mbufs']
386
387         for item in ports_per_numa.items():
388             value = item[1]
389             interfaces = value['interfaces']
390
391             # if 0 was specified for the number of vpp workers, use 1 queue
392             num_rx_queues = None
393             num_tx_queues = None
394             if 'rx_queues' in value:
395                 num_rx_queues = value['rx_queues']
396             if 'tx_queues' in value:
397                 num_tx_queues = value['tx_queues']
398
399             num_rx_desc = None
400             num_tx_desc = None
401
402             # Create the devices string
403             for interface in interfaces:
404                 pci_address = interface['pci_address']
405                 pci_address = pci_address.lstrip("'").rstrip("'")
406                 devices += '\n'
407                 devices += '  dev {} {{ \n'.format(pci_address)
408                 if num_rx_queues:
409                     devices += '    num-rx-queues {}\n'.format(num_rx_queues)
410                 else:
411                     devices += '    num-rx-queues {}\n'.format(1)
412                 if num_tx_queues:
413                     devices += '    num-tx-queues {}\n'.format(num_tx_queues)
414                 if num_rx_desc:
415                     devices += '    num-rx-desc {}\n'.format(num_rx_desc)
416                 if num_tx_desc:
417                     devices += '    num-tx-desc {}\n'.format(num_tx_desc)
418                 devices += '  }'
419
420         # If the total mbufs is not 0 or less than the default, set num-bufs
421         logging.debug("Total mbufs: {}".format(total_mbufs))
422         if total_mbufs is not 0 and total_mbufs > 16384:
423             devices += '\n  num-mbufs {}'.format(total_mbufs)
424
425         return devices
426
427     @staticmethod
428     def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end,
429                           total_vpp_workers,
430                           reserve_vpp_main_core):
431         """
432         Calculate the VPP worker information
433
434         :param node: Node dictionary
435         :param vpp_workers: List of VPP workers
436         :param numa_node: Numa node
437         :param other_cpus_end: The end of the cpus allocated for cores
438         other than vpp
439         :param total_vpp_workers: The number of vpp workers needed
440         :param reserve_vpp_main_core: Is there a core needed for
441         the vpp main core
442         :type node: dict
443         :type numa_node: int
444         :type other_cpus_end: int
445         :type total_vpp_workers: int
446         :type reserve_vpp_main_core: bool
447         :returns: Is a core still needed for the vpp main core
448         :rtype: bool
449         """
450
451         # Can we fit the workers in one of these slices
452         cpus = node['cpu']['cpus_per_node'][numa_node]
453         for cpu in cpus:
454             start = cpu[0]
455             end = cpu[1]
456             if start <= other_cpus_end:
457                 start = other_cpus_end + 1
458
459             if reserve_vpp_main_core:
460                 start += 1
461
462             workers_end = start + total_vpp_workers - 1
463
464             if workers_end <= end:
465                 if reserve_vpp_main_core:
466                     node['cpu']['vpp_main_core'] = start - 1
467                 reserve_vpp_main_core = False
468                 if total_vpp_workers:
469                     vpp_workers.append((start, workers_end))
470                 break
471
472         # We still need to reserve the main core
473         if reserve_vpp_main_core:
474             node['cpu']['vpp_main_core'] = other_cpus_end + 1
475
476         return reserve_vpp_main_core
477
478     @staticmethod
479     def _calc_desc_and_queues(total_numa_nodes,
480                               total_ports_per_numa,
481                               total_rx_queues,
482                               ports_per_numa_value):
483         """
484         Calculate the number of descriptors and queues
485
486         :param total_numa_nodes: The total number of numa nodes
487         :param total_ports_per_numa: The total number of ports for this
488         numa node
489         :param total_rx_queues: The total number of rx queues / port
490         :param ports_per_numa_value: The value from the ports_per_numa
491         dictionary
492         :type total_numa_nodes: int
493         :type total_ports_per_numa: int
494         :type total_rx_queues: int
495         :type ports_per_numa_value: dict
496         :returns The total number of message buffers
497         :rtype: int
498         """
499
500         # Get the number of rx queues
501         rx_queues = max(1, total_rx_queues)
502         tx_queues = rx_queues * total_numa_nodes + 1
503
504         # Get the descriptor entries
505         desc_entries = 1024
506         ports_per_numa_value['rx_queues'] = rx_queues
507         total_mbufs = (((rx_queues * desc_entries) +
508                         (tx_queues * desc_entries)) *
509                        total_ports_per_numa)
510         total_mbufs = total_mbufs
511
512         return total_mbufs
513
514     @staticmethod
515     def _create_ports_per_numa(node, interfaces):
516         """
517         Create a dictionary or ports per numa node
518         :param node: Node dictionary
519         :param interfaces: All the interfaces to be used by vpp
520         :type node: dict
521         :type interfaces: dict
522         :returns: The ports per numa dictionary
523         :rtype: dict
524         """
525
526         # Make a list of ports by numa node
527         ports_per_numa = {}
528         for item in interfaces.items():
529             i = item[1]
530             if i['numa_node'] not in ports_per_numa:
531                 ports_per_numa[i['numa_node']] = {'interfaces': []}
532                 ports_per_numa[i['numa_node']]['interfaces'].append(i)
533             else:
534                 ports_per_numa[i['numa_node']]['interfaces'].append(i)
535         node['cpu']['ports_per_numa'] = ports_per_numa
536
537         return ports_per_numa
538
539     def calculate_cpu_parameters(self):
540         """
541         Calculate the cpu configuration.
542
543         """
544
545         # Calculate the cpu parameters, needed for the
546         # vpp_startup and grub configuration
547         for i in self._nodes.items():
548             node = i[1]
549
550             # get total number of nic ports
551             interfaces = node['interfaces']
552
553             # Make a list of ports by numa node
554             ports_per_numa = self._create_ports_per_numa(node, interfaces)
555
556             # Get the number of cpus to skip, we never use the first cpu
557             other_cpus_start = 1
558             other_cpus_end = other_cpus_start + \
559                 node['cpu']['total_other_cpus'] - 1
560             other_workers = None
561             if other_cpus_end is not 0:
562                 other_workers = (other_cpus_start, other_cpus_end)
563             node['cpu']['other_workers'] = other_workers
564
565             # Allocate the VPP main core and workers
566             vpp_workers = []
567             reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
568             total_vpp_cpus = node['cpu']['total_vpp_cpus']
569             total_rx_queues = node['cpu']['total_rx_queues']
570
571             # If total_vpp_cpus is 0 or is less than the numa nodes with ports
572             #  then we shouldn't get workers
573             total_workers_node = 0
574             if len(ports_per_numa):
575                 total_workers_node = total_vpp_cpus / len(ports_per_numa)
576             total_main = 0
577             if reserve_vpp_main_core:
578                 total_main = 1
579             total_mbufs = 0
580             if total_main + total_workers_node is not 0:
581                 for item in ports_per_numa.items():
582                     numa_node = item[0]
583                     value = item[1]
584
585                     # Get the number of descriptors and queues
586                     mbufs = self._calc_desc_and_queues(
587                         len(ports_per_numa),
588                         len(value['interfaces']), total_rx_queues, value)
589                     total_mbufs += mbufs
590
591                     # Get the VPP workers
592                     reserve_vpp_main_core = self._calc_vpp_workers(
593                         node, vpp_workers, numa_node,
594                         other_cpus_end, total_workers_node,
595                         reserve_vpp_main_core)
596
597                 total_mbufs *= 2.5
598                 total_mbufs = int(total_mbufs)
599             else:
600                 total_mbufs = 0
601
602             # Save the info
603             node['cpu']['vpp_workers'] = vpp_workers
604             node['cpu']['total_mbufs'] = total_mbufs
605
606         # Write the config
607         self.updateconfig()
608
609     @staticmethod
610     def _apply_vpp_tcp(node):
611         """
612         Apply the VPP Unix config
613
614         :param node: Node dictionary with cpuinfo.
615         :type node: dict
616         """
617
618         active_open_sessions = node['tcp']['active_open_sessions']
619         aos = int(active_open_sessions)
620
621         passive_open_sessions = node['tcp']['passive_open_sessions']
622         pos = int(passive_open_sessions)
623
624         # Generate the api-segment gid vpp sheit in any case
625         if (aos + pos) == 0:
626             tcp = '\n'.join([
627                 "api-segment {",
628                 "  gid vpp",
629                 "}"
630             ])
631             return tcp.rstrip('\n')
632
633         tcp = '\n'.join([
634             "# TCP stack-related configuration parameters",
635             "# expecting {:d} client sessions, {:d} server sessions\n".format(
636                 aos, pos),
637             "heapsize 4g\n",
638             "api-segment {",
639             "  global-size 2000M",
640             "  api-size 1G",
641             "}\n",
642
643             "session {",
644             "  event-queue-length {:d}".format(aos + pos),
645             "  preallocated-sessions {:d}".format(aos + pos),
646             "  v4-session-table-buckets {:d}".format((aos + pos) // 4),
647             "  v4-session-table-memory 3g\n"
648         ])
649         if aos > 0:
650             tcp = tcp + "  v4-halfopen-table-buckets {:d}".format(
651                 (aos + pos) // 4) + "\n"
652             tcp = tcp + "  v4-halfopen-table-memory 3g\n"
653             tcp = tcp + "  local-endpoints-table-buckets {:d}".format(
654                 (aos + pos) // 4) + "\n"
655             tcp = tcp + "  local-endpoints-table-memory 3g\n"
656         tcp = tcp + "}\n\n"
657
658         tcp = tcp + "tcp {\n"
659         tcp = tcp + "  preallocated-connections {:d}".format(aos + pos) + "\n"
660         if aos > 0:
661             tcp = tcp + "  preallocated-half-open-connections {:d}".format(
662                 aos) + "\n"
663         tcp = tcp + "}\n\n"
664
665         return tcp.rstrip('\n')
666
667     def apply_vpp_startup(self):
668         """
669         Apply the vpp startup configration
670
671         """
672
673         # Apply the VPP startup configruation
674         for i in self._nodes.items():
675             node = i[1]
676
677             # Get the startup file
678             rootdir = node['rootdir']
679             sfile = rootdir + node['vpp']['startup_config_file']
680
681             # Get the devices
682             devices = self._apply_vpp_devices(node)
683
684             # Get the CPU config
685             cpu = self._apply_vpp_cpu(node)
686
687             # Get the unix config
688             unix = self._apply_vpp_unix(node)
689
690             # Get the TCP configuration, if any
691             tcp = self._apply_vpp_tcp(node)
692
693             # Make a backup if needed
694             self._autoconfig_backup_file(sfile)
695
696             # Get the template
697             tfile = sfile + '.template'
698             (ret, stdout, stderr) = \
699                 VPPUtil.exec_command('cat {}'.format(tfile))
700             if ret != 0:
701                 raise RuntimeError('Executing cat command failed to node {}'.
702                                    format(node['host']))
703             startup = stdout.format(unix=unix,
704                                     cpu=cpu,
705                                     devices=devices,
706                                     tcp=tcp)
707
708             (ret, stdout, stderr) = \
709                 VPPUtil.exec_command('rm {}'.format(sfile))
710             if ret != 0:
711                 logging.debug(stderr)
712
713             cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
714             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
715             if ret != 0:
716                 raise RuntimeError('Writing config failed node {}'.
717                                    format(node['host']))
718
719     def apply_grub_cmdline(self):
720         """
721         Apply the grub cmdline
722
723         """
724
725         for i in self._nodes.items():
726             node = i[1]
727
728             # Get the isolated CPUs
729             other_workers = node['cpu']['other_workers']
730             vpp_workers = node['cpu']['vpp_workers']
731             if 'vpp_main_core' in node['cpu']:
732                 vpp_main_core = node['cpu']['vpp_main_core']
733             else:
734                 vpp_main_core = 0
735             all_workers = []
736             if other_workers is not None:
737                 all_workers = [other_workers]
738             if vpp_main_core is not 0:
739                 all_workers += [(vpp_main_core, vpp_main_core)]
740             all_workers += vpp_workers
741             isolated_cpus = ''
742             for idx, worker in enumerate(all_workers):
743                 if worker is None:
744                     continue
745                 if idx > 0:
746                     isolated_cpus += ','
747                 if worker[0] == worker[1]:
748                     isolated_cpus += "{}".format(worker[0])
749                 else:
750                     isolated_cpus += "{}-{}".format(worker[0], worker[1])
751
752             vppgrb = VppGrubUtil(node)
753             current_cmdline = vppgrb.get_current_cmdline()
754             if 'grub' not in node:
755                 node['grub'] = {}
756             node['grub']['current_cmdline'] = current_cmdline
757             node['grub']['default_cmdline'] = \
758                 vppgrb.apply_cmdline(node, isolated_cpus)
759
760         self.updateconfig()
761
762     def get_hugepages(self):
763         """
764         Get the hugepage configuration
765
766         """
767
768         for i in self._nodes.items():
769             node = i[1]
770
771             hpg = VppHugePageUtil(node)
772             max_map_count, shmmax = hpg.get_huge_page_config()
773             node['hugepages']['max_map_count'] = max_map_count
774             node['hugepages']['shmax'] = shmmax
775             total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
776             node['hugepages']['actual_total'] = total
777             node['hugepages']['free'] = free
778             node['hugepages']['size'] = size
779             node['hugepages']['memtotal'] = memtotal
780             node['hugepages']['memfree'] = memfree
781
782         self.updateconfig()
783
784     def get_grub(self):
785         """
786         Get the grub configuration
787
788         """
789
790         for i in self._nodes.items():
791             node = i[1]
792
793             vppgrb = VppGrubUtil(node)
794             current_cmdline = vppgrb.get_current_cmdline()
795             default_cmdline = vppgrb.get_default_cmdline()
796
797             # Get the total number of isolated CPUs
798             current_iso_cpus = 0
799             iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
800             iso_cpurl = len(iso_cpur)
801             if iso_cpurl > 0:
802                 iso_cpu_str = iso_cpur[0]
803                 iso_cpu_str = iso_cpu_str.split('=')[1]
804                 iso_cpul = iso_cpu_str.split(',')
805                 for iso_cpu in iso_cpul:
806                     isocpuspl = iso_cpu.split('-')
807                     if len(isocpuspl) is 1:
808                         current_iso_cpus += 1
809                     else:
810                         first = int(isocpuspl[0])
811                         second = int(isocpuspl[1])
812                         if first == second:
813                             current_iso_cpus += 1
814                         else:
815                             current_iso_cpus += second - first
816
817             if 'grub' not in node:
818                 node['grub'] = {}
819             node['grub']['current_cmdline'] = current_cmdline
820             node['grub']['default_cmdline'] = default_cmdline
821             node['grub']['current_iso_cpus'] = current_iso_cpus
822
823         self.updateconfig()
824
825     @staticmethod
826     def _get_device(node):
827         """
828         Get the device configuration for a single node
829
830         :param node: Node dictionary with cpuinfo.
831         :type node: dict
832
833         """
834
835         vpp = VppPCIUtil(node)
836         vpp.get_all_devices()
837
838         # Save the device information
839         node['devices'] = {}
840         node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
841         node['devices']['kernel_devices'] = vpp.get_kernel_devices()
842         node['devices']['other_devices'] = vpp.get_other_devices()
843         node['devices']['linkup_devices'] = vpp.get_link_up_devices()
844
845     def get_devices_per_node(self):
846         """
847         Get the device configuration for all the nodes
848
849         """
850
851         for i in self._nodes.items():
852             node = i[1]
853             # Update the interface data
854
855             self._get_device(node)
856
857         self.updateconfig()
858
859     @staticmethod
860     def get_cpu_layout(node):
861         """
862         Get the cpu layout
863
864         using lscpu -p get the cpu layout.
865         Returns a list with each item representing a single cpu.
866
867         :param node: Node dictionary.
868         :type node: dict
869         :returns: The cpu layout
870         :rtype: list
871         """
872
873         cmd = 'lscpu -p'
874         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
875         if ret != 0:
876             raise RuntimeError('{} failed on node {} {}'.
877                                format(cmd, node['host'], stderr))
878
879         pcpus = []
880         lines = stdout.split('\n')
881         for line in lines:
882             if line == '' or line[0] == '#':
883                 continue
884             linesplit = line.split(',')
885             layout = {'cpu': linesplit[0], 'core': linesplit[1],
886                       'socket': linesplit[2], 'node': linesplit[3]}
887
888             # cpu, core, socket, node
889             pcpus.append(layout)
890
891         return pcpus
892
893     def get_cpu(self):
894         """
895         Get the cpu configuration
896
897         """
898
899         # Get the CPU layout
900         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
901
902         for i in self._nodes.items():
903             node = i[1]
904
905             # Get the cpu layout
906             layout = self.get_cpu_layout(node)
907             node['cpu']['layout'] = layout
908
909             cpuinfo = node['cpuinfo']
910             smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
911             node['cpu']['smt_enabled'] = smt_enabled
912
913             # We don't want to write the cpuinfo
914             node['cpuinfo'] = ""
915
916         # Write the config
917         self.updateconfig()
918
919     def discover(self):
920         """
921         Get the current system configuration.
922
923         """
924
925         # Get the Huge Page configuration
926         self.get_hugepages()
927
928         # Get the device configuration
929         self.get_devices_per_node()
930
931         # Get the CPU configuration
932         self.get_cpu()
933
934         # Get the current grub cmdline
935         self.get_grub()
936
937     def _modify_cpu_questions(self, node, total_cpus, numa_nodes):
938         """
939         Ask the user questions related to the cpu configuration.
940
941         :param node: Node dictionary
942         :param total_cpus: The total number of cpus in the system
943         :param numa_nodes: The list of numa nodes in the system
944         :type node: dict
945         :type total_cpus: int
946         :type numa_nodes: list
947         """
948
949         print("\nYour system has {} core(s) and {} Numa Nodes.".
950               format(total_cpus, len(numa_nodes)))
951         print("To begin, we suggest not reserving any cores for "
952               "VPP or other processes.")
953         print("Then to improve performance start reserving cores and "
954               "adding queues as needed.")
955
956         max_vpp_cpus = 4
957         total_vpp_cpus = 0
958         if max_vpp_cpus > 0:
959             question = "\nHow many core(s) shall we reserve for " \
960                        "VPP [0-{}][0]? ".format(max_vpp_cpus)
961             total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
962             node['cpu']['total_vpp_cpus'] = total_vpp_cpus
963
964         max_other_cores = (total_cpus - total_vpp_cpus) / 2
965         question = 'How many core(s) do you want to reserve for ' \
966                    'processes other than VPP? [0-{}][0]? '. \
967             format(str(max_other_cores))
968         total_other_cpus = self._ask_user_range(
969             question, 0, max_other_cores, 0)
970         node['cpu']['total_other_cpus'] = total_other_cpus
971
972         max_main_cpus = max_vpp_cpus + 1 - total_vpp_cpus
973         reserve_vpp_main_core = False
974         if max_main_cpus > 0:
975             question = "Should we reserve 1 core for the VPP Main thread? "
976             question += "[y/N]? "
977             answer = self._ask_user_yn(question, 'n')
978             if answer == 'y':
979                 reserve_vpp_main_core = True
980             node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
981             node['cpu']['vpp_main_core'] = 0
982
983         question = "How many RX queues per port shall we use for " \
984                    "VPP [1-4][1]? ".format(max_vpp_cpus)
985         total_rx_queues = self._ask_user_range(question, 1, 4, 1)
986         node['cpu']['total_rx_queues'] = total_rx_queues
987
988     def modify_cpu(self, ask_questions=True):
989         """
990         Modify the cpu configuration, asking for the user for the values.
991
992         :param ask_questions: When true ask the user for config parameters
993
994         """
995
996         # Get the CPU layout
997         CpuUtils.get_cpu_layout_from_all_nodes(self._nodes)
998
999         for i in self._nodes.items():
1000             node = i[1]
1001             total_cpus = 0
1002             total_cpus_per_slice = 0
1003             cpus_per_node = {}
1004             numa_nodes = []
1005             cores = []
1006             cpu_layout = self.get_cpu_layout(node)
1007
1008             # Assume the number of cpus per slice is always the same as the
1009             # first slice
1010             first_node = '0'
1011             for cpu in cpu_layout:
1012                 if cpu['node'] != first_node:
1013                     break
1014                 total_cpus_per_slice += 1
1015
1016             # Get the total number of cpus, cores, and numa nodes from the
1017             # cpu layout
1018             for cpul in cpu_layout:
1019                 numa_node = cpul['node']
1020                 core = cpul['core']
1021                 cpu = cpul['cpu']
1022                 total_cpus += 1
1023
1024                 if numa_node not in cpus_per_node:
1025                     cpus_per_node[numa_node] = []
1026                 cpuperslice = int(cpu) % total_cpus_per_slice
1027                 if cpuperslice == 0:
1028                     cpus_per_node[numa_node].append((int(cpu), int(cpu) +
1029                                                      total_cpus_per_slice - 1))
1030                 if numa_node not in numa_nodes:
1031                     numa_nodes.append(numa_node)
1032                 if core not in cores:
1033                     cores.append(core)
1034             node['cpu']['cpus_per_node'] = cpus_per_node
1035
1036             # Ask the user some questions
1037             if ask_questions and total_cpus >= 8:
1038                 self._modify_cpu_questions(node, total_cpus, numa_nodes)
1039
1040             # Populate the interfaces with the numa node
1041             if 'interfaces' in node:
1042                 ikeys = node['interfaces'].keys()
1043                 VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
1044
1045             # We don't want to write the cpuinfo
1046             node['cpuinfo'] = ""
1047
1048         # Write the configs
1049         self._update_auto_config()
1050         self.updateconfig()
1051
1052     def _modify_other_devices(self, node,
1053                               other_devices, kernel_devices, dpdk_devices):
1054         """
1055         Modify the devices configuration, asking for the user for the values.
1056
1057         """
1058
1059         odevices_len = len(other_devices)
1060         if odevices_len > 0:
1061             print("\nThese device(s) are currently NOT being used "
1062                   "by VPP or the OS.\n")
1063             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1064             question = "\nWould you like to give any of these devices"
1065             question += " back to the OS [Y/n]? "
1066             answer = self._ask_user_yn(question, 'Y')
1067             if answer == 'y':
1068                 vppd = {}
1069                 for dit in other_devices.items():
1070                     dvid = dit[0]
1071                     device = dit[1]
1072                     question = "Would you like to use device {} for". \
1073                         format(dvid)
1074                     question += " the OS [y/N]? "
1075                     answer = self._ask_user_yn(question, 'n')
1076                     if answer == 'y':
1077                         if 'unused' in device and len(
1078                                 device['unused']) != 0 and \
1079                                 device['unused'][0] != '':
1080                             driver = device['unused'][0]
1081                             ret = VppPCIUtil.bind_vpp_device(
1082                                 node, driver, dvid)
1083                             if ret:
1084                                 logging.debug(
1085                                     'Could not bind device {}'.format(dvid))
1086                             else:
1087                                 vppd[dvid] = device
1088                 for dit in vppd.items():
1089                     dvid = dit[0]
1090                     device = dit[1]
1091                     kernel_devices[dvid] = device
1092                     del other_devices[dvid]
1093
1094         odevices_len = len(other_devices)
1095         if odevices_len > 0:
1096             print("\nThese device(s) are still NOT being used "
1097                   "by VPP or the OS.\n")
1098             VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
1099             question = "\nWould you like use any of these for VPP [y/N]? "
1100             answer = self._ask_user_yn(question, 'N')
1101             if answer == 'y':
1102                 vppd = {}
1103                 for dit in other_devices.items():
1104                     dvid = dit[0]
1105                     device = dit[1]
1106                     question = "Would you like to use device {} ".format(dvid)
1107                     question += "for VPP [y/N]? "
1108                     answer = self._ask_user_yn(question, 'n')
1109                     if answer == 'y':
1110                         vppd[dvid] = device
1111                 for dit in vppd.items():
1112                     dvid = dit[0]
1113                     device = dit[1]
1114                     if 'unused' in device and len(device['unused']) != 0 and \
1115                             device['unused'][0] != '':
1116                         driver = device['unused'][0]
1117                         logging.debug(
1118                             'Binding device {} to driver {}'.format(dvid,
1119                                                                     driver))
1120                         ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1121                         if ret:
1122                             logging.debug(
1123                                 'Could not bind device {}'.format(dvid))
1124                         else:
1125                             dpdk_devices[dvid] = device
1126                             del other_devices[dvid]
1127
1128     def update_interfaces_config(self):
1129         """
1130         Modify the interfaces directly from the config file.
1131
1132         """
1133
1134         for i in self._nodes.items():
1135             node = i[1]
1136             devices = node['devices']
1137             all_devices = devices['other_devices']
1138             all_devices.update(devices['dpdk_devices'])
1139             all_devices.update(devices['kernel_devices'])
1140
1141             current_ifcs = {}
1142             interfaces = {}
1143             if 'interfaces' in node:
1144                 current_ifcs = node['interfaces']
1145             if current_ifcs:
1146                 for ifc in current_ifcs.values():
1147                     dvid = ifc['pci_address']
1148                     if dvid in all_devices:
1149                         VppPCIUtil.vpp_create_interface(interfaces, dvid,
1150                                                         all_devices[dvid])
1151             node['interfaces'] = interfaces
1152
1153         self.updateconfig()
1154
1155     def modify_devices(self):
1156         """
1157         Modify the devices configuration, asking for the user for the values.
1158
1159         """
1160
1161         for i in self._nodes.items():
1162             node = i[1]
1163             devices = node['devices']
1164             other_devices = devices['other_devices']
1165             kernel_devices = devices['kernel_devices']
1166             dpdk_devices = devices['dpdk_devices']
1167
1168             if other_devices:
1169                 self._modify_other_devices(node, other_devices,
1170                                            kernel_devices, dpdk_devices)
1171
1172                 # Get the devices again for this node
1173                 self._get_device(node)
1174                 devices = node['devices']
1175                 kernel_devices = devices['kernel_devices']
1176                 dpdk_devices = devices['dpdk_devices']
1177
1178             klen = len(kernel_devices)
1179             if klen > 0:
1180                 print("\nThese devices are safe to be used with VPP.\n")
1181                 VppPCIUtil.show_vpp_devices(kernel_devices)
1182                 question = "\nWould you like to use any of these " \
1183                            "device(s) for VPP [y/N]? "
1184                 answer = self._ask_user_yn(question, 'n')
1185                 if answer == 'y':
1186                     vppd = {}
1187                     for dit in kernel_devices.items():
1188                         dvid = dit[0]
1189                         device = dit[1]
1190                         question = "Would you like to use device {} ".format(dvid)
1191                         question += "for VPP [y/N]? "
1192                         answer = self._ask_user_yn(question, 'n')
1193                         if answer == 'y':
1194                             vppd[dvid] = device
1195                     for dit in vppd.items():
1196                         dvid = dit[0]
1197                         device = dit[1]
1198                         if 'unused' in device and len(
1199                                 device['unused']) != 0 and device['unused'][
1200                                 0] != '':
1201                             driver = device['unused'][0]
1202                             question = "Would you like to bind the driver {} for {} [y/N]? ".format(driver, dvid)
1203                             answer = self._ask_user_yn(question, 'n')
1204                             if answer == 'y':
1205                                 logging.debug('Binding device {} to driver {}'.format(dvid, driver))
1206                                 ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
1207                                 if ret:
1208                                     logging.debug('Could not bind device {}'.format(dvid))
1209                         dpdk_devices[dvid] = device
1210                         del kernel_devices[dvid]
1211
1212             dlen = len(dpdk_devices)
1213             if dlen > 0:
1214                 print("\nThese device(s) are already using DPDK.\n")
1215                 VppPCIUtil.show_vpp_devices(dpdk_devices,
1216                                             show_interfaces=False)
1217                 question = "\nWould you like to remove any of "
1218                 question += "these device(s) [y/N]? "
1219                 answer = self._ask_user_yn(question, 'n')
1220                 if answer == 'y':
1221                     vppdl = {}
1222                     for dit in dpdk_devices.items():
1223                         dvid = dit[0]
1224                         device = dit[1]
1225                         question = "Would you like to remove {} [y/N]? ". \
1226                             format(dvid)
1227                         answer = self._ask_user_yn(question, 'n')
1228                         if answer == 'y':
1229                             vppdl[dvid] = device
1230                     for dit in vppdl.items():
1231                         dvid = dit[0]
1232                         device = dit[1]
1233                         if 'unused' in device and len(
1234                                 device['unused']) != 0 and device['unused'][
1235                                 0] != '':
1236                             driver = device['unused'][0]
1237                             logging.debug(
1238                                 'Binding device {} to driver {}'.format(
1239                                     dvid, driver))
1240                             ret = VppPCIUtil.bind_vpp_device(node, driver,
1241                                                              dvid)
1242                             if ret:
1243                                 logging.debug(
1244                                     'Could not bind device {}'.format(dvid))
1245                             else:
1246                                 kernel_devices[dvid] = device
1247                                 del dpdk_devices[dvid]
1248
1249             interfaces = {}
1250             for dit in dpdk_devices.items():
1251                 dvid = dit[0]
1252                 device = dit[1]
1253                 VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
1254             node['interfaces'] = interfaces
1255
1256         self._update_auto_config()
1257         self.updateconfig()
1258
1259     def modify_huge_pages(self):
1260         """
1261         Modify the huge page configuration, asking for the user for the values.
1262
1263         """
1264
1265         for i in self._nodes.items():
1266             node = i[1]
1267
1268             total = node['hugepages']['actual_total']
1269             free = node['hugepages']['free']
1270             size = node['hugepages']['size']
1271             memfree = node['hugepages']['memfree'].split(' ')[0]
1272             hugesize = int(size.split(' ')[0])
1273             # The max number of huge pages should be no more than
1274             # 70% of total free memory
1275             maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // \
1276                 hugesize
1277             print("\nThere currently {} {} huge pages free.".format(
1278                 free, size))
1279             question = "Do you want to reconfigure the number of " \
1280                        "huge pages [y/N]? "
1281             answer = self._ask_user_yn(question, 'n')
1282             if answer == 'n':
1283                 node['hugepages']['total'] = total
1284                 continue
1285
1286             print("\nThere currently a total of {} huge pages.".
1287                   format(total))
1288             question = "How many huge pages do you want [{} - {}][{}]? ". \
1289                 format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
1290             answer = self._ask_user_range(question, 1024, maxpages, 1024)
1291             node['hugepages']['total'] = str(answer)
1292
1293         # Update auto-config.yaml
1294         self._update_auto_config()
1295
1296         # Rediscover just the hugepages
1297         self.get_hugepages()
1298
1299     def get_tcp_params(self):
1300         """
1301         Get the tcp configuration
1302
1303         """
1304         # maybe nothing to do here?
1305         self.updateconfig()
1306
1307     def acquire_tcp_params(self):
1308         """
1309         Ask the user for TCP stack configuration parameters
1310
1311         """
1312
1313         for i in self._nodes.items():
1314             node = i[1]
1315
1316             question = "\nHow many active-open / tcp client sessions are " \
1317                        "expected [0-10000000][0]? "
1318             answer = self._ask_user_range(question, 0, 10000000, 0)
1319             # Less than 10K is equivalent to 0
1320             if int(answer) < 10000:
1321                 answer = 0
1322             node['tcp']['active_open_sessions'] = answer
1323
1324             question = "How many passive-open / tcp server sessions are " \
1325                        "expected [0-10000000][0]? "
1326             answer = self._ask_user_range(question, 0, 10000000, 0)
1327             # Less than 10K is equivalent to 0
1328             if int(answer) < 10000:
1329                 answer = 0
1330             node['tcp']['passive_open_sessions'] = answer
1331
1332         # Update auto-config.yaml
1333         self._update_auto_config()
1334
1335         # Rediscover tcp parameters
1336         self.get_tcp_params()
1337
1338     @staticmethod
1339     def patch_qemu(node):
1340         """
1341         Patch qemu with the correct patches.
1342
1343         :param node: Node dictionary
1344         :type node: dict
1345         """
1346
1347         print('\nWe are patching the node "{}":\n'.format(node['host']))
1348         QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
1349
1350     @staticmethod
1351     def cpu_info(node):
1352         """
1353         print the CPU information
1354
1355         """
1356
1357         cpu = CpuUtils.get_cpu_info_per_node(node)
1358
1359         item = 'Model name'
1360         if item in cpu:
1361             print("{:>20}:    {}".format(item, cpu[item]))
1362         item = 'CPU(s)'
1363         if item in cpu:
1364             print("{:>20}:    {}".format(item, cpu[item]))
1365         item = 'Thread(s) per core'
1366         if item in cpu:
1367             print("{:>20}:    {}".format(item, cpu[item]))
1368         item = 'Core(s) per socket'
1369         if item in cpu:
1370             print("{:>20}:    {}".format(item, cpu[item]))
1371         item = 'Socket(s)'
1372         if item in cpu:
1373             print("{:>20}:    {}".format(item, cpu[item]))
1374         item = 'NUMA node(s)'
1375         numa_nodes = 0
1376         if item in cpu:
1377             numa_nodes = int(cpu[item])
1378         for i in range(0, numa_nodes):
1379             item = "NUMA node{} CPU(s)".format(i)
1380             print("{:>20}:    {}".format(item, cpu[item]))
1381         item = 'CPU max MHz'
1382         if item in cpu:
1383             print("{:>20}:    {}".format(item, cpu[item]))
1384         item = 'CPU min MHz'
1385         if item in cpu:
1386             print("{:>20}:    {}".format(item, cpu[item]))
1387
1388         if node['cpu']['smt_enabled']:
1389             smt = 'Enabled'
1390         else:
1391             smt = 'Disabled'
1392         print("{:>20}:    {}".format('SMT', smt))
1393
1394         # VPP Threads
1395         print("\nVPP Threads: (Name: Cpu Number)")
1396         vpp_processes = cpu['vpp_processes']
1397         for i in vpp_processes.items():
1398             print("  {:10}: {:4}".format(i[0], i[1]))
1399
1400     @staticmethod
1401     def device_info(node):
1402         """
1403         Show the device information.
1404
1405         """
1406
1407         if 'cpu' in node and 'total_mbufs' in node['cpu']:
1408             total_mbufs = node['cpu']['total_mbufs']
1409             if total_mbufs is not 0:
1410                 print("Total Number of Buffers: {}".format(total_mbufs))
1411
1412         vpp = VppPCIUtil(node)
1413         vpp.get_all_devices()
1414         linkup_devs = vpp.get_link_up_devices()
1415         if len(linkup_devs):
1416             print("\nDevices with link up (can not be used with VPP):")
1417             vpp.show_vpp_devices(linkup_devs, show_header=False)
1418             # for dev in linkup_devs:
1419             #    print ("    " + dev)
1420         kernel_devs = vpp.get_kernel_devices()
1421         if len(kernel_devs):
1422             print("\nDevices bound to kernel drivers:")
1423             vpp.show_vpp_devices(kernel_devs, show_header=False)
1424         else:
1425             print("\nNo devices bound to kernel drivers")
1426
1427         dpdk_devs = vpp.get_dpdk_devices()
1428         if len(dpdk_devs):
1429             print("\nDevices bound to DPDK drivers:")
1430             vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
1431                                  show_header=False)
1432         else:
1433             print("\nNo devices bound to DPDK drivers")
1434
1435         other_devs = vpp.get_other_devices()
1436         if len(other_devs):
1437             print("\nDevices not bound to Kernel or DPDK drivers:")
1438             vpp.show_vpp_devices(other_devs, show_interfaces=True,
1439                                  show_header=False)
1440         else:
1441             print("\nNo devices not bound to Kernel or DPDK drivers")
1442
1443         vpputl = VPPUtil()
1444         interfaces = vpputl.get_hardware(node)
1445         if interfaces == {}:
1446             return
1447
1448         print("\nDevices in use by VPP:")
1449
1450         if len(interfaces.items()) < 2:
1451             print("None")
1452             return
1453
1454         print("{:30} {:4} {:4} {:7} {:4} {:7}".
1455               format('Name', 'Numa', 'RXQs',
1456                      'RXDescs', 'TXQs', 'TXDescs'))
1457         for intf in sorted(interfaces.items()):
1458             name = intf[0]
1459             value = intf[1]
1460             if name == 'local0':
1461                 continue
1462             numa = rx_qs = rx_ds = tx_qs = tx_ds = ''
1463             if 'numa' in value:
1464                 numa = int(value['numa'])
1465             if 'rx queues' in value:
1466                 rx_qs = int(value['rx queues'])
1467             if 'rx descs' in value:
1468                 rx_ds = int(value['rx descs'])
1469             if 'tx queues' in value:
1470                 tx_qs = int(value['tx queues'])
1471             if 'tx descs' in value:
1472                 tx_ds = int(value['tx descs'])
1473
1474             print("{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".
1475                   format(name, numa, rx_qs, rx_ds, tx_qs, tx_ds))
1476
1477     @staticmethod
1478     def hugepage_info(node):
1479         """
1480         Show the huge page information.
1481
1482         """
1483
1484         hpg = VppHugePageUtil(node)
1485         hpg.show_huge_pages()
1486
1487     @staticmethod
1488     def min_system_resources(node):
1489         """
1490         Check the system for basic minimum resources, return true if
1491         there is enough.
1492
1493         :returns: boolean
1494         :rtype: dict
1495         """
1496
1497         min_sys_res = True
1498
1499         # CPUs
1500         if 'layout' in node['cpu']:
1501             total_cpus = len(node['cpu']['layout'])
1502             if total_cpus < 2:
1503                 print("\nThere is only {} CPU(s) available on this system. "
1504                       "This is not enough to run VPP.".format(total_cpus))
1505                 min_sys_res = False
1506
1507         # System Memory
1508         if 'free' in node['hugepages'] and \
1509                 'memfree' in node['hugepages'] and \
1510                 'size' in node['hugepages']:
1511             free = node['hugepages']['free']
1512             memfree = float(node['hugepages']['memfree'].split(' ')[0])
1513             hugesize = float(node['hugepages']['size'].split(' ')[0])
1514
1515             memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
1516             percentmemhugepages = (memhugepages / memfree) * 100
1517             if free is '0' and \
1518                     percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
1519                 print(
1520                     "\nThe System has only {} of free memory. You will not "
1521                     "be able to allocate enough Huge Pages for VPP.".format(
1522                         int(
1523                             memfree))
1524                 )
1525                 min_sys_res = False
1526
1527         return min_sys_res
1528
1529     def sys_info(self):
1530         """
1531         Print the system information
1532
1533         """
1534
1535         for i in self._nodes.items():
1536             print("\n==============================")
1537             name = i[0]
1538             node = i[1]
1539
1540             print("NODE: {}\n".format(name))
1541
1542             # CPU
1543             print("CPU:")
1544             self.cpu_info(node)
1545
1546             # Grub
1547             print("\nGrub Command Line:")
1548             if 'grub' in node:
1549                 print("  Current: {}".format(
1550                     node['grub']['current_cmdline']))
1551                 print("  Configured: {}".format(
1552                     node['grub']['default_cmdline']))
1553
1554             # Huge Pages
1555             print("\nHuge Pages:")
1556             self.hugepage_info(node)
1557
1558             # Devices
1559             print("\nDevices:")
1560             self.device_info(node)
1561
1562             # Status
1563             print("\nVPP Service Status:")
1564             state, errors = VPPUtil.status(node)
1565             print("  {}".format(state))
1566             for e in errors:
1567                 print("  {}".format(e))
1568
1569             # Minimum system resources
1570             self.min_system_resources(node)
1571
1572             print("\n==============================")
1573
1574     def _ipv4_interface_setup_questions(self, node):
1575         """
1576         Ask the user some questions and get a list of interfaces
1577         and IPv4 addresses associated with those interfaces
1578
1579         :param node: Node dictionary.
1580         :type node: dict
1581         :returns: A list or interfaces with ip addresses
1582         :rtype: dict
1583         """
1584
1585         vpputl = VPPUtil()
1586         interfaces = vpputl.get_hardware(node)
1587         if interfaces == {}:
1588             return
1589
1590         interfaces_with_ip = []
1591         for intf in sorted(interfaces.items()):
1592             name = intf[0]
1593             if name == 'local0':
1594                 continue
1595
1596             question = "Would you like add address to " \
1597                        "interface {} [Y/n]? ".format(name)
1598             answer = self._ask_user_yn(question, 'y')
1599             if answer == 'y':
1600                 address = {}
1601                 addr = self._ask_user_ipv4()
1602                 address['name'] = name
1603                 address['addr'] = addr
1604                 interfaces_with_ip.append(address)
1605
1606         return interfaces_with_ip
1607
1608     def ipv4_interface_setup(self):
1609         """
1610         After asking the user some questions, get a list of interfaces
1611         and IPv4 addresses associated with those interfaces
1612
1613         """
1614
1615         for i in self._nodes.items():
1616             node = i[1]
1617
1618             # Show the current interfaces with IP addresses
1619             current_ints = VPPUtil.get_int_ip(node)
1620             if current_ints is not {}:
1621                 print("\nThese are the current interfaces with IP addresses:")
1622                 for items in sorted(current_ints.items()):
1623                     name = items[0]
1624                     value = items[1]
1625                     if 'address' not in value:
1626                         address = 'Not Set'
1627                     else:
1628                         address = value['address']
1629                     print("{:30} {:20} {:10}".format(name, address,
1630                                                      value['state']))
1631                 question = "\nWould you like to keep this configuration " \
1632                            "[Y/n]? "
1633                 answer = self._ask_user_yn(question, 'y')
1634                 if answer == 'y':
1635                     continue
1636             else:
1637                 print("\nThere are currently no interfaces with IP "
1638                       "addresses.")
1639
1640             # Create a script that add the ip addresses to the interfaces
1641             # and brings the interfaces up
1642             ints_with_addrs = self._ipv4_interface_setup_questions(node)
1643             content = ''
1644             for ints in ints_with_addrs:
1645                 name = ints['name']
1646                 addr = ints['addr']
1647                 setipstr = 'set int ip address {} {}\n'.format(name, addr)
1648                 setintupstr = 'set int state {} up\n'.format(name)
1649                 content += setipstr + setintupstr
1650
1651             # Write the content to the script
1652             rootdir = node['rootdir']
1653             filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
1654             with open(filename, 'w+') as sfile:
1655                 sfile.write(content)
1656
1657             # Execute the script
1658             cmd = 'vppctl exec {}'.format(filename)
1659             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1660             if ret != 0:
1661                 logging.debug(stderr)
1662
1663             print("\nA script as been created at {}".format(filename))
1664             print("This script can be run using the following:")
1665             print("vppctl exec {}\n".format(filename))
1666
1667     def _create_vints_questions(self, node):
1668         """
1669         Ask the user some questions and get a list of interfaces
1670         and IPv4 addresses associated with those interfaces
1671
1672         :param node: Node dictionary.
1673         :type node: dict
1674         :returns: A list or interfaces with ip addresses
1675         :rtype: list
1676         """
1677
1678         vpputl = VPPUtil()
1679         interfaces = vpputl.get_hardware(node)
1680         if interfaces == {}:
1681             return []
1682
1683         # First delete all the Virtual interfaces
1684         for intf in sorted(interfaces.items()):
1685             name = intf[0]
1686             if name[:7] == 'Virtual':
1687                 cmd = 'vppctl delete vhost-user {}'.format(name)
1688                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1689                 if ret != 0:
1690                     logging.debug('{} failed on node {} {}'.format(
1691                         cmd, node['host'], stderr))
1692
1693         # Create a virtual interface, for each interface the user wants to use
1694         interfaces = vpputl.get_hardware(node)
1695         if interfaces == {}:
1696             return []
1697         interfaces_with_virtual_interfaces = []
1698         inum = 1
1699         for intf in sorted(interfaces.items()):
1700             name = intf[0]
1701             if name == 'local0':
1702                 continue
1703
1704             question = "Would you like connect this interface {} to " \
1705                        "the VM [Y/n]? ".format(name)
1706             answer = self._ask_user_yn(question, 'y')
1707             if answer == 'y':
1708                 sockfilename = '/var/run/vpp/{}.sock'.format(
1709                     name.replace('/', '_'))
1710                 if os.path.exists(sockfilename):
1711                     os.remove(sockfilename)
1712                 cmd = 'vppctl create vhost-user socket {} server'.format(
1713                     sockfilename)
1714                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1715                 if ret != 0:
1716                     raise RuntimeError(
1717                         "Couldn't execute the command {}, {}.".format(cmd,
1718                                                                       stderr))
1719                 vintname = stdout.rstrip('\r\n')
1720
1721                 cmd = 'chmod 777 {}'.format(sockfilename)
1722                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1723                 if ret != 0:
1724                     raise RuntimeError(
1725                         "Couldn't execute the command {}, {}.".format(cmd,
1726                                                                       stderr))
1727
1728                 interface = {'name': name,
1729                              'virtualinterface': '{}'.format(vintname),
1730                              'bridge': '{}'.format(inum)}
1731                 inum += 1
1732                 interfaces_with_virtual_interfaces.append(interface)
1733
1734         return interfaces_with_virtual_interfaces
1735
1736     def create_and_bridge_virtual_interfaces(self):
1737         """
1738         After asking the user some questions, create a VM and connect
1739         the interfaces to VPP interfaces
1740
1741         """
1742
1743         for i in self._nodes.items():
1744             node = i[1]
1745
1746             # Show the current bridge and interface configuration
1747             print("\nThis the current bridge configuration:")
1748             VPPUtil.show_bridge(node)
1749             question = "\nWould you like to keep this configuration [Y/n]? "
1750             answer = self._ask_user_yn(question, 'y')
1751             if answer == 'y':
1752                 continue
1753
1754             # Create a script that builds a bridge configuration with
1755             # physical interfaces and virtual interfaces
1756             ints_with_vints = self._create_vints_questions(node)
1757             content = ''
1758             for intf in ints_with_vints:
1759                 vhoststr = '\n'.join([
1760                     'comment { The following command creates the socket }',
1761                     'comment { and returns a virtual interface }',
1762                     'comment {{ create vhost-user socket '
1763                     '/var/run/vpp/sock{}.sock server }}\n'.format(
1764                         intf['bridge'])
1765                 ])
1766
1767                 setintdnstr = 'set interface state {} down\n'.format(
1768                     intf['name'])
1769
1770                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1771                     intf['name'], intf['bridge'])
1772                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1773                     intf['virtualinterface'], intf['bridge'])
1774
1775                 # set interface state VirtualEthernet/0/0/0 up
1776                 setintvststr = 'set interface state {} up\n'.format(
1777                     intf['virtualinterface'])
1778
1779                 # set interface state VirtualEthernet/0/0/0 down
1780                 setintupstr = 'set interface state {} up\n'.format(
1781                     intf['name'])
1782
1783                 content += vhoststr + setintdnstr + setintbrstr + \
1784                     setvintbrstr + setintvststr + setintupstr
1785
1786             # Write the content to the script
1787             rootdir = node['rootdir']
1788             filename = rootdir + \
1789                 '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
1790             with open(filename, 'w+') as sfile:
1791                 sfile.write(content)
1792
1793             # Execute the script
1794             cmd = 'vppctl exec {}'.format(filename)
1795             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1796             if ret != 0:
1797                 logging.debug(stderr)
1798
1799             print("\nA script as been created at {}".format(filename))
1800             print("This script can be run using the following:")
1801             print("vppctl exec {}\n".format(filename))
1802
1803     def _iperf_vm_questions(self, node):
1804         """
1805         Ask the user some questions and get a list of interfaces
1806         and IPv4 addresses associated with those interfaces
1807
1808         :param node: Node dictionary.
1809         :type node: dict
1810         :returns: A list or interfaces with ip addresses
1811         :rtype: list
1812         """
1813
1814         vpputl = VPPUtil()
1815         interfaces = vpputl.get_hardware(node)
1816         if interfaces == {}:
1817             return []
1818
1819         # First delete all the Virtual interfaces
1820         for intf in sorted(interfaces.items()):
1821             name = intf[0]
1822             if name[:7] == 'Virtual':
1823                 cmd = 'vppctl delete vhost-user {}'.format(name)
1824                 (ret, stdout, stderr) = vpputl.exec_command(cmd)
1825                 if ret != 0:
1826                     logging.debug('{} failed on node {} {}'.format(
1827                         cmd, node['host'], stderr))
1828
1829         # Create a virtual interface, for each interface the user wants to use
1830         interfaces = vpputl.get_hardware(node)
1831         if interfaces == {}:
1832             return []
1833         interfaces_with_virtual_interfaces = []
1834         inum = 1
1835
1836         while True:
1837             print('\nPlease pick one interface to connect to the iperf VM.')
1838             for intf in sorted(interfaces.items()):
1839                 name = intf[0]
1840                 if name == 'local0':
1841                     continue
1842
1843                 question = "Would you like connect this interface {} to " \
1844                            "the VM [y/N]? ".format(name)
1845                 answer = self._ask_user_yn(question, 'n')
1846                 if answer == 'y':
1847                     self._sockfilename = '/var/run/vpp/{}.sock'.format(
1848                         name.replace('/', '_'))
1849                     if os.path.exists(self._sockfilename):
1850                         os.remove(self._sockfilename)
1851                     cmd = 'vppctl create vhost-user socket {} server'.format(
1852                         self._sockfilename)
1853                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1854                     if ret != 0:
1855                         raise RuntimeError(
1856                             "Couldn't execute the command {}, {}.".format(
1857                                 cmd, stderr))
1858                     vintname = stdout.rstrip('\r\n')
1859
1860                     cmd = 'chmod 777 {}'.format(self._sockfilename)
1861                     (ret, stdout, stderr) = vpputl.exec_command(cmd)
1862                     if ret != 0:
1863                         raise RuntimeError(
1864                             "Couldn't execute the command {}, {}.".format(
1865                                 cmd, stderr))
1866
1867                     interface = {'name': name,
1868                                  'virtualinterface': '{}'.format(vintname),
1869                                  'bridge': '{}'.format(inum)}
1870                     inum += 1
1871                     interfaces_with_virtual_interfaces.append(interface)
1872                     return interfaces_with_virtual_interfaces
1873
1874     def create_and_bridge_iperf_virtual_interface(self):
1875         """
1876         After asking the user some questions, and create and bridge a
1877         virtual interface to be used with iperf VM
1878
1879         """
1880
1881         for i in self._nodes.items():
1882             node = i[1]
1883
1884             # Show the current bridge and interface configuration
1885             print("\nThis the current bridge configuration:")
1886             ifaces = VPPUtil.show_bridge(node)
1887             question = "\nWould you like to keep this configuration [Y/n]? "
1888             answer = self._ask_user_yn(question, 'y')
1889             if answer == 'y':
1890                 self._sockfilename = '/var/run/vpp/{}.sock'.format(
1891                     ifaces[0]['name'].replace('/', '_'))
1892                 if os.path.exists(self._sockfilename):
1893                     continue
1894
1895             # Create a script that builds a bridge configuration with
1896             # physical interfaces and virtual interfaces
1897             ints_with_vints = self._iperf_vm_questions(node)
1898             content = ''
1899             for intf in ints_with_vints:
1900                 vhoststr = '\n'.join([
1901                     'comment { The following command creates the socket }',
1902                     'comment { and returns a virtual interface }',
1903                     'comment {{ create vhost-user socket '
1904                     '/var/run/vpp/sock{}.sock server }}\n'.format(
1905                         intf['bridge'])
1906                 ])
1907
1908                 setintdnstr = 'set interface state {} down\n'.format(
1909                     intf['name'])
1910
1911                 setintbrstr = 'set interface l2 bridge {} {}\n'.format(
1912                     intf['name'], intf['bridge'])
1913                 setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
1914                     intf['virtualinterface'], intf['bridge'])
1915
1916                 # set interface state VirtualEthernet/0/0/0 up
1917                 setintvststr = 'set interface state {} up\n'.format(
1918                     intf['virtualinterface'])
1919
1920                 # set interface state VirtualEthernet/0/0/0 down
1921                 setintupstr = 'set interface state {} up\n'.format(
1922                     intf['name'])
1923
1924                 content += vhoststr + setintdnstr + setintbrstr + \
1925                     setvintbrstr + setintvststr + setintupstr
1926
1927             # Write the content to the script
1928             rootdir = node['rootdir']
1929             filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
1930             with open(filename, 'w+') as sfile:
1931                 sfile.write(content)
1932
1933             # Execute the script
1934             cmd = 'vppctl exec {}'.format(filename)
1935             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1936             if ret != 0:
1937                 logging.debug(stderr)
1938
1939             print("\nA script as been created at {}".format(filename))
1940             print("This script can be run using the following:")
1941             print("vppctl exec {}\n".format(filename))
1942
1943     @staticmethod
1944     def destroy_iperf_vm(name):
1945         """
1946         After asking the user some questions, create a VM and connect
1947         the interfaces to VPP interfaces
1948
1949         :param name: The name of the VM to be be destroyed
1950         :type name: str
1951         """
1952
1953         cmd = 'virsh list'
1954         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1955         if ret != 0:
1956             logging.debug(stderr)
1957             raise RuntimeError(
1958                 "Couldn't execute the command {} : {}".format(cmd, stderr))
1959
1960         if re.findall(name, stdout):
1961             cmd = 'virsh destroy {}'.format(name)
1962             (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
1963             if ret != 0:
1964                 logging.debug(stderr)
1965                 raise RuntimeError(
1966                     "Couldn't execute the command {} : {}".format(
1967                         cmd, stderr))
1968
1969     def create_iperf_vm(self, vmname):
1970         """
1971         After asking the user some questions, create a VM and connect
1972         the interfaces to VPP interfaces
1973
1974         """
1975
1976         # Read the iperf VM template file
1977         distro = VPPUtil.get_linux_distro()
1978         if distro[0] == 'Ubuntu':
1979             tfilename = \
1980                 '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(
1981                     self._rootdir)
1982         else:
1983             tfilename = \
1984                 '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(
1985                     self._rootdir)
1986
1987         with open(tfilename, 'r') as tfile:
1988             tcontents = tfile.read()
1989         tfile.close()
1990
1991         # Add the variables
1992         imagename = '{}/vpp/vpp-config/{}'.format(
1993             self._rootdir, IPERFVM_IMAGE)
1994         isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
1995         tcontents = tcontents.format(vmname=vmname, imagename=imagename,
1996                                      isoname=isoname,
1997                                      vhostsocketname=self._sockfilename)
1998
1999         # Write the xml
2000         ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
2001         with open(ifilename, 'w+') as ifile:
2002             ifile.write(tcontents)
2003         ifile.close()
2004
2005         cmd = 'virsh create {}'.format(ifilename)
2006         (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
2007         if ret != 0:
2008             logging.debug(stderr)
2009             raise RuntimeError(
2010                 "Couldn't execute the command {} : {}".format(cmd, stderr))