# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """DUT setup library."""
16 from robot.api import logger
18 from resources.libraries.python.constants import Constants
19 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
20 from resources.libraries.python.topology import NodeType, Topology
class DUTSetup(object):
    """Contains methods for setting up DUTs."""

    @staticmethod
    def get_service_logs(node, service):
        """Get specific service unit logs from node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        if DUTSetup.running_in_container(node):
            command = 'echo $(< /var/log/supervisord.log)'
        else:
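            # Restrict the journal to messages logged since the unit last
            # entered its active state (ActiveEnterTimestamp).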
            command = ('journalctl --no-pager --unit={name} '
                       '--since="$(echo `systemctl show -p '
                       'ActiveEnterTimestamp {name}` | '
                       'awk \'{{print $2 $3}}\')"'.
                       format(name=service))
        message = 'Node {host} failed to get logs from unit {name}'.\
            format(host=node['host'], name=service)

        exec_cmd_no_error(node, command, timeout=30, sudo=True,
                          message=message)

    @staticmethod
    def get_service_logs_on_all_duts(nodes, service):
        """Get specific service unit logs from all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.get_service_logs(node, service)

    @staticmethod
    def start_service(node, service):
        """Start up the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
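        # A restart is issued rather than a plain start, so that an already
        # running (possibly stale) instance of the service is replaced.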
        if DUTSetup.running_in_container(node):
            command = 'supervisorctl restart {name}'.format(name=service)
        else:
            command = 'service {name} restart'.format(name=service)
        message = 'Node {host} failed to start service {name}'.\
            format(host=node['host'], name=service)

        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)

        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def start_service_on_all_duts(nodes, service):
        """Start up the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.start_service(node, service)

    @staticmethod
    def stop_service(node, service):
        """Stop the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        if DUTSetup.running_in_container(node):
            command = 'supervisorctl stop {name}'.format(name=service)
        else:
            command = 'service {name} stop'.format(name=service)
        message = 'Node {host} failed to stop service {name}'.\
            format(host=node['host'], name=service)

        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)

        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def stop_service_on_all_duts(nodes, service):
        """Stop the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.stop_service(node, service)

130 """Run script over SSH to setup the DUT node.
132 :param node: DUT node to set up.
135 :raises Exception: If the DUT setup fails.
137 command = 'bash {0}/{1}/dut_setup.sh'.\
138 format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH)
139 message = 'DUT test setup script failed at node {name}'.\
140 format(name=node['host'])
142 exec_cmd_no_error(node, command, timeout=120, sudo=True,
    @staticmethod
    def setup_all_duts(nodes):
        """Run script over SSH to setup all DUT nodes.

        :param nodes: Topology nodes.
        :type nodes: dict
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.setup_dut(node)

    @staticmethod
    def get_vpp_pid(node):
        """Get PID of running VPP process.

        :param node: DUT node.
        :type node: dict
        :returns: PID
        :rtype: int
        :raises RuntimeError: If it is not possible to get the PID.
        """
        ssh = SSH()
        ssh.connect(node)

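        # The readout is attempted up to three times, as pidof can
        # transiently return nothing while VPP is starting or restarting.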
        for i in range(3):
            logger.trace('Try {}: Get VPP PID'.format(i))
            ret_code, stdout, stderr = ssh.exec_command('pidof vpp')

            if int(ret_code):
                raise RuntimeError('Not possible to get PID of VPP process '
                                   'on node: {0}\n {1}'.
                                   format(node['host'], stdout + stderr))

            if len(stdout.splitlines()) == 1:
                return int(stdout)
            elif not stdout.splitlines():
                logger.debug("No VPP PID found on node {0}".
                             format(node['host']))
                continue
            else:
                logger.debug("More than one VPP PID found on node {0}".
                             format(node['host']))
                ret_list = list()
                for line in stdout.splitlines():
                    ret_list.append(int(line))
                return ret_list

        return None

    @staticmethod
    def get_vpp_pids(nodes):
        """Get PID of running VPP process on all DUTs.

        :param nodes: DUT nodes.
        :type nodes: dict
        :returns: PIDs
        :rtype: dict
        """
        pids = dict()
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                pids[node['host']] = DUTSetup.get_vpp_pid(node)
        return pids

    @staticmethod
    def crypto_device_verify(node, force_init=False, numvfs=32):
        """Verify if Crypto QAT device virtual functions are initialized on
        the DUT. If force_init is set to True, then try to initialize or
        remove VFs on QAT.

        :param node: DUT node.
        :param force_init: If True then try to initialize to specific value.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :type node: dict
        :type force_init: bool
        :type numvfs: int
        :raises RuntimeError: If QAT VFs are not created and force init is set
            to False.
        """
        pci_addr = Topology.get_cryptodev(node)
        sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)

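        # (Re)initialize only when the current VF count differs from the
        # requested one.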
        if sriov_numvfs != numvfs:
            if force_init:
                # QAT is not initialized and we want to initialize with numvfs
                DUTSetup.crypto_device_init(node, numvfs)
            else:
                raise RuntimeError('QAT device failed to create VFs on {host}'.
                                   format(host=node['host']))

    @staticmethod
    def crypto_device_init(node, numvfs):
        """Init Crypto QAT device virtual functions on DUT.

        :param node: DUT node.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :type node: dict
        :type numvfs: int
        :raises RuntimeError: If failed to stop VPP or QAT failed to
            initialize.
        """
        pci_addr = Topology.get_cryptodev(node)

        # QAT device must be re-bound to kernel driver before initialization.
        DUTSetup.verify_kernel_module(node, 'qat_dh895xcc', force_load=True)

        # Stop VPP to prevent deadlock.
        DUTSetup.stop_service(node, Constants.VPP_UNIT)

        current_driver = DUTSetup.get_pci_dev_driver(
            node, pci_addr.replace(':', r'\:'))
        if current_driver is not None:
            DUTSetup.pci_driver_unbind(node, pci_addr)

        # Bind to kernel driver.
        DUTSetup.pci_driver_bind(node, pci_addr, 'dh895xcc')

        # Initialize QAT VFs.
        if numvfs > 0:
            DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)

    @staticmethod
    def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
        """Get PCI address of Virtual Function.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI address.
        :param vf_id: Virtual Function number.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :returns: Virtual Function PCI address.
        :rtype: str
        :raises RuntimeError: If failed to get Virtual Function PCI address.
        """
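        # Each VF appears as a virtfn<N> symlink under the PF's sysfs node;
        # the basename of the symlink target is the VF's own PCI address.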
282 "'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\
283 format(pci=pf_pci_addr, vf_id=vf_id)
284 message = 'Failed to get virtual function PCI address.'
286 stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
289 return stdout.strip()
    @staticmethod
    def get_sriov_numvfs(node, pf_pci_addr):
        """Get number of SR-IOV VFs.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI device address.
        :type node: dict
        :type pf_pci_addr: str
        :returns: Number of VFs.
        :rtype: int
        :raises RuntimeError: If PCI device is not SR-IOV capable.
        """
        command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\
            format(pci=pf_pci_addr.replace(':', r'\:'))
        message = 'PCI device {pci} is not a SR-IOV device.'.\
            format(pci=pf_pci_addr)

        for _ in range(3):
            stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
                                          message=message)
            try:
                sriov_numvfs = int(stdout)
            except ValueError:
                logger.trace('Reading sriov_numvfs info failed on {host}'.
                             format(host=node['host']))
            else:
                return sriov_numvfs

    @staticmethod
    def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
        """Init or reset SR-IOV virtual functions by setting their number on
        PCI device on DUT. Setting to zero removes all VFs.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI device address.
        :param numvfs: Number of VFs to initialize, 0 - removes the VFs.
        :type node: dict
        :type pf_pci_addr: str
        :type numvfs: int
        :raises RuntimeError: Failed to create VFs on PCI.
        """
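        # Note: sysfs refuses to change a non-zero VF count directly; the
        # count has to be reset to 0 before a new value can be written.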
333 "'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\
334 format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
335 message = 'Failed to create {num} VFs on {pci} device on {host}'.\
336 format(num=numvfs, pci=pf_pci_addr, host=node['host'])
338 exec_cmd_no_error(node, command, timeout=120, sudo=True,
    @staticmethod
    def pci_driver_unbind(node, pci_addr):
        """Unbind PCI device from current driver on node.

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :type node: dict
        :type pci_addr: str
        :raises RuntimeError: If PCI device unbind failed.
        """
        command = "sh -c "\
            "'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\
            format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
        message = 'Failed to unbind PCI device {pci} on {host}'.\
            format(pci=pci_addr, host=node['host'])

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def pci_driver_bind(node, pci_addr, driver):
        """Bind PCI device to driver on node.

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :param driver: Driver to bind.
        :type node: dict
        :type pci_addr: str
        :type driver: str
        :raises RuntimeError: If PCI device bind failed.
        """
        message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\
            format(pci=pci_addr, driver=driver, host=node['host'])

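        # Binding happens in three steps: set driver_override so the target
        # driver accepts the device, write the address to the driver's bind
        # file, then clear the override again.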
376 "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
377 format(driver=driver, pci=pci_addr.replace(':', r'\:'))
379 exec_cmd_no_error(node, command, timeout=120, sudo=True,
383 "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
384 format(pci=pci_addr, driver=driver)
386 exec_cmd_no_error(node, command, timeout=120, sudo=True,
390 "'echo | tee /sys/bus/pci/devices/{pci}/driver_override'".\
391 format(pci=pci_addr.replace(':', r'\:'))
393 exec_cmd_no_error(node, command, timeout=120, sudo=True,
    @staticmethod
    def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
        """Unbind Virtual Function from driver on node.

        :param node: DUT node.
        :param pf_pci_addr: PCI device address.
        :param vf_id: Virtual Function ID.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :raises RuntimeError: If Virtual Function unbind failed.
        """
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)

        command = "sh -c "\
            "'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\
            format(vf_pci_addr=vf_pci_addr, vf_path=vf_path)

        message = 'Failed to unbind VF {vf_pci_addr} on {host}'.\
            format(vf_pci_addr=vf_pci_addr, host=node['host'])

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
        """Bind Virtual Function to driver on node.

        :param node: DUT node.
        :param pf_pci_addr: PCI device address.
        :param vf_id: Virtual Function ID.
        :param driver: Driver to bind.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :type driver: str
        :raises RuntimeError: If PCI device bind failed.
        """
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)

        message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\
            format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host'])

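        # Same driver_override sequence as in pci_driver_bind, applied to
        # the VF's sysfs node.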
444 "'echo {driver} | tee {vf_path}/driver_override'".\
445 format(driver=driver, vf_path=vf_path)
447 exec_cmd_no_error(node, command, timeout=120, sudo=True,
451 "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
452 format(vf_pci_addr=vf_pci_addr, driver=driver)
454 exec_cmd_no_error(node, command, timeout=120, sudo=True,
458 "'echo | tee {vf_path}/driver_override'".\
459 format(vf_path=vf_path)
461 exec_cmd_no_error(node, command, timeout=120, sudo=True,
    @staticmethod
    def get_pci_dev_driver(node, pci_addr):
        """Get current PCI device driver on node.

        Get driver of the PCI device on the node using lspci. Example of
        parsed lspci output:

        # lspci -vmmks 0000:00:05.0
        Slot:   00:05.0
        Class:  Ethernet controller
        Vendor: Red Hat, Inc
        Device: Virtio network device
        SVendor: Red Hat, Inc
        SDevice: Device 0001
        Driver: virtio-pci

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :type node: dict
        :type pci_addr: str
        :returns: Driver or None.
        :raises RuntimeError: If PCI rescan or lspci command execution failed.
        :raises RuntimeError: If it is not possible to get the interface driver
            information from the node.
        """
        ssh = SSH()
        ssh.connect(node)

        for i in range(3):
            logger.trace('Try number {0}: Get PCI device driver'.format(i))

            cmd = 'lspci -vmmks {0}'.format(pci_addr)
            ret_code, stdout, _ = ssh.exec_command(cmd)
            if int(ret_code):
                raise RuntimeError("'{0}' failed on '{1}'"
                                   .format(cmd, node['host']))

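            # lspci -vmm output consists of 'Name:\tvalue' records; scan
            # them for the Driver: field.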
            for line in stdout.splitlines():
                if not line:
                    continue
                name = None
                value = None
                try:
                    name, value = line.split("\t", 1)
                except ValueError:
                    if name == "Driver:":
                        return None
                if name == 'Driver:':
                    return value

            if i < 2:
                logger.trace('Driver for PCI device {} not found, executing '
                             'pci rescan and retrying'.format(pci_addr))
                cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
                ret_code, _, _ = ssh.exec_command_sudo(cmd)
                if int(ret_code) != 0:
                    raise RuntimeError("'{0}' failed on '{1}'"
                                       .format(cmd, node['host']))

        return None

    @staticmethod
    def verify_kernel_module(node, module, force_load=False):
        """Verify if kernel module is loaded on node. If parameter force
        load is set to True, then try to load the module.

        :param node: Node.
        :param module: Module to verify.
        :param force_load: If True then try to load module.
        :type node: dict
        :type module: str
        :type force_load: bool
        :raises RuntimeError: If module is not loaded or failed to load.
        """
        command = 'grep -w {module} /proc/modules'.format(module=module)
        message = 'Kernel module {module} is not loaded on host {host}'.\
            format(module=module, host=node['host'])

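        # grep -w matches the module name as a whole word; if the module is
        # absent the command exits non-zero and RuntimeError is raised.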
        try:
            exec_cmd_no_error(node, command, timeout=30, sudo=False,
                              message=message)
        except RuntimeError:
            if force_load:
                # Module is not loaded and we want to load it
                DUTSetup.load_kernel_module(node, module)
            else:
                raise

    @staticmethod
    def verify_kernel_module_on_all_duts(nodes, module, force_load=False):
        """Verify if kernel module is loaded on all DUTs. If parameter force
        load is set to True, then try to load the module.

        :param nodes: DUT nodes.
        :param module: Module to verify.
        :param force_load: If True then try to load module.
        :type nodes: dict
        :type module: str
        :type force_load: bool
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.verify_kernel_module(node, module, force_load)

    @staticmethod
    def verify_uio_driver_on_all_duts(nodes):
        """Verify if uio driver kernel module is loaded on all DUTs. If the
        module is not present it will try to load it.

        :param nodes: DUT nodes.
        :type nodes: dict
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                uio_driver = Topology.get_uio_driver(node)
                DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)

    @staticmethod
    def load_kernel_module(node, module):
        """Load kernel module on node.

        :param node: DUT node.
        :param module: Module to load.
        :type node: dict
        :type module: str
        :returns: nothing
        :raises RuntimeError: If loading failed.
        """
        command = 'modprobe {module}'.format(module=module)
        message = 'Failed to load {module} on host {host}'.\
            format(module=module, host=node['host'])

        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)

    @staticmethod
    def install_vpp_on_all_duts(nodes, vpp_pkg_dir, vpp_rpm_pkgs, vpp_deb_pkgs):
        """Install VPP on all DUT nodes.

        :param nodes: Nodes in the topology.
        :param vpp_pkg_dir: Path to directory where VPP packages are stored.
        :param vpp_rpm_pkgs: List of VPP rpm packages to be installed.
        :param vpp_deb_pkgs: List of VPP deb packages to be installed.
        :type nodes: dict
        :type vpp_pkg_dir: str
        :type vpp_rpm_pkgs: list
        :type vpp_deb_pkgs: list
        :raises RuntimeError: If failed to remove or install VPP.
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                logger.debug("Installing VPP on node {0}".format(node['host']))

                ssh = SSH()
                ssh.connect(node)

                cmd = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
                ssh.exec_command_sudo(cmd, timeout=90)

                cmd = "[[ -f /etc/redhat-release ]]"
                return_code, _, _ = ssh.exec_command(cmd)
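                # /etc/redhat-release exists only on RPM-based distros; a
                # zero exit code selects the yum/rpm path, otherwise apt/dpkg.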
                if not int(return_code):
                    # workaround - uninstall existing vpp installation until
                    # start-testcase script is updated on all virl servers
                    rpm_pkgs_remove = "vpp*"
                    cmd_u = 'yum -y remove "{0}"'.format(rpm_pkgs_remove)
                    r_rcode, _, r_err = ssh.exec_command_sudo(cmd_u, timeout=90)
                    if int(r_rcode):
                        raise RuntimeError('Failed to remove previous VPP '
                                           'installation on host {0}:\n{1}'
                                           .format(node['host'], r_err))

                    rpm_pkgs = "*.rpm ".join(str(vpp_pkg_dir + pkg)
                                             for pkg in vpp_rpm_pkgs) + "*.rpm"
                    cmd_i = "rpm -ivh {0}".format(rpm_pkgs)
                    ret_code, _, err = ssh.exec_command_sudo(cmd_i, timeout=90)
                    if int(ret_code):
                        raise RuntimeError('Failed to install VPP on host {0}:'
                                           '\n{1}'.format(node['host'], err))
                    ssh.exec_command_sudo("rpm -qai vpp*")
                    logger.info("VPP installed on node {0}".
                                format(node['host']))
                else:
                    # workaround - uninstall existing vpp installation until
                    # start-testcase script is updated on all virl servers
                    deb_pkgs_remove = "vpp*"
                    cmd_u = 'apt-get purge -y "{0}"'.format(deb_pkgs_remove)
                    r_rcode, _, r_err = ssh.exec_command_sudo(cmd_u, timeout=90)
                    if int(r_rcode):
                        raise RuntimeError('Failed to remove previous VPP '
                                           'installation on host {0}:\n{1}'
                                           .format(node['host'], r_err))
                    deb_pkgs = "*.deb ".join(str(vpp_pkg_dir + pkg)
                                             for pkg in vpp_deb_pkgs) + "*.deb"
                    cmd_i = "dpkg -i --force-all {0}".format(deb_pkgs)
                    ret_code, _, err = ssh.exec_command_sudo(cmd_i, timeout=90)
                    if int(ret_code):
                        raise RuntimeError('Failed to install VPP on host {0}:'
                                           '\n{1}'.format(node['host'], err))
                    ssh.exec_command_sudo("dpkg -l | grep vpp")
                    logger.info("VPP installed on node {0}".
                                format(node['host']))

                ssh.disconnect(node)

    @staticmethod
    def running_in_container(node):
        """Check if topology node is running inside a container.

        :param node: Topology node.
        :type node: dict
        :returns: True if running in docker container, false if not or failed
            to detect.
        :rtype: bool
        """
        command = "fgrep docker /proc/1/cgroup"
        message = 'Failed to get cgroup settings.'
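        # Inside a Docker container, PID 1's cgroup listing mentions
        # 'docker'; on a bare host the grep exits non-zero.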
        try:
            exec_cmd_no_error(node, command, timeout=30, sudo=False,
                              message=message)
        except RuntimeError:
            return False
        return True

    @staticmethod
    def get_docker_mergeddir(node, uuid):
        """Get Docker overlay for MergedDir diff.

        :param node: DUT node.
        :param uuid: Docker UUID.
        :type node: dict
        :type uuid: str
        :returns: Docker container MergedDir.
        :rtype: str
        :raises RuntimeError: If getting output failed.
        """
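        # The quadruple braces survive str.format as literal double braces,
        # yielding the Go template '{{.GraphDriver.Data.MergedDir}}'.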
699 command = "docker inspect --format='"\
700 "{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
701 message = 'Failed to get directory of {uuid} on host {host}'.\
702 format(uuid=uuid, host=node['host'])
704 stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
705 return stdout.strip()
    @staticmethod
    def get_huge_page_size(node):
        """Get default size of huge pages in system.

        :param node: Node in the topology.
        :type node: dict
        :returns: Default size of free huge pages in system.
        :rtype: int
        :raises RuntimeError: If reading failed for three times.
        """
        ssh = SSH()
        ssh.connect(node)

        for _ in range(3):
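            # Hugepagesize in /proc/meminfo is reported in kB; awk extracts
            # the numeric field.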
            ret_code, stdout, _ = ssh.exec_command_sudo(
                "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
            if ret_code == 0:
                try:
                    huge_size = int(stdout)
                except ValueError:
                    logger.trace('Reading huge page size information failed')
                else:
                    break
        else:
            raise RuntimeError('Getting huge page size information failed.')
        return huge_size

    @staticmethod
    def get_huge_page_free(node, huge_size):
        """Get number of free huge pages in system.

        :param node: Node in the topology.
        :param huge_size: Size of hugepages.
        :type node: dict
        :type huge_size: int
        :returns: Number of free huge pages in system.
        :rtype: int
        :raises RuntimeError: If reading failed for three times.
        """
        # TODO: add numa aware option
        ssh = SSH()
        ssh.connect(node)

        for _ in range(3):
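            # Per-size hugepage counters are exposed under
            # /sys/kernel/mm/hugepages/hugepages-<size>kB/.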
            ret_code, stdout, _ = ssh.exec_command_sudo(
                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
                format(huge_size))
            if ret_code == 0:
                try:
                    huge_free = int(stdout)
                except ValueError:
                    logger.trace('Reading free huge pages information failed')
                else:
                    break
        else:
            raise RuntimeError('Getting free huge pages information failed.')
        return huge_free

    @staticmethod
    def get_huge_page_total(node, huge_size):
        """Get total number of huge pages in system.

        :param node: Node in the topology.
        :param huge_size: Size of hugepages.
        :type node: dict
        :type huge_size: int
        :returns: Total number of huge pages in system.
        :rtype: int
        :raises RuntimeError: If reading failed for three times.
        """
        # TODO: add numa aware option
        ssh = SSH()
        ssh.connect(node)

        for _ in range(3):
            ret_code, stdout, _ = ssh.exec_command_sudo(
                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
                format(huge_size))
            if ret_code == 0:
                try:
                    huge_total = int(stdout)
                except ValueError:
                    logger.trace('Reading total huge pages information failed')
                else:
                    break
        else:
            raise RuntimeError('Getting total huge pages information failed.')
        return huge_total

    @staticmethod
    def check_huge_page(node, huge_mnt, mem_size, allocate=False):
        """Check if there are enough HugePages in system. If allocate is set
        to true, try to allocate more HugePages.

        :param node: Node in the topology.
        :param huge_mnt: HugePage mount point.
        :param mem_size: Requested memory in MB.
        :param allocate: Whether to allocate more memory if not enough.
        :type node: dict
        :type huge_mnt: str
        :type mem_size: str
        :type allocate: bool
        :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
            or increasing map count failed.
        """
        # TODO: split function into smaller parts.
        ssh = SSH()
        ssh.connect(node)

        # Get huge pages information
        huge_size = DUTSetup.get_huge_page_size(node)
        huge_free = DUTSetup.get_huge_page_free(node, huge_size)
        huge_total = DUTSetup.get_huge_page_total(node, huge_size)

        # Check if memory requested is available on host
        if (mem_size * 1024) > (huge_free * huge_size):
            # If we want to allocate hugepage dynamically
            if allocate:
                mem_needed = (mem_size * 1024) - (huge_free * huge_size)
                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
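                # Twice the shortfall is requested on top of the current
                # total, presumably to leave some headroom.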
                max_map_count = huge_to_allocate * 4
                # Increase maximum number of memory map areas a process may have
                ret_code, _, _ = ssh.exec_command_sudo(
                    'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
                    format(max_map_count))
                if int(ret_code) != 0:
                    raise RuntimeError('Increase map count failed on {host}'.
                                       format(host=node['host']))
                # Increase hugepage count
                ret_code, _, _ = ssh.exec_command_sudo(
                    'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
                    format(huge_to_allocate))
                if int(ret_code) != 0:
                    raise RuntimeError('Increase huge pages failed on {host}'.
                                       format(host=node['host']))
            # If we do not want to allocate dynamically, end with an error
            else:
                raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
                                   format(huge_free, huge_free * huge_size))

        # Check if huge pages mount point exists
        has_huge_mnt = False
        ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
        if int(ret_code) == 0:
            for line in stdout.splitlines():
                # Try to find something like:
                # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
                mount = line.split()
                if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
                    has_huge_mnt = True
                    break

        # If huge page mount point does not exist, create one
        if not has_huge_mnt:
            ret_code, _, _ = ssh.exec_command_sudo(
                'mkdir -p {mnt}'.format(mnt=huge_mnt))
            if int(ret_code) != 0:
                raise RuntimeError('Create mount dir failed on {host}'.
                                   format(host=node['host']))
            ret_code, _, _ = ssh.exec_command_sudo(
                'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
                format(mnt=huge_mnt))
            if int(ret_code) != 0:
                raise RuntimeError('Mount huge pages failed on {host}'.
                                   format(host=node['host']))