1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """DUT setup library."""
16 from robot.api import logger
18 from resources.libraries.python.Constants import Constants
19 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
20 from resources.libraries.python.topology import NodeType, Topology
class DUTSetup(object):
    """Contains methods for setting up DUTs.

    All methods are static helpers operating on topology node dictionaries;
    they execute commands on remote nodes over SSH.
    """
27 def get_service_logs(node, service):
28 """Get specific service unit logs from node.
30 :param node: Node in the topology.
31 :param service: Service unit name.
35 if DUTSetup.running_in_container(node):
36 command = ('echo $(< /var/log/supervisord.log);'
37 'echo $(< /tmp/*supervisor*.log)')
39 command = ('journalctl --no-pager --unit={name} '
40 '--since="$(echo `systemctl show -p '
41 'ActiveEnterTimestamp {name}` | '
42 'awk \'{{print $2 $3}}\')"'.
44 message = 'Node {host} failed to get logs from unit {name}'.\
45 format(host=node['host'], name=service)
47 exec_cmd_no_error(node, command, timeout=30, sudo=True,
51 def get_service_logs_on_all_duts(nodes, service):
52 """Get specific service unit logs from all DUTs.
54 :param nodes: Nodes in the topology.
55 :param service: Service unit name.
59 for node in nodes.values():
60 if node['type'] == NodeType.DUT:
61 DUTSetup.get_service_logs(node, service)
64 def restart_service(node, service):
65 """Restart the named service on node.
67 :param node: Node in the topology.
68 :param service: Service unit name.
72 if DUTSetup.running_in_container(node):
73 command = 'supervisorctl restart {name}'.format(name=service)
75 command = 'service {name} restart'.format(name=service)
76 message = 'Node {host} failed to restart service {name}'.\
77 format(host=node['host'], name=service)
79 exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
81 DUTSetup.get_service_logs(node, service)
84 def restart_service_on_all_duts(nodes, service):
85 """Restart the named service on all DUTs.
87 :param node: Nodes in the topology.
88 :param service: Service unit name.
92 for node in nodes.values():
93 if node['type'] == NodeType.DUT:
94 DUTSetup.restart_service(node, service)
97 def start_service(node, service):
98 """Start up the named service on node.
100 :param node: Node in the topology.
101 :param service: Service unit name.
105 # TODO: change command to start once all parent function updated.
106 if DUTSetup.running_in_container(node):
107 command = 'supervisorctl restart {name}'.format(name=service)
109 command = 'service {name} restart'.format(name=service)
110 message = 'Node {host} failed to start service {name}'.\
111 format(host=node['host'], name=service)
113 exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
115 DUTSetup.get_service_logs(node, service)
118 def start_service_on_all_duts(nodes, service):
119 """Start up the named service on all DUTs.
121 :param node: Nodes in the topology.
122 :param service: Service unit name.
126 for node in nodes.values():
127 if node['type'] == NodeType.DUT:
128 DUTSetup.start_service(node, service)
131 def stop_service(node, service):
132 """Stop the named service on node.
134 :param node: Node in the topology.
135 :param service: Service unit name.
139 if DUTSetup.running_in_container(node):
140 command = 'supervisorctl stop {name}'.format(name=service)
142 command = 'service {name} stop'.format(name=service)
143 message = 'Node {host} failed to stop service {name}'.\
144 format(host=node['host'], name=service)
146 exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
148 DUTSetup.get_service_logs(node, service)
151 def stop_service_on_all_duts(nodes, service):
152 """Stop the named service on all DUTs.
154 :param node: Nodes in the topology.
155 :param service: Service unit name.
159 for node in nodes.values():
160 if node['type'] == NodeType.DUT:
161 DUTSetup.stop_service(node, service)
165 """Run script over SSH to setup the DUT node.
167 :param node: DUT node to set up.
170 :raises Exception: If the DUT setup fails.
172 command = 'bash {0}/{1}/dut_setup.sh'.\
173 format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH)
174 message = 'DUT test setup script failed at node {name}'.\
175 format(name=node['host'])
177 exec_cmd_no_error(node, command, timeout=120, sudo=True,
181 def setup_all_duts(nodes):
182 """Run script over SSH to setup all DUT nodes.
184 :param nodes: Topology nodes.
187 for node in nodes.values():
188 if node['type'] == NodeType.DUT:
189 DUTSetup.setup_dut(node)
192 def get_vpp_pid(node):
193 """Get PID of running VPP process.
195 :param node: DUT node.
199 :raises RuntimeError: If it is not possible to get the PID.
205 logger.trace('Try {}: Get VPP PID'.format(i))
206 ret_code, stdout, stderr = ssh.exec_command('pidof vpp')
209 raise RuntimeError('Not possible to get PID of VPP process '
210 'on node: {0}\n {1}'.
211 format(node['host'], stdout + stderr))
213 pid_list = stdout.split()
214 if len(pid_list) == 1:
217 logger.debug("No VPP PID found on node {0}".
218 format(node['host']))
221 logger.debug("More then one VPP PID found on node {0}".
222 format(node['host']))
223 return [int(pid) for pid in pid_list]
228 def get_vpp_pids(nodes):
229 """Get PID of running VPP process on all DUTs.
231 :param nodes: DUT nodes.
237 for node in nodes.values():
238 if node['type'] == NodeType.DUT:
239 pids[node['host']] = DUTSetup.get_vpp_pid(node)
243 def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
244 """Verify if Crypto QAT device virtual functions are initialized on all
245 DUTs. If parameter force initialization is set to True, then try to
246 initialize or remove VFs on QAT.
248 :param node: DUT node.
249 :crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
250 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
251 :param force_init: If True then try to initialize to specific value.
253 :type crypto_type: string
255 :type force_init: bool
257 :raises RuntimeError: If QAT VFs are not created and force init is set
260 pci_addr = Topology.get_cryptodev(node)
261 sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)
263 if sriov_numvfs != numvfs:
265 # QAT is not initialized and we want to initialize with numvfs
266 DUTSetup.crypto_device_init(node, crypto_type, numvfs)
268 raise RuntimeError('QAT device failed to create VFs on {host}'.
269 format(host=node['host']))
272 def crypto_device_init(node, crypto_type, numvfs):
273 """Init Crypto QAT device virtual functions on DUT.
275 :param node: DUT node.
276 :crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
277 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
279 :type crypto_type: string
282 :raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
284 if crypto_type == "HW_DH895xcc":
285 kernel_mod = "qat_dh895xcc"
286 kernel_drv = "dh895xcc"
287 elif crypto_type == "HW_C3xxx":
288 kernel_mod = "qat_c3xxx"
291 raise RuntimeError('Unsupported crypto device type on {host}'.
292 format(host=node['host']))
294 pci_addr = Topology.get_cryptodev(node)
296 # QAT device must be re-bound to kernel driver before initialization.
297 DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)
299 # Stop VPP to prevent deadlock.
300 DUTSetup.stop_service(node, Constants.VPP_UNIT)
302 current_driver = DUTSetup.get_pci_dev_driver(
303 node, pci_addr.replace(':', r'\:'))
304 if current_driver is not None:
305 DUTSetup.pci_driver_unbind(node, pci_addr)
307 # Bind to kernel driver.
308 DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)
310 # Initialize QAT VFs.
312 DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)
315 def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
316 """Get PCI address of Virtual Function.
318 :param node: DUT node.
319 :param pf_pci_addr: Physical Function PCI address.
320 :param vf_id: Virtual Function number.
322 :type pf_pci_addr: str
324 :returns: Virtual Function PCI address.
326 :raises RuntimeError: If failed to get Virtual Function PCI address.
329 "'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\
330 format(pci=pf_pci_addr, vf_id=vf_id)
331 message = 'Failed to get virtual function PCI address.'
333 stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
336 return stdout.strip()
339 def get_sriov_numvfs(node, pf_pci_addr):
340 """Get number of SR-IOV VFs.
342 :param node: DUT node.
343 :param pf_pci_addr: Physical Function PCI device address.
345 :type pf_pci_addr: str
346 :returns: Number of VFs.
348 :raises RuntimeError: If PCI device is not SR-IOV capable.
350 command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\
351 format(pci=pf_pci_addr.replace(':', r'\:'))
352 message = 'PCI device {pci} is not a SR-IOV device.'.\
353 format(pci=pf_pci_addr)
356 stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
359 sriov_numvfs = int(stdout)
361 logger.trace('Reading sriov_numvfs info failed on {host}'.
362 format(host=node['host']))
367 def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
368 """Init or reset SR-IOV virtual functions by setting its number on PCI
369 device on DUT. Setting to zero removes all VFs.
371 :param node: DUT node.
372 :param pf_pci_addr: Physical Function PCI device address.
373 :param numvfs: Number of VFs to initialize, 0 - removes the VFs.
375 :type pf_pci_addr: str
377 :raises RuntimeError: Failed to create VFs on PCI.
380 "'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\
381 format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
382 message = 'Failed to create {num} VFs on {pci} device on {host}'.\
383 format(num=numvfs, pci=pf_pci_addr, host=node['host'])
385 exec_cmd_no_error(node, command, timeout=120, sudo=True,
389 def pci_driver_unbind(node, pci_addr):
390 """Unbind PCI device from current driver on node.
392 :param node: DUT node.
393 :param pci_addr: PCI device address.
396 :raises RuntimeError: If PCI device unbind failed.
399 "'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\
400 format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
401 message = 'Failed to unbind PCI device {pci} on {host}'.\
402 format(pci=pci_addr, host=node['host'])
404 exec_cmd_no_error(node, command, timeout=120, sudo=True,
408 def pci_driver_bind(node, pci_addr, driver):
409 """Bind PCI device to driver on node.
411 :param node: DUT node.
412 :param pci_addr: PCI device address.
413 :param driver: Driver to bind.
417 :raises RuntimeError: If PCI device bind failed.
419 message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\
420 format(pci=pci_addr, driver=driver, host=node['host'])
423 "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
424 format(driver=driver, pci=pci_addr.replace(':', r'\:'))
426 exec_cmd_no_error(node, command, timeout=120, sudo=True,
430 "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
431 format(pci=pci_addr, driver=driver)
433 exec_cmd_no_error(node, command, timeout=120, sudo=True,
437 "'echo | tee /sys/bus/pci/devices/{pci}/driver_override'".\
438 format(pci=pci_addr.replace(':', r'\:'))
440 exec_cmd_no_error(node, command, timeout=120, sudo=True,
444 def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
445 """Unbind Virtual Function from driver on node.
447 :param node: DUT node.
448 :param pf_pci_addr: PCI device address.
449 :param vf_id: Virtual Function ID.
451 :type pf_pci_addr: str
453 :raises RuntimeError: If Virtual Function unbind failed.
455 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
456 vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
457 format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
460 "'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\
461 format(vf_pci_addr=vf_pci_addr, vf_path=vf_path)
463 message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\
464 format(vf_pci_addr=vf_pci_addr, host=node['host'])
466 exec_cmd_no_error(node, command, timeout=120, sudo=True,
470 def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
471 """Bind Virtual Function to driver on node.
473 :param node: DUT node.
474 :param pf_pci_addr: PCI device address.
475 :param vf_id: Virtual Function ID.
476 :param driver: Driver to bind.
478 :type pf_pci_addr: str
481 :raises RuntimeError: If PCI device bind failed.
483 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
484 vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
485 format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
487 message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\
488 format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host'])
491 "'echo {driver} | tee {vf_path}/driver_override'".\
492 format(driver=driver, vf_path=vf_path)
494 exec_cmd_no_error(node, command, timeout=120, sudo=True,
498 "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
499 format(vf_pci_addr=vf_pci_addr, driver=driver)
501 exec_cmd_no_error(node, command, timeout=120, sudo=True,
505 "'echo | tee {vf_path}/driver_override'".\
506 format(vf_path=vf_path)
508 exec_cmd_no_error(node, command, timeout=120, sudo=True,
512 def get_pci_dev_driver(node, pci_addr):
513 """Get current PCI device driver on node.
516 # lspci -vmmks 0000:00:05.0
518 Class: Ethernet controller
520 Device: Virtio network device
521 SVendor: Red Hat, Inc
526 :param node: DUT node.
527 :param pci_addr: PCI device address.
530 :returns: Driver or None
531 :raises RuntimeError: If PCI rescan or lspci command execution failed.
532 :raises RuntimeError: If it is not possible to get the interface driver
533 information from the node.
539 logger.trace('Try number {0}: Get PCI device driver'.format(i))
541 cmd = 'lspci -vmmks {0}'.format(pci_addr)
542 ret_code, stdout, _ = ssh.exec_command(cmd)
544 raise RuntimeError("'{0}' failed on '{1}'"
545 .format(cmd, node['host']))
547 for line in stdout.splitlines():
553 name, value = line.split("\t", 1)
555 if name == "Driver:":
557 if name == 'Driver:':
561 logger.trace('Driver for PCI device {} not found, executing '
562 'pci rescan and retrying'.format(pci_addr))
563 cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
564 ret_code, _, _ = ssh.exec_command_sudo(cmd)
565 if int(ret_code) != 0:
566 raise RuntimeError("'{0}' failed on '{1}'"
567 .format(cmd, node['host']))
572 def verify_kernel_module(node, module, force_load=False):
573 """Verify if kernel module is loaded on node. If parameter force
574 load is set to True, then try to load the modules.
577 :param module: Module to verify.
578 :param force_load: If True then try to load module.
581 :type force_load: bool
582 :raises RuntimeError: If module is not loaded or failed to load.
584 command = 'grep -w {module} /proc/modules'.format(module=module)
585 message = 'Kernel module {module} is not loaded on host {host}'.\
586 format(module=module, host=node['host'])
589 exec_cmd_no_error(node, command, timeout=30, sudo=False,
593 # Module is not loaded and we want to load it
594 DUTSetup.load_kernel_module(node, module)
599 def verify_kernel_module_on_all_duts(nodes, module, force_load=False):
600 """Verify if kernel module is loaded on all DUTs. If parameter force
601 load is set to True, then try to load the modules.
603 :param node: DUT nodes.
604 :param module: Module to verify.
605 :param force_load: If True then try to load module.
608 :type force_load: bool
610 for node in nodes.values():
611 if node['type'] == NodeType.DUT:
612 DUTSetup.verify_kernel_module(node, module, force_load)
615 def verify_uio_driver_on_all_duts(nodes):
616 """Verify if uio driver kernel module is loaded on all DUTs. If module
617 is not present it will try to load it.
619 :param node: DUT nodes.
622 for node in nodes.values():
623 if node['type'] == NodeType.DUT:
624 uio_driver = Topology.get_uio_driver(node)
625 DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
628 def load_kernel_module(node, module):
629 """Load kernel module on node.
631 :param node: DUT node.
632 :param module: Module to load.
636 :raises RuntimeError: If loading failed.
638 command = 'modprobe {module}'.format(module=module)
639 message = 'Failed to load {module} on host {host}'.\
640 format(module=module, host=node['host'])
642 exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
645 def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
646 """Install VPP on all DUT nodes. Start the VPP service in case of
647 systemd is not available or does not support autostart.
649 :param nodes: Nodes in the topology.
650 :param vpp_pkg_dir: Path to directory where VPP packages are stored.
652 :type vpp_pkg_dir: str
653 :raises RuntimeError: If failed to remove or install VPP.
655 for node in nodes.values():
656 message = 'Failed to install VPP on host {host}!'.\
657 format(host=node['host'])
658 if node['type'] == NodeType.DUT:
659 command = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
660 exec_cmd_no_error(node, command, sudo=True)
662 command = '. /etc/lsb-release; echo "${DISTRIB_ID}"'
663 stdout, _ = exec_cmd_no_error(node, command)
665 if stdout.strip() == 'Ubuntu':
666 exec_cmd_no_error(node, 'apt-get purge -y "*vpp*" || true',
667 timeout=120, sudo=True)
668 exec_cmd_no_error(node, 'dpkg -i --force-all {dir}*.deb'.
669 format(dir=vpp_pkg_dir), timeout=120,
670 sudo=True, message=message)
671 exec_cmd_no_error(node, 'dpkg -l | grep vpp', sudo=True)
672 if DUTSetup.running_in_container(node):
673 DUTSetup.restart_service(node, Constants.VPP_UNIT)
675 exec_cmd_no_error(node, 'yum -y remove "*vpp*" || true',
676 timeout=120, sudo=True)
677 exec_cmd_no_error(node, 'rpm -ivh {dir}*.rpm'.
678 format(dir=vpp_pkg_dir), timeout=120,
679 sudo=True, message=message)
680 exec_cmd_no_error(node, 'rpm -qai *vpp*', sudo=True)
681 DUTSetup.restart_service(node, Constants.VPP_UNIT)
684 def running_in_container(node):
685 """This method tests if topology node is running inside container.
687 :param node: Topology node.
689 :returns: True if running in docker container, false if not or failed
693 command = "fgrep docker /proc/1/cgroup"
694 message = 'Failed to get cgroup settings.'
696 exec_cmd_no_error(node, command, timeout=30, sudo=False,
703 def get_docker_mergeddir(node, uuid):
704 """Get Docker overlay for MergedDir diff.
706 :param node: DUT node.
707 :param uuid: Docker UUID.
710 :returns: Docker container MergedDir.
712 :raises RuntimeError: If getting output failed.
714 command = "docker inspect --format='"\
715 "{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
716 message = 'Failed to get directory of {uuid} on host {host}'.\
717 format(uuid=uuid, host=node['host'])
719 stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
720 return stdout.strip()
723 def get_huge_page_size(node):
724 """Get default size of huge pages in system.
726 :param node: Node in the topology.
728 :returns: Default size of free huge pages in system.
730 :raises RuntimeError: If reading failed for three times.
736 ret_code, stdout, _ = ssh.exec_command_sudo(
737 "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
740 huge_size = int(stdout)
742 logger.trace('Reading huge page size information failed')
746 raise RuntimeError('Getting huge page size information failed.')
750 def get_huge_page_free(node, huge_size):
751 """Get number of free huge pages in system.
753 :param node: Node in the topology.
754 :param huge_size: Size of hugepages.
757 :returns: Number of free huge pages in system.
759 :raises RuntimeError: If reading failed for three times.
761 # TODO: add numa aware option
766 ret_code, stdout, _ = ssh.exec_command_sudo(
767 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
771 huge_free = int(stdout)
773 logger.trace('Reading free huge pages information failed')
777 raise RuntimeError('Getting free huge pages information failed.')
781 def get_huge_page_total(node, huge_size):
782 """Get total number of huge pages in system.
784 :param node: Node in the topology.
785 :param huge_size: Size of hugepages.
789 :returns: Total number of huge pages in system.
791 :raises RuntimeError: If reading failed for three times.
793 # TODO: add numa aware option
798 ret_code, stdout, _ = ssh.exec_command_sudo(
799 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
803 huge_total = int(stdout)
805 logger.trace('Reading total huge pages information failed')
809 raise RuntimeError('Getting total huge pages information failed.')
813 def check_huge_page(node, huge_mnt, mem_size, allocate=False):
814 """Check if there is enough HugePages in system. If allocate is set to
815 true, try to allocate more HugePages.
817 :param node: Node in the topology.
818 :param huge_mnt: HugePage mount point.
819 :param mem_size: Requested memory in MB.
820 :param allocate: Whether to allocate more memory if not enough.
826 :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
827 or increasing map count failed.
829 # TODO: split function into smaller parts.
833 # Get huge pages information
834 huge_size = DUTSetup.get_huge_page_size(node)
835 huge_free = DUTSetup.get_huge_page_free(node, huge_size)
836 huge_total = DUTSetup.get_huge_page_total(node, huge_size)
838 # Check if memory reqested is available on host
839 if (mem_size * 1024) > (huge_free * huge_size):
840 # If we want to allocate hugepage dynamically
842 mem_needed = (mem_size * 1024) - (huge_free * huge_size)
843 huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
844 max_map_count = huge_to_allocate*4
845 # Increase maximum number of memory map areas a process may have
846 ret_code, _, _ = ssh.exec_command_sudo(
847 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
848 format(max_map_count))
849 if int(ret_code) != 0:
850 raise RuntimeError('Increase map count failed on {host}'.
851 format(host=node['host']))
852 # Increase hugepage count
853 ret_code, _, _ = ssh.exec_command_sudo(
854 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
855 format(huge_to_allocate))
856 if int(ret_code) != 0:
857 raise RuntimeError('Mount huge pages failed on {host}'.
858 format(host=node['host']))
859 # If we do not want to allocate dynamicaly end with error
861 raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
862 format(huge_free, huge_free * huge_size))
863 # Check if huge pages mount point exist
865 ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
866 if int(ret_code) == 0:
867 for line in stdout.splitlines():
868 # Try to find something like:
869 # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
871 if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
874 # If huge page mount point not exist create one
876 ret_code, _, _ = ssh.exec_command_sudo(
877 'mkdir -p {mnt}'.format(mnt=huge_mnt))
878 if int(ret_code) != 0:
879 raise RuntimeError('Create mount dir failed on {host}'.
880 format(host=node['host']))
881 ret_code, _, _ = ssh.exec_command_sudo(
882 'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
883 format(mnt=huge_mnt))
884 if int(ret_code) != 0:
885 raise RuntimeError('Mount huge pages failed on {host}'.
886 format(host=node['host']))