1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """DUT setup library."""
16 from robot.api import logger
18 from resources.libraries.python.Constants import Constants
19 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
20 from resources.libraries.python.topology import NodeType, Topology
class DUTSetup(object):
    """Contains methods for setting up DUTs."""

    @staticmethod
    def get_service_logs(node, service):
        """Get specific service unit logs from node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        if DUTSetup.running_in_container(node):
            # Containers are supervised by supervisord, not systemd.
            command = ('echo $(< /tmp/*supervisor*.log)')
        else:
            # Only show journal entries since the unit last became active.
            command = ('journalctl --no-pager --unit={name} '
                       '--since="$(echo `systemctl show -p '
                       'ActiveEnterTimestamp {name}` | '
                       'awk \'{{print $2 $3}}\')"'.
                       format(name=service))
        message = 'Node {host} failed to get logs from unit {name}'.\
            format(host=node['host'], name=service)

        exec_cmd_no_error(node, command, timeout=30, sudo=True,
                          message=message)

    @staticmethod
    def get_service_logs_on_all_duts(nodes, service):
        """Get specific service unit logs from all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.get_service_logs(node, service)

    @staticmethod
    def restart_service(node, service):
        """Restart the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        if DUTSetup.running_in_container(node):
            command = 'supervisorctl restart {name}'.format(name=service)
        else:
            command = 'service {name} restart'.format(name=service)
        message = 'Node {host} failed to restart service {name}'.\
            format(host=node['host'], name=service)

        exec_cmd_no_error(
            node, command, timeout=180, sudo=True, message=message)

        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def restart_service_on_all_duts(nodes, service):
        """Restart the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.restart_service(node, service)

    @staticmethod
    def start_service(node, service):
        """Start up the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        # TODO: change command to start once all parent function updated.
        if DUTSetup.running_in_container(node):
            command = 'supervisorctl restart {name}'.format(name=service)
        else:
            command = 'service {name} restart'.format(name=service)
        message = 'Node {host} failed to start service {name}'.\
            format(host=node['host'], name=service)

        exec_cmd_no_error(
            node, command, timeout=180, sudo=True, message=message)

        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def start_service_on_all_duts(nodes, service):
        """Start up the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.start_service(node, service)

    @staticmethod
    def stop_service(node, service):
        """Stop the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        if DUTSetup.running_in_container(node):
            command = 'supervisorctl stop {name}'.format(name=service)
        else:
            command = 'service {name} stop'.format(name=service)
        message = 'Node {host} failed to stop service {name}'.\
            format(host=node['host'], name=service)

        exec_cmd_no_error(
            node, command, timeout=180, sudo=True, message=message)

        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def stop_service_on_all_duts(nodes, service):
        """Stop the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.stop_service(node, service)

    @staticmethod
    def get_vpp_pid(node):
        """Get PID of running VPP process.

        :param node: DUT node.
        :type node: dict
        :returns: PID of VPP process / List of PIDs if more VPP processes
            are running on the DUT node.
        :rtype: int or list
        :raises RuntimeError: If it is not possible to get the PID.
        """
        ssh = SSH()
        ssh.connect(node)

        retval = None
        for i in range(3):
            logger.trace('Try {}: Get VPP PID'.format(i))
            ret_code, stdout, stderr = ssh.exec_command('pidof vpp')

            if int(ret_code):
                raise RuntimeError('Not possible to get PID of VPP process '
                                   'on node: {0}\n {1}'.
                                   format(node['host'], stdout + stderr))

            pid_list = stdout.split()
            if len(pid_list) == 1:
                # Exactly one VPP running - the expected case.
                return int(stdout)
            elif not pid_list:
                logger.debug("No VPP PID found on node {0}".
                             format(node['host']))
                retval = None
            else:
                logger.debug("More then one VPP PID found on node {0}".
                             format(node['host']))
                retval = [int(pid) for pid in pid_list]

        return retval

    @staticmethod
    def get_vpp_pids(nodes):
        """Get PID of running VPP process on all DUTs.

        :param nodes: DUT nodes.
        :type nodes: dict
        :returns: PIDs keyed by host name.
        :rtype: dict
        """
        pids = dict()
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                pids[node['host']] = DUTSetup.get_vpp_pid(node)
        return pids

    @staticmethod
    def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
        """Verify if Crypto QAT device virtual functions are initialized on all
        DUTs. If parameter force initialization is set to True, then try to
        initialize or remove VFs on QAT.

        :param node: DUT node.
        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :param force_init: If True then try to initialize to specific value.
        :type node: dict
        :type crypto_type: string
        :type numvfs: int
        :type force_init: bool
        :returns: nothing
        :raises RuntimeError: If QAT VFs are not created and force init is set
            to False.
        """
        pci_addr = Topology.get_cryptodev(node)
        sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)

        if sriov_numvfs != numvfs:
            if force_init:
                # QAT is not initialized and we want to initialize with numvfs
                DUTSetup.crypto_device_init(node, crypto_type, numvfs)
            else:
                raise RuntimeError('QAT device failed to create VFs on {host}'.
                                   format(host=node['host']))

    @staticmethod
    def crypto_device_init(node, crypto_type, numvfs):
        """Init Crypto QAT device virtual functions on DUT.

        :param node: DUT node.
        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :type node: dict
        :type crypto_type: string
        :type numvfs: int
        :returns: nothing
        :raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
        """
        if crypto_type == "HW_DH895xcc":
            kernel_mod = "qat_dh895xcc"
            kernel_drv = "dh895xcc"
        elif crypto_type == "HW_C3xxx":
            kernel_mod = "qat_c3xxx"
            kernel_drv = "c3xxx"
        else:
            raise RuntimeError('Unsupported crypto device type on {host}'.
                               format(host=node['host']))

        pci_addr = Topology.get_cryptodev(node)

        # QAT device must be re-bound to kernel driver before initialization.
        DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)

        # Stop VPP to prevent deadlock.
        DUTSetup.stop_service(node, Constants.VPP_UNIT)

        current_driver = DUTSetup.get_pci_dev_driver(
            node, pci_addr.replace(':', r'\:'))
        if current_driver is not None:
            DUTSetup.pci_driver_unbind(node, pci_addr)

        # Bind to kernel driver.
        DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)

        # Initialize QAT VFs.
        if numvfs > 0:
            DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)

    @staticmethod
    def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
        """Get PCI address of Virtual Function.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI address.
        :param vf_id: Virtual Function number.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :returns: Virtual Function PCI address.
        :rtype: str
        :raises RuntimeError: If failed to get Virtual Function PCI address.
        """
        # The virtfn<N> symlink points at the VF's PCI device directory.
        command = "sh -c "\
            "'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\
            format(pci=pf_pci_addr, vf_id=vf_id)
        message = 'Failed to get virtual function PCI address.'

        stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
                                      message=message)

        return stdout.strip()

    @staticmethod
    def get_sriov_numvfs(node, pf_pci_addr):
        """Get number of SR-IOV VFs.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI device address.
        :type node: dict
        :type pf_pci_addr: str
        :returns: Number of VFs.
        :rtype: int
        :raises RuntimeError: If PCI device is not SR-IOV capable.
        """
        command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\
            format(pci=pf_pci_addr.replace(':', r'\:'))
        message = 'PCI device {pci} is not a SR-IOV device.'.\
            format(pci=pf_pci_addr)

        for _ in range(3):
            stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
                                          message=message)
            try:
                sriov_numvfs = int(stdout)
            except ValueError:
                logger.trace('Reading sriov_numvfs info failed on {host}'.
                             format(host=node['host']))
            else:
                return sriov_numvfs

    @staticmethod
    def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
        """Init or reset SR-IOV virtual functions by setting its number on PCI
        device on DUT. Setting to zero removes all VFs.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI device address.
        :param numvfs: Number of VFs to initialize, 0 - removes the VFs.
        :type node: dict
        :type pf_pci_addr: str
        :type numvfs: int
        :raises RuntimeError: Failed to create VFs on PCI.
        """
        command = "sh -c "\
            "'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\
            format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
        message = 'Failed to create {num} VFs on {pci} device on {host}'.\
            format(num=numvfs, pci=pf_pci_addr, host=node['host'])

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def pci_driver_unbind(node, pci_addr):
        """Unbind PCI device from current driver on node.

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :type node: dict
        :type pci_addr: str
        :raises RuntimeError: If PCI device unbind failed.
        """
        command = "sh -c "\
            "'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\
            format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
        message = 'Failed to unbind PCI device {pci} on {host}'.\
            format(pci=pci_addr, host=node['host'])

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def pci_driver_bind(node, pci_addr, driver):
        """Bind PCI device to driver on node.

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :param driver: Driver to bind.
        :type node: dict
        :type pci_addr: str
        :type driver: str
        :raises RuntimeError: If PCI device bind failed.
        """
        message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\
            format(pci=pci_addr, driver=driver, host=node['host'])

        # Prefer driver_override so the bind is not hijacked by another driver.
        command = "sh -c "\
            "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
            format(driver=driver, pci=pci_addr.replace(':', r'\:'))

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

        command = "sh -c "\
            "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
            format(pci=pci_addr, driver=driver)

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

        # Clear the override so future binds are not constrained.
        command = "sh -c "\
            "'echo | tee /sys/bus/pci/devices/{pci}/driver_override'".\
            format(pci=pci_addr.replace(':', r'\:'))

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
        """Unbind Virtual Function from driver on node.

        :param node: DUT node.
        :param pf_pci_addr: PCI device address.
        :param vf_id: Virtual Function ID.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :raises RuntimeError: If Virtual Function unbind failed.
        """
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)

        command = "sh -c "\
            "'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\
            format(vf_pci_addr=vf_pci_addr, vf_path=vf_path)

        message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\
            format(vf_pci_addr=vf_pci_addr, host=node['host'])

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
        """Bind Virtual Function to driver on node.

        :param node: DUT node.
        :param pf_pci_addr: PCI device address.
        :param vf_id: Virtual Function ID.
        :param driver: Driver to bind.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :type driver: str
        :raises RuntimeError: If PCI device bind failed.
        """
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)

        message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\
            format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host'])

        # Same override/bind/clear sequence as for the Physical Function.
        command = "sh -c "\
            "'echo {driver} | tee {vf_path}/driver_override'".\
            format(driver=driver, vf_path=vf_path)

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

        command = "sh -c "\
            "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
            format(vf_pci_addr=vf_pci_addr, driver=driver)

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

        command = "sh -c "\
            "'echo | tee {vf_path}/driver_override'".\
            format(vf_path=vf_path)

        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def get_pci_dev_driver(node, pci_addr):
        """Get current PCI device driver on node.

        Example of lspci output which is parsed here::

            # lspci -vmmks 0000:00:05.0
            Slot:   00:05.0
            Class:  Ethernet controller
            Vendor: Red Hat, Inc
            Device: Virtio network device
            SVendor:        Red Hat, Inc
            SDevice:        Device 0001
            PhySlot:        5
            Driver: virtio-pci

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :type node: dict
        :type pci_addr: str
        :returns: Driver or None
        :raises RuntimeError: If PCI rescan or lspci command execution failed.
        :raises RuntimeError: If it is not possible to get the interface driver
            information from the node.
        """
        ssh = SSH()
        ssh.connect(node)

        for i in range(3):
            logger.trace('Try number {0}: Get PCI device driver'.format(i))

            cmd = 'lspci -vmmks {0}'.format(pci_addr)
            ret_code, stdout, _ = ssh.exec_command(cmd)
            if int(ret_code):
                raise RuntimeError("'{0}' failed on '{1}'"
                                   .format(cmd, node['host']))

            for line in stdout.splitlines():
                if not line:
                    continue
                name = None
                value = None
                try:
                    name, value = line.split("\t", 1)
                except ValueError:
                    # "Driver:" with no value means the device is unbound.
                    if name == "Driver:":
                        return None
                if name == 'Driver:':
                    return value

            if i < 2:
                # Driver not found; rescan the PCI bus and retry.
                logger.trace('Driver for PCI device {} not found, executing '
                             'pci rescan and retrying'.format(pci_addr))
                cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
                ret_code, _, _ = ssh.exec_command_sudo(cmd)
                if int(ret_code) != 0:
                    raise RuntimeError("'{0}' failed on '{1}'"
                                       .format(cmd, node['host']))

        return None

    @staticmethod
    def verify_kernel_module(node, module, force_load=False):
        """Verify if kernel module is loaded on node. If parameter force
        load is set to True, then try to load the modules.

        :param node: Node.
        :param module: Module to verify.
        :param force_load: If True then try to load module.
        :type node: dict
        :type module: str
        :type force_load: bool
        :raises RuntimeError: If module is not loaded or failed to load.
        """
        command = 'grep -w {module} /proc/modules'.format(module=module)
        message = 'Kernel module {module} is not loaded on host {host}'.\
            format(module=module, host=node['host'])

        try:
            exec_cmd_no_error(node, command, timeout=30, sudo=False,
                              message=message)
        except RuntimeError:
            if force_load:
                # Module is not loaded and we want to load it
                DUTSetup.load_kernel_module(node, module)
            else:
                raise

    @staticmethod
    def verify_kernel_module_on_all_duts(nodes, module, force_load=False):
        """Verify if kernel module is loaded on all DUTs. If parameter force
        load is set to True, then try to load the modules.

        :param nodes: DUT nodes.
        :param module: Module to verify.
        :param force_load: If True then try to load module.
        :type nodes: dict
        :type module: str
        :type force_load: bool
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.verify_kernel_module(node, module, force_load)

    @staticmethod
    def verify_uio_driver_on_all_duts(nodes):
        """Verify if uio driver kernel module is loaded on all DUTs. If module
        is not present it will try to load it.

        :param nodes: DUT nodes.
        :type nodes: dict
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                uio_driver = Topology.get_uio_driver(node)
                DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)

    @staticmethod
    def load_kernel_module(node, module):
        """Load kernel module on node.

        :param node: DUT node.
        :param module: Module to load.
        :type node: dict
        :type module: str
        :returns: nothing
        :raises RuntimeError: If loading failed.
        """
        command = 'modprobe {module}'.format(module=module)
        message = 'Failed to load {module} on host {host}'.\
            format(module=module, host=node['host'])

        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)

    @staticmethod
    def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
        """Install VPP on all DUT nodes. Start the VPP service in case of
        systemd is not available or does not support autostart.

        :param nodes: Nodes in the topology.
        :param vpp_pkg_dir: Path to directory where VPP packages are stored.
        :type nodes: dict
        :type vpp_pkg_dir: str
        :raises RuntimeError: If failed to remove or install VPP.
        """
        for node in nodes.values():
            message = 'Failed to install VPP on host {host}!'.\
                format(host=node['host'])
            if node['type'] == NodeType.DUT:
                # Prevent the distro sysctl snippet from interfering with VPP.
                command = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
                exec_cmd_no_error(node, command, sudo=True)

                command = '. /etc/lsb-release; echo "${DISTRIB_ID}"'
                stdout, _ = exec_cmd_no_error(node, command)

                if stdout.strip() == 'Ubuntu':
                    exec_cmd_no_error(node, 'apt-get purge -y "*vpp*" || true',
                                      timeout=120, sudo=True)
                    exec_cmd_no_error(node, 'dpkg -i --force-all {dir}*.deb'.
                                      format(dir=vpp_pkg_dir), timeout=120,
                                      sudo=True, message=message)
                    exec_cmd_no_error(node, 'dpkg -l | grep vpp', sudo=True)
                    if DUTSetup.running_in_container(node):
                        DUTSetup.restart_service(node, Constants.VPP_UNIT)
                else:
                    exec_cmd_no_error(node, 'yum -y remove "*vpp*" || true',
                                      timeout=120, sudo=True)
                    exec_cmd_no_error(node, 'rpm -ivh {dir}*.rpm'.
                                      format(dir=vpp_pkg_dir), timeout=120,
                                      sudo=True, message=message)
                    exec_cmd_no_error(node, 'rpm -qai *vpp*', sudo=True)
                    DUTSetup.restart_service(node, Constants.VPP_UNIT)

    @staticmethod
    def running_in_container(node):
        """This method tests if topology node is running inside container.

        :param node: Topology node.
        :type node: dict
        :returns: True if running in docker container, false if not or failed
            to detect.
        :rtype: bool
        """
        command = "fgrep docker /proc/1/cgroup"
        message = 'Failed to get cgroup settings.'
        try:
            exec_cmd_no_error(node, command, timeout=30, sudo=False,
                              message=message)
        except RuntimeError:
            return False
        return True

    @staticmethod
    def get_docker_mergeddir(node, uuid):
        """Get Docker overlay for MergedDir diff.

        :param node: DUT node.
        :param uuid: Docker UUID.
        :type node: dict
        :type uuid: str
        :returns: Docker container MergedDir.
        :rtype: str
        :raises RuntimeError: If getting output failed.
        """
        command = "docker inspect --format='"\
            "{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
        message = 'Failed to get directory of {uuid} on host {host}'.\
            format(uuid=uuid, host=node['host'])

        stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
        return stdout.strip()

    @staticmethod
    def get_huge_page_size(node):
        """Get default size of huge pages in system.

        :param node: Node in the topology.
        :type node: dict
        :returns: Default size of free huge pages in system.
        :rtype: int
        :raises RuntimeError: If reading failed for three times.
        """
        ssh = SSH()
        ssh.connect(node)

        for _ in range(3):
            ret_code, stdout, _ = ssh.exec_command_sudo(
                "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
            if ret_code == 0:
                try:
                    huge_size = int(stdout)
                except ValueError:
                    logger.trace('Reading huge page size information failed')
                else:
                    break
        else:
            raise RuntimeError('Getting huge page size information failed.')
        return huge_size

    @staticmethod
    def get_huge_page_free(node, huge_size):
        """Get number of free huge pages in system.

        :param node: Node in the topology.
        :param huge_size: Size of hugepages.
        :type node: dict
        :type huge_size: int
        :returns: Number of free huge pages in system.
        :rtype: int
        :raises RuntimeError: If reading failed for three times.
        """
        # TODO: add numa aware option
        ssh = SSH()
        ssh.connect(node)

        for _ in range(3):
            ret_code, stdout, _ = ssh.exec_command_sudo(
                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
                format(huge_size))
            if ret_code == 0:
                try:
                    huge_free = int(stdout)
                except ValueError:
                    logger.trace('Reading free huge pages information failed')
                else:
                    break
        else:
            raise RuntimeError('Getting free huge pages information failed.')
        return huge_free

    @staticmethod
    def get_huge_page_total(node, huge_size):
        """Get total number of huge pages in system.

        :param node: Node in the topology.
        :param huge_size: Size of hugepages.
        :type node: dict
        :type huge_size: int
        :returns: Total number of huge pages in system.
        :rtype: int
        :raises RuntimeError: If reading failed for three times.
        """
        # TODO: add numa aware option
        ssh = SSH()
        ssh.connect(node)

        for _ in range(3):
            ret_code, stdout, _ = ssh.exec_command_sudo(
                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
                format(huge_size))
            if ret_code == 0:
                try:
                    huge_total = int(stdout)
                except ValueError:
                    logger.trace('Reading total huge pages information failed')
                else:
                    break
        else:
            raise RuntimeError('Getting total huge pages information failed.')
        return huge_total

    @staticmethod
    def check_huge_page(node, huge_mnt, mem_size, allocate=False):
        """Check if there is enough HugePages in system. If allocate is set to
        true, try to allocate more HugePages.

        :param node: Node in the topology.
        :param huge_mnt: HugePage mount point.
        :param mem_size: Requested memory in MB.
        :param allocate: Whether to allocate more memory if not enough.
        :type node: dict
        :type huge_mnt: str
        :type mem_size: str
        :type allocate: bool
        :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
            or increasing map count failed.
        """
        # TODO: split function into smaller parts.
        ssh = SSH()
        ssh.connect(node)

        # Get huge pages information
        huge_size = DUTSetup.get_huge_page_size(node)
        huge_free = DUTSetup.get_huge_page_free(node, huge_size)
        huge_total = DUTSetup.get_huge_page_total(node, huge_size)

        # Check if memory reqested is available on host
        if (mem_size * 1024) > (huge_free * huge_size):
            # If we want to allocate hugepage dynamically
            if allocate:
                mem_needed = (mem_size * 1024) - (huge_free * huge_size)
                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
                max_map_count = huge_to_allocate*4
                # Increase maximum number of memory map areas a process may have
                ret_code, _, _ = ssh.exec_command_sudo(
                    'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
                    format(max_map_count))
                if int(ret_code) != 0:
                    raise RuntimeError('Increase map count failed on {host}'.
                                       format(host=node['host']))
                # Increase hugepage count
                ret_code, _, _ = ssh.exec_command_sudo(
                    'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
                    format(huge_to_allocate))
                if int(ret_code) != 0:
                    raise RuntimeError('Mount huge pages failed on {host}'.
                                       format(host=node['host']))
            # If we do not want to allocate dynamicaly end with error
            else:
                raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
                                   format(huge_free, huge_free * huge_size))
        # Check if huge pages mount point exist
        has_huge_mnt = False
        ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
        if int(ret_code) == 0:
            for line in stdout.splitlines():
                # Try to find something like:
                # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
                mount = line.split()
                if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
                    has_huge_mnt = True
                    break
        # If huge page mount point not exist create one
        if not has_huge_mnt:
            ret_code, _, _ = ssh.exec_command_sudo(
                'mkdir -p {mnt}'.format(mnt=huge_mnt))
            if int(ret_code) != 0:
                raise RuntimeError('Create mount dir failed on {host}'.
                                   format(host=node['host']))
            ret_code, _, _ = ssh.exec_command_sudo(
                'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
                format(mnt=huge_mnt))
            if int(ret_code) != 0:
                raise RuntimeError('Mount huge pages failed on {host}'.
                                   format(host=node['host']))