1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """DUT setup library."""
16 from time import sleep
17 from robot.api import logger
19 from resources.libraries.python.Constants import Constants
20 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
21 from resources.libraries.python.topology import NodeType, Topology
25 """Contains methods for setting up DUTs."""
@staticmethod
def get_service_logs(node, service):
    """Get specific service unit logs from node.

    :param node: Node in the topology.
    :param service: Service unit name.
    :type node: dict
    :type service: str
    """
    # Containers are supervised by supervisord (file logs); bare metal / VM
    # nodes use systemd, so read the journal since the unit last started.
    command = u"echo $(< /tmp/*supervisor*.log)" \
        if DUTSetup.running_in_container(node) \
        else f"journalctl --no-pager --unit={service} " \
        f"--since=\"$(echo `systemctl show -p ActiveEnterTimestamp " \
        f"{service}` | awk \'{{print $2 $3}}\')\""
    message = f"Node {node[u'host']} failed to get logs from unit {service}"

    exec_cmd_no_error(
        node, command, timeout=30, sudo=True, message=message
    )
@staticmethod
def get_service_logs_on_all_duts(nodes, service):
    """Get specific service unit logs from all DUTs.

    :param nodes: Nodes in the topology.
    :param service: Service unit name.
    :type nodes: dict
    :type service: str
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            DUTSetup.get_service_logs(node, service)
@staticmethod
def restart_service(node, service):
    """Restart the named service on node.

    :param node: Node in the topology.
    :param service: Service unit name.
    :type node: dict
    :type service: str
    """
    # supervisorctl manages services inside containers; "service" elsewhere.
    command = f"supervisorctl restart {service}" \
        if DUTSetup.running_in_container(node) \
        else f"service {service} restart"
    message = f"Node {node[u'host']} failed to restart service {service}"

    exec_cmd_no_error(
        node, command, timeout=180, sudo=True, message=message
    )

    # Collect fresh unit logs so a failed restart is diagnosable.
    DUTSetup.get_service_logs(node, service)
@staticmethod
def restart_service_on_all_duts(nodes, service):
    """Restart the named service on all DUTs.

    :param nodes: Nodes in the topology.
    :param service: Service unit name.
    :type nodes: dict
    :type service: str
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            DUTSetup.restart_service(node, service)
@staticmethod
def start_service(node, service):
    """Start up the named service on node.

    :param node: Node in the topology.
    :param service: Service unit name.
    :type node: dict
    :type service: str
    """
    # TODO: change command to start once all parent function updated.
    command = f"supervisorctl restart {service}" \
        if DUTSetup.running_in_container(node) \
        else f"service {service} restart"
    message = f"Node {node[u'host']} failed to start service {service}"

    exec_cmd_no_error(
        node, command, timeout=180, sudo=True, message=message
    )

    DUTSetup.get_service_logs(node, service)
@staticmethod
def start_service_on_all_duts(nodes, service):
    """Start up the named service on all DUTs.

    :param nodes: Nodes in the topology.
    :param service: Service unit name.
    :type nodes: dict
    :type service: str
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            DUTSetup.start_service(node, service)
@staticmethod
def stop_service(node, service):
    """Stop the named service on node.

    :param node: Node in the topology.
    :param service: Service unit name.
    :type node: dict
    :type service: str
    """
    # Grab the logs BEFORE stopping, while the unit is still active.
    DUTSetup.get_service_logs(node, service)

    command = f"supervisorctl stop {service}" \
        if DUTSetup.running_in_container(node) \
        else f"service {service} stop"
    message = f"Node {node[u'host']} failed to stop service {service}"

    exec_cmd_no_error(
        node, command, timeout=180, sudo=True, message=message
    )
@staticmethod
def stop_service_on_all_duts(nodes, service):
    """Stop the named service on all DUTs.

    :param nodes: Nodes in the topology.
    :param service: Service unit name.
    :type nodes: dict
    :type service: str
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            DUTSetup.stop_service(node, service)
@staticmethod
def kill_program(node, program, namespace=None):
    """Kill program on the specified topology node.

    Sends SIGTERM first, polls up to 5 seconds for the program to exit,
    then escalates to SIGKILL as a last resort.

    :param node: Topology node.
    :param program: Program name.
    :param namespace: Namespace program is running in.
    :type node: dict
    :type program: str
    :type namespace: str
    """
    host = node[u"host"]
    cmd_timeout = 5
    if namespace in (None, u"default"):
        shell_cmd = u"sh -c"
    else:
        shell_cmd = f"ip netns exec {namespace} sh -c"

    pgrep_cmd = f"{shell_cmd} \'pgrep {program}\'"
    ret_code, _, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
                              sudo=True)
    if ret_code != 0:
        # pgrep found nothing: nothing to kill.
        logger.trace(f"{program} is not running on {host}")
        return
    ret_code, _, _ = exec_cmd(node, f"{shell_cmd} \'pkill {program}\'",
                              timeout=cmd_timeout, sudo=True)
    for attempt in range(5):
        ret_code, _, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
                                  sudo=True)
        if ret_code != 0:
            logger.trace(f"Attempt {attempt}: {program} is dead on {host}")
            return
        sleep(1)
    # Graceful termination failed, force it.
    logger.trace(f"SIGKILLing {program} on {host}")
    ret_code, _, _ = exec_cmd(node, f"{shell_cmd} \'pkill -9 {program}\'",
                              timeout=cmd_timeout, sudo=True)
    if ret_code != 0:
        raise RuntimeError(f"Failed to kill {program} on {host}")
@staticmethod
def verify_program_installed(node, program):
    """Verify that program is installed on the specified topology node.

    :param node: Topology node.
    :param program: Program name.
    :type node: dict
    :type program: str
    :raises RuntimeError: If the program is not installed on the node.
    """
    # "command -v" exits non-zero when the program is not on PATH.
    cmd = f"command -v {program}"
    exec_cmd_no_error(node, cmd, message=f"{program} is not installed")
@staticmethod
def get_pid(node, process):
    """Get PID of running process.

    :param node: DUT node.
    :param process: process name.
    :type node: dict
    :type process: str
    :returns: PID(s) of the process.
    :rtype: list of int
    :raises RuntimeError: If it is not possible to get the PID.
    """
    ssh = SSH()
    ssh.connect(node)

    retval = None
    # Retry a few times: the process may be restarting between polls.
    for i in range(3):
        logger.trace(f"Try {i}: Get {process} PID")
        ret_code, stdout, stderr = ssh.exec_command(f"pidof {process}")

        if int(ret_code):
            raise RuntimeError(
                f"Not possible to get PID of {process} process on node: "
                f"{node[u'host']}\n {stdout + stderr}"
            )

        pid_list = stdout.split()
        if len(pid_list) == 1:
            return [int(stdout)]
        if not pid_list:
            logger.debug(f"No {process} PID found on node {node[u'host']}")
            continue
        logger.debug(f"More than one {process} PID found " \
                     f"on node {node[u'host']}")
        retval = [int(pid) for pid in pid_list]

    return retval
@staticmethod
def get_vpp_pids(nodes):
    """Get PID of running VPP process on all DUTs.

    :param nodes: DUT nodes.
    :type nodes: dict
    :returns: PIDs keyed by host name.
    :rtype: dict
    """
    pids = dict()
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            pids[node[u"host"]] = DUTSetup.get_pid(node, u"vpp")
    return pids
@staticmethod
def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
    """Verify if Crypto QAT device virtual functions are initialized on all
    DUTs. If parameter force initialization is set to True, then try to
    initialize or remove VFs on QAT.

    :param node: DUT node.
    :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
    :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
    :param force_init: If True then try to initialize to specific value.
    :type node: dict
    :type crypto_type: string
    :type numvfs: int
    :type force_init: bool
    :raises RuntimeError: If QAT VFs are not created and force init is set
        to False.
    """
    pci_addr = Topology.get_cryptodev(node)
    sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)

    if sriov_numvfs != numvfs:
        if force_init:
            # QAT is not initialized and we want to initialize with numvfs
            DUTSetup.crypto_device_init(node, crypto_type, numvfs)
        else:
            raise RuntimeError(
                f"QAT device failed to create VFs on {node[u'host']}"
            )
@staticmethod
def crypto_device_init(node, crypto_type, numvfs):
    """Init Crypto QAT device virtual functions on DUT.

    :param node: DUT node.
    :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
    :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
    :type node: dict
    :type crypto_type: string
    :type numvfs: int
    :raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
    """
    if crypto_type == u"HW_DH895xcc":
        kernel_mod = u"qat_dh895xcc"
        kernel_drv = u"dh895xcc"
    elif crypto_type == u"HW_C3xxx":
        kernel_mod = u"qat_c3xxx"
        kernel_drv = u"c3xxx"
    else:
        raise RuntimeError(
            f"Unsupported crypto device type on {node[u'host']}"
        )

    pci_addr = Topology.get_cryptodev(node)

    # QAT device must be re-bound to kernel driver before initialization.
    DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)

    # Stop VPP to prevent deadlock.
    DUTSetup.stop_service(node, Constants.VPP_UNIT)

    current_driver = DUTSetup.get_pci_dev_driver(
        node, pci_addr.replace(u":", r"\:")
    )
    if current_driver is not None:
        DUTSetup.pci_driver_unbind(node, pci_addr)

    # Bind to kernel driver.
    DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)

    # Initialize QAT VFs.
    if numvfs > 0:
        DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)
@staticmethod
def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
    """Get PCI address of Virtual Function.

    :param node: DUT node.
    :param pf_pci_addr: Physical Function PCI address.
    :param vf_id: Virtual Function number.
    :type node: dict
    :type pf_pci_addr: str
    :type vf_id: int
    :returns: Virtual Function PCI address.
    :rtype: str
    :raises RuntimeError: If failed to get Virtual Function PCI address.
    """
    # The virtfnN symlink in sysfs points at the VF's PCI device node.
    command = f"sh -c \"basename $(readlink " \
        f"/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id})\""
    message = u"Failed to get virtual function PCI address."

    stdout, _ = exec_cmd_no_error(
        node, command, timeout=30, sudo=True, message=message
    )

    return stdout.strip()
@staticmethod
def get_sriov_numvfs(node, pf_pci_addr):
    """Get number of SR-IOV VFs.

    :param node: DUT node.
    :param pf_pci_addr: Physical Function PCI device address.
    :type node: dict
    :type pf_pci_addr: str
    :returns: Number of VFs.
    :rtype: int
    :raises RuntimeError: If PCI device is not SR-IOV capable.
    """
    # Colons in the PCI address must be escaped for the shell path.
    pci = pf_pci_addr.replace(u":", r"\:")
    command = f"cat /sys/bus/pci/devices/{pci}/sriov_numvfs"
    message = f"PCI device {pf_pci_addr} is not a SR-IOV device."

    for _ in range(3):
        stdout, _ = exec_cmd_no_error(
            node, command, timeout=30, sudo=True, message=message
        )
        try:
            sriov_numvfs = int(stdout)
        except ValueError:
            logger.trace(
                f"Reading sriov_numvfs info failed on {node[u'host']}"
            )
        else:
            return sriov_numvfs
@staticmethod
def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
    """Init or reset SR-IOV virtual functions by setting its number on PCI
    device on DUT. Setting to zero removes all VFs.

    :param node: DUT node.
    :param pf_pci_addr: Physical Function PCI device address.
    :param numvfs: Number of VFs to initialize, 0 - removes the VFs.
    :type node: dict
    :type pf_pci_addr: str
    :type numvfs: int
    :raises RuntimeError: Failed to create VFs on PCI.
    """
    pci = pf_pci_addr.replace(u":", r"\:")
    command = f"sh -c \"echo {numvfs} | " \
        f"tee /sys/bus/pci/devices/{pci}/sriov_numvfs\""
    message = f"Failed to create {numvfs} VFs on {pf_pci_addr} device " \
        f"on {node[u'host']}"

    exec_cmd_no_error(
        node, command, timeout=120, sudo=True, message=message
    )
@staticmethod
def pci_driver_unbind(node, pci_addr):
    """Unbind PCI device from current driver on node.

    :param node: DUT node.
    :param pci_addr: PCI device address.
    :type node: dict
    :type pci_addr: str
    :raises RuntimeError: If PCI device unbind failed.
    """
    pci = pci_addr.replace(u":", r"\:")
    command = f"sh -c \"echo {pci_addr} | " \
        f"tee /sys/bus/pci/devices/{pci}/driver/unbind\""
    message = f"Failed to unbind PCI device {pci_addr} on {node[u'host']}"

    exec_cmd_no_error(
        node, command, timeout=120, sudo=True, message=message
    )
@staticmethod
def pci_driver_bind(node, pci_addr, driver):
    """Bind PCI device to driver on node.

    :param node: DUT node.
    :param pci_addr: PCI device address.
    :param driver: Driver to bind.
    :type node: dict
    :type pci_addr: str
    :type driver: str
    :raises RuntimeError: If PCI device bind failed.
    """
    message = f"Failed to bind PCI device {pci_addr} to {driver} " \
        f"on host {node[u'host']}"
    pci = pci_addr.replace(u":", r"\:")
    # Pin the desired driver via driver_override so the bind below cannot
    # be hijacked by another driver claiming the device.
    command = f"sh -c \"echo {driver} | " \
        f"tee /sys/bus/pci/devices/{pci}/driver_override\""

    exec_cmd_no_error(
        node, command, timeout=120, sudo=True, message=message
    )

    command = f"sh -c \"echo {pci_addr} | " \
        f"tee /sys/bus/pci/drivers/{driver}/bind\""

    exec_cmd_no_error(
        node, command, timeout=120, sudo=True, message=message
    )

    # Clear the override so future rebinds are not constrained.
    command = f"sh -c \"echo | " \
        f"tee /sys/bus/pci/devices/{pci}/driver_override\""

    exec_cmd_no_error(
        node, command, timeout=120, sudo=True, message=message
    )
@staticmethod
def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
    """Unbind Virtual Function from driver on node.

    :param node: DUT node.
    :param pf_pci_addr: PCI device address.
    :param vf_id: Virtual Function ID.
    :type node: dict
    :type pf_pci_addr: str
    :type vf_id: int
    :raises RuntimeError: If Virtual Function unbind failed.
    """
    vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
    pf_pci = pf_pci_addr.replace(u":", r"\:")
    vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}"

    command = f"sh -c \"echo {vf_pci_addr} | tee {vf_path}/driver/unbind\""
    message = f"Failed to unbind VF {vf_pci_addr} on {node[u'host']}"

    exec_cmd_no_error(
        node, command, timeout=120, sudo=True, message=message
    )
@staticmethod
def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
    """Bind Virtual Function to driver on node.

    :param node: DUT node.
    :param pf_pci_addr: PCI device address.
    :param vf_id: Virtual Function ID.
    :param driver: Driver to bind.
    :type node: dict
    :type pf_pci_addr: str
    :type vf_id: int
    :type driver: str
    :raises RuntimeError: If PCI device bind failed.
    """
    vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
    pf_pci = pf_pci_addr.replace(u":", r'\:')
    vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}"

    message = f"Failed to bind VF {vf_pci_addr} to {driver} " \
        f"on {node[u'host']}"
    # Same override/bind/clear dance as pci_driver_bind, on the VF node.
    command = f"sh -c \"echo {driver} | tee {vf_path}/driver_override\""

    exec_cmd_no_error(
        node, command, timeout=120, sudo=True, message=message
    )

    command = f"sh -c \"echo {vf_pci_addr} | " \
        f"tee /sys/bus/pci/drivers/{driver}/bind\""

    exec_cmd_no_error(
        node, command, timeout=120, sudo=True, message=message
    )

    command = f"sh -c \"echo | tee {vf_path}/driver_override\""

    exec_cmd_no_error(
        node, command, timeout=120, sudo=True, message=message
    )
@staticmethod
def get_pci_dev_driver(node, pci_addr):
    """Get current PCI device driver on node.

    Example of the output from lspci -vmmks:
    # lspci -vmmks 0000:00:05.0
    Slot:   00:05.0
    Class:  Ethernet controller
    Vendor: Red Hat, Inc
    Device: Virtio network device
    SVendor:    Red Hat, Inc
    SDevice:    Device 0001
    Driver: virtio-pci

    :param node: DUT node.
    :param pci_addr: PCI device address.
    :type node: dict
    :type pci_addr: str
    :returns: Driver or None
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    :raises RuntimeError: If it is not possible to get the interface driver
        information from the node.
    """
    ssh = SSH()
    ssh.connect(node)

    for i in range(3):
        logger.trace(f"Try number {i}: Get PCI device driver")

        cmd = f"lspci -vmmks {pci_addr}"
        ret_code, stdout, _ = ssh.exec_command(cmd)
        if int(ret_code):
            raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

        for line in stdout.splitlines():
            if not line:
                continue
            name = None
            value = None
            try:
                name, value = line.split(u"\t", 1)
            except ValueError:
                # "Driver:" with no value means no driver is bound.
                if name == u"Driver:":
                    return None
            if name == u"Driver:":
                return value

        if i < 2:
            # Device not listed yet; force a PCI rescan and retry.
            logger.trace(
                f"Driver for PCI device {pci_addr} not found, "
                f"executing pci rescan and retrying"
            )
            cmd = u"sh -c \"echo 1 > /sys/bus/pci/rescan\""
            ret_code, _, _ = ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")

    return None
@staticmethod
def verify_kernel_module(node, module, force_load=False):
    """Verify if kernel module is loaded on node. If parameter force
    load is set to True, then try to load the modules.

    :param node: Node.
    :param module: Module to verify.
    :param force_load: If True then try to load module.
    :type node: dict
    :type module: str
    :type force_load: bool
    :raises RuntimeError: If module is not loaded or failed to load.
    """
    command = f"grep -w {module} /proc/modules"
    message = f"Kernel module {module} is not loaded " \
        f"on host {node[u'host']}"

    try:
        exec_cmd_no_error(
            node, command, timeout=30, sudo=False, message=message
        )
    except RuntimeError:
        if force_load:
            # Module is not loaded and we want to load it
            DUTSetup.load_kernel_module(node, module)
        else:
            raise
@staticmethod
def verify_kernel_module_on_all_duts(nodes, module, force_load=False):
    """Verify if kernel module is loaded on all DUTs. If parameter force
    load is set to True, then try to load the modules.

    :param nodes: DUT nodes.
    :param module: Module to verify.
    :param force_load: If True then try to load module.
    :type nodes: dict
    :type module: str
    :type force_load: bool
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            DUTSetup.verify_kernel_module(node, module, force_load)
@staticmethod
def verify_uio_driver_on_all_duts(nodes):
    """Verify if uio driver kernel module is loaded on all DUTs. If module
    is not present it will try to load it.

    :param nodes: DUT nodes.
    :type nodes: dict
    """
    for node in nodes.values():
        if node[u"type"] == NodeType.DUT:
            uio_driver = Topology.get_uio_driver(node)
            DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
@staticmethod
def load_kernel_module(node, module):
    """Load kernel module on node.

    :param node: DUT node.
    :param module: Module to load.
    :type node: dict
    :type module: str
    :raises RuntimeError: If loading failed.
    """
    command = f"modprobe {module}"
    message = f"Failed to load {module} on host {node[u'host']}"

    exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
@staticmethod
def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
    """Install VPP on all DUT nodes. Start the VPP service in case of
    systemd is not available or does not support autostart.

    :param nodes: Nodes in the topology.
    :param vpp_pkg_dir: Path to directory where VPP packages are stored.
    :type nodes: dict
    :type vpp_pkg_dir: str
    :raises RuntimeError: If failed to remove or install VPP.
    """
    for node in nodes.values():
        message = f"Failed to install VPP on host {node[u'host']}!"
        if node[u"type"] == NodeType.DUT:
            # Prevent the distro sysctl snippet from fighting test configs.
            command = u"ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true"
            exec_cmd_no_error(node, command, sudo=True)

            command = u". /etc/lsb-release; echo \"${DISTRIB_ID}\""
            stdout, _ = exec_cmd_no_error(node, command)

            if stdout.strip() == u"Ubuntu":
                exec_cmd_no_error(
                    node, u"apt-get purge -y '*vpp*' || true",
                    timeout=120, sudo=True
                )
                # workaround to avoid installation of vpp-api-python
                # BUGFIX: was a plain u"" string, so {vpp_pkg_dir} was
                # never substituted and the wrong literal path was removed.
                exec_cmd_no_error(
                    node, f"rm -f {vpp_pkg_dir}vpp-api-python.deb",
                    timeout=120, sudo=True
                )
                exec_cmd_no_error(
                    node, f"dpkg -i --force-all {vpp_pkg_dir}*.deb",
                    timeout=120, sudo=True, message=message
                )
                exec_cmd_no_error(node, u"dpkg -l | grep vpp", sudo=True)
                if DUTSetup.running_in_container(node):
                    DUTSetup.restart_service(node, Constants.VPP_UNIT)
            else:
                exec_cmd_no_error(
                    node, u"yum -y remove '*vpp*' || true",
                    timeout=120, sudo=True
                )
                # workaround to avoid installation of vpp-api-python
                # BUGFIX: same missing f-prefix as the .deb branch above.
                exec_cmd_no_error(
                    node, f"rm -f {vpp_pkg_dir}vpp-api-python.rpm",
                    timeout=120, sudo=True
                )
                exec_cmd_no_error(
                    node, f"rpm -ivh {vpp_pkg_dir}*.rpm",
                    timeout=120, sudo=True, message=message
                )
                exec_cmd_no_error(node, u"rpm -qai '*vpp*'", sudo=True)
                DUTSetup.restart_service(node, Constants.VPP_UNIT)
@staticmethod
def running_in_container(node):
    """This method tests if topology node is running inside container.

    :param node: Topology node.
    :type node: dict
    :returns: True if running in docker container, false if not or failed
        to detect.
    :rtype: bool
    """
    # PID 1's cgroup mentions "docker" only inside a docker container.
    command = u"fgrep docker /proc/1/cgroup"
    message = u"Failed to get cgroup settings."
    try:
        exec_cmd_no_error(
            node, command, timeout=30, sudo=False, message=message
        )
    except RuntimeError:
        return False
    return True
@staticmethod
def get_docker_mergeddir(node, uuid):
    """Get Docker overlay for MergedDir diff.

    :param node: DUT node.
    :param uuid: Docker UUID.
    :type node: dict
    :type uuid: str
    :returns: Docker container MergedDir.
    :rtype: str
    :raises RuntimeError: If getting output failed.
    """
    # Quadruple braces render as literal {{...}} for Go template syntax.
    command = f"docker inspect " \
        f"--format='{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}"
    message = f"Failed to get directory of {uuid} on host {node[u'host']}"

    stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
    return stdout.strip()
@staticmethod
def get_huge_page_size(node):
    """Get default size of huge pages in system.

    :param node: Node in the topology.
    :type node: dict
    :returns: Default size of free huge pages in system.
    :rtype: int
    :raises RuntimeError: If reading failed for three times.
    """
    ssh = SSH()
    ssh.connect(node)

    for _ in range(3):
        ret_code, stdout, _ = ssh.exec_command_sudo(
            u"grep Hugepagesize /proc/meminfo | awk '{ print $2 }'"
        )
        if ret_code == 0:
            try:
                huge_size = int(stdout)
            except ValueError:
                logger.trace(u"Reading huge page size information failed")
            else:
                break
    else:
        # Loop exhausted without a successful parse.
        raise RuntimeError(u"Getting huge page size information failed.")
    return huge_size
@staticmethod
def get_huge_page_free(node, huge_size):
    """Get number of free huge pages in system.

    :param node: Node in the topology.
    :param huge_size: Size of hugepages.
    :type node: dict
    :type huge_size: int
    :returns: Number of free huge pages in system.
    :rtype: int
    :raises RuntimeError: If reading failed for three times.
    """
    # TODO: add numa aware option
    ssh = SSH()
    ssh.connect(node)

    for _ in range(3):
        ret_code, stdout, _ = ssh.exec_command_sudo(
            f"cat /sys/kernel/mm/hugepages/hugepages-{huge_size}kB/"
            f"free_hugepages"
        )
        if ret_code == 0:
            try:
                huge_free = int(stdout)
            except ValueError:
                logger.trace(u"Reading free huge pages information failed")
            else:
                break
    else:
        raise RuntimeError(u"Getting free huge pages information failed.")
    return huge_free
@staticmethod
def get_huge_page_total(node, huge_size):
    """Get total number of huge pages in system.

    :param node: Node in the topology.
    :param huge_size: Size of hugepages.
    :type node: dict
    :type huge_size: int
    :returns: Total number of huge pages in system.
    :rtype: int
    :raises RuntimeError: If reading failed for three times.
    """
    # TODO: add numa aware option
    ssh = SSH()
    ssh.connect(node)

    for _ in range(3):
        ret_code, stdout, _ = ssh.exec_command_sudo(
            f"cat /sys/kernel/mm/hugepages/hugepages-{huge_size}kB/"
            f"nr_hugepages"
        )
        if ret_code == 0:
            try:
                huge_total = int(stdout)
            except ValueError:
                logger.trace(u"Reading total huge pages information failed")
            else:
                break
    else:
        raise RuntimeError(u"Getting total huge pages information failed.")
    return huge_total
@staticmethod
def check_huge_page(node, huge_mnt, mem_size, allocate=False):
    """Check if there is enough HugePages in system. If allocate is set to
    true, try to allocate more HugePages.

    :param node: Node in the topology.
    :param huge_mnt: HugePage mount point.
    :param mem_size: Requested memory in MB.
    :param allocate: Whether to allocate more memory if not enough.
    :type node: dict
    :type huge_mnt: str
    :type mem_size: int
    :type allocate: bool
    :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
        or increasing map count failed.
    """
    # TODO: split function into smaller parts.
    ssh = SSH()
    ssh.connect(node)

    # Get huge pages information
    huge_size = DUTSetup.get_huge_page_size(node)
    huge_free = DUTSetup.get_huge_page_free(node, huge_size)
    huge_total = DUTSetup.get_huge_page_total(node, huge_size)

    # Check if memory requested is available on
    mem_size = int(mem_size)
    if (mem_size * 1024) > (huge_free * huge_size):
        # If we want to allocate hugepage dynamically
        if allocate:
            mem_needed = (mem_size * 1024) - (huge_free * huge_size)
            # Allocate double the shortfall on top of the current total.
            huge_to_allocate = ((mem_needed // huge_size) * 2) + huge_total
            max_map_count = huge_to_allocate * 4
            # Increase maximum number of memory map areas a process may have
            ret_code, _, _ = ssh.exec_command_sudo(
                f"echo \"{max_map_count}\" | "
                f"sudo tee /proc/sys/vm/max_map_count"
            )
            if int(ret_code) != 0:
                raise RuntimeError(
                    f"Increase map count failed on {node[u'host']}"
                )
            # Increase hugepage count
            ret_code, _, _ = ssh.exec_command_sudo(
                f"echo \"{huge_to_allocate}\" | "
                f"sudo tee /proc/sys/vm/nr_hugepages"
            )
            if int(ret_code) != 0:
                raise RuntimeError(
                    f"Mount huge pages failed on {node[u'host']}"
                )
        # If we do not want to allocate dynamically end with error
        else:
            raise RuntimeError(
                f"Not enough free huge pages: {huge_free}, "
                f"{huge_free * huge_size} MB"
            )
    # Check if huge pages mount point exist
    has_huge_mnt = False
    ret_code, stdout, _ = ssh.exec_command(u"cat /proc/mounts")
    if int(ret_code) == 0:
        for line in stdout.splitlines():
            # Try to find something like:
            # none /mnt/huge hugetlbfs rw,realtime,pagesize=2048k 0 0
            mount = line.split()
            if mount[2] == u"hugetlbfs" and mount[1] == huge_mnt:
                has_huge_mnt = True
                break
    # If huge page mount point not exist create one
    if not has_huge_mnt:
        ret_code, _, _ = ssh.exec_command_sudo(f"mkdir -p {huge_mnt}")
        if int(ret_code) != 0:
            raise RuntimeError(
                f"Create mount dir failed on {node[u'host']}"
            )
        ret_code, _, _ = ssh.exec_command_sudo(
            f"mount -t hugetlbfs -o pagesize=2048k none {huge_mnt}"
        )
        if int(ret_code) != 0:
            raise RuntimeError(
                f"Mount huge pages failed on {node[u'host']}"
            )