1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """DUT setup library."""
16 from robot.api import logger
18 from resources.libraries.python.topology import NodeType, Topology
19 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
20 from resources.libraries.python.constants import Constants
21 from resources.libraries.python.VatExecutor import VatExecutor
22 from resources.libraries.python.VPPUtil import VPPUtil
class DUTSetup(object):
    """Contains methods for setting up DUTs."""

    @staticmethod
    def get_service_logs(node, service):
        """Get specific service unit logs by journalctl from node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        :raises RuntimeError: If retrieving the logs fails on the node.
        """
        ssh = SSH()
        ssh.connect(node)
        # Show only entries logged since the unit last entered the active
        # state, so output from previous service runs is not repeated.
        ret_code, _, _ = \
            ssh.exec_command_sudo('journalctl --no-pager --unit={name} '
                                  '--since="$(echo `systemctl show -p '
                                  'ActiveEnterTimestamp {name}` | '
                                  'awk \'{{print $2 $3}}\')"'.
                                  format(name=service))
        if int(ret_code) != 0:
            raise RuntimeError('DUT {host} failed to get logs from unit {name}'.
                               format(host=node['host'], name=service))
50 def get_service_logs_on_all_duts(nodes, service):
51 """Get specific service unit logs by journalctl from all DUTs.
53 :param nodes: Nodes in the topology.
54 :param service: Service unit name.
58 for node in nodes.values():
59 if node['type'] == NodeType.DUT:
60 DUTSetup.get_service_logs(node, service)
63 def start_service(node, service):
64 """Start up the named service on node.
66 :param node: Node in the topology.
67 :param service: Service unit name.
73 # We are doing restart. With this we do not care if service
76 ssh.exec_command_sudo('service {name} restart'.
77 format(name=service), timeout=120)
79 raise RuntimeError('DUT {host} failed to start service {name}'.
80 format(host=node['host'], name=service))
82 DUTSetup.get_service_logs(node, service)
85 def start_vpp_service_on_all_duts(nodes):
86 """Start up the VPP service on all nodes.
88 :param nodes: Nodes in the topology.
91 for node in nodes.values():
92 if node['type'] == NodeType.DUT:
93 DUTSetup.start_service(node, Constants.VPP_UNIT)
96 def vpp_show_version_verbose(node):
97 """Run "show version verbose" CLI command.
99 :param node: Node to run command on.
103 vat.execute_script("show_version_verbose.vat", node, json_out=False)
106 vat.script_should_have_passed()
107 except AssertionError:
108 raise RuntimeError('Failed to get VPP version on host: {name}'.
109 format(name=node['host']))
112 def show_vpp_version_on_all_duts(nodes):
113 """Show VPP version verbose on all DUTs.
115 :param nodes: VPP nodes
118 for node in nodes.values():
119 if node['type'] == NodeType.DUT:
120 DUTSetup.vpp_show_version_verbose(node)
123 def vpp_show_interfaces(node):
124 """Run "show interface" CLI command.
126 :param node: Node to run command on.
130 vat.execute_script("show_interface.vat", node, json_out=False)
133 vat.script_should_have_passed()
134 except AssertionError:
135 raise RuntimeError('Failed to get VPP interfaces on host: {name}'.
136 format(name=node['host']))
139 def vpp_api_trace_save(node):
140 """Run "api trace save" CLI command.
142 :param node: Node to run command on.
146 vat.execute_script("api_trace_save.vat", node, json_out=False)
149 def vpp_api_trace_dump(node):
150 """Run "api trace custom-dump" CLI command.
152 :param node: Node to run command on.
156 vat.execute_script("api_trace_dump.vat", node, json_out=False)
159 def setup_all_duts(nodes):
160 """Prepare all DUTs in given topology for test execution."""
161 for node in nodes.values():
162 if node['type'] == NodeType.DUT:
163 DUTSetup.setup_dut(node)
167 """Run script over SSH to setup the DUT node.
169 :param node: DUT node to set up.
172 :raises Exception: If the DUT setup fails.
178 ssh.exec_command('sudo -Sn bash {0}/{1}/dut_setup.sh'.
179 format(Constants.REMOTE_FW_DIR,
180 Constants.RESOURCES_LIB_SH), timeout=120)
182 raise RuntimeError('DUT test setup script failed at node {name}'.
183 format(name=node['host']))
186 def get_vpp_pid(node):
187 """Get PID of running VPP process.
189 :param node: DUT node.
193 :raises RuntimeError: If it is not possible to get the PID.
200 logger.trace('Try {}: Get VPP PID'.format(i))
201 ret_code, stdout, stderr = ssh.exec_command('pidof vpp')
204 raise RuntimeError('Not possible to get PID of VPP process '
205 'on node: {0}\n {1}'.
206 format(node['host'], stdout + stderr))
208 if len(stdout.splitlines()) == 1:
210 elif not stdout.splitlines():
211 logger.debug("No VPP PID found on node {0}".
212 format(node['host']))
215 logger.debug("More then one VPP PID found on node {0}".
216 format(node['host']))
218 for line in stdout.splitlines():
219 ret_list.append(int(line))
225 def get_vpp_pids(nodes):
226 """Get PID of running VPP process on all DUTs.
228 :param nodes: DUT nodes.
235 for node in nodes.values():
236 if node['type'] == NodeType.DUT:
237 pids[node['host']] = DUTSetup.get_vpp_pid(node)
241 def vpp_show_crypto_device_mapping(node):
242 """Run "show crypto device mapping" CLI command.
244 :param node: Node to run command on.
248 vat.execute_script("show_crypto_device_mapping.vat", node,
252 def crypto_device_verify(node, force_init=False, numvfs=32):
253 """Verify if Crypto QAT device virtual functions are initialized on all
254 DUTs. If parameter force initialization is set to True, then try to
255 initialize or remove VFs on QAT.
257 :param node: DUT node.
258 :param force_init: If True then try to initialize to specific value.
259 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
261 :type force_init: bool
264 :raises RuntimeError: If QAT VFs are not created and force init is set
267 pci_addr = Topology.get_cryptodev(node)
268 sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)
270 if sriov_numvfs != numvfs:
272 # QAT is not initialized and we want to initialize with numvfs
273 DUTSetup.crypto_device_init(node, numvfs)
275 raise RuntimeError('QAT device failed to create VFs on {host}'.
276 format(host=node['host']))
279 def crypto_device_init(node, numvfs):
280 """Init Crypto QAT device virtual functions on DUT.
282 :param node: DUT node.
283 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
287 :raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
289 pci_addr = Topology.get_cryptodev(node)
291 # QAT device must be re-bound to kernel driver before initialization.
292 DUTSetup.verify_kernel_module(node, 'qat_dh895xcc', force_load=True)
294 # Stop VPP to prevent deadlock.
295 VPPUtil.stop_vpp_service(node)
297 current_driver = DUTSetup.get_pci_dev_driver(
298 node, pci_addr.replace(':', r'\:'))
299 if current_driver is not None:
300 DUTSetup.pci_driver_unbind(node, pci_addr)
302 # Bind to kernel driver.
303 DUTSetup.pci_driver_bind(node, pci_addr, 'dh895xcc')
305 # Initialize QAT VFs.
307 DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)
310 def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
311 """Get PCI address of Virtual Function.
313 :param node: DUT node.
314 :param pf_pci_addr: Physical Function PCI address.
315 :param vf_id: Virtual Function number.
317 :type pf_pci_addr: str
319 :returns: Virtual Function PCI address.
321 :raises RuntimeError: If failed to get Virtual Function PCI address.
324 "'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\
325 format(pci=pf_pci_addr, vf_id=vf_id)
326 message = 'Failed to get virtual function PCI address.'
328 stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
331 return stdout.strip()
334 def get_sriov_numvfs(node, pf_pci_addr):
335 """Get number of SR-IOV VFs.
337 :param node: DUT node.
338 :param pf_pci_addr: Physical Function PCI device address.
340 :type pf_pci_addr: str
341 :returns: Number of VFs.
343 :raises RuntimeError: If PCI device is not SR-IOV capable.
345 command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\
346 format(pci=pf_pci_addr.replace(':', r'\:'))
347 message = 'PCI device {pci} is not a SR-IOV device.'.\
348 format(pci=pf_pci_addr)
351 stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
354 sriov_numvfs = int(stdout)
356 logger.trace('Reading sriov_numvfs info failed on {host}'.
357 format(host=node['host']))
362 def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
363 """Init or reset SR-IOV virtual functions by setting its number on PCI
364 device on DUT. Setting to zero removes all VFs.
366 :param node: DUT node.
367 :param pf_pci_addr: Physical Function PCI device address.
368 :param numvfs: Number of VFs to initialize, 0 - removes the VFs.
370 :type pf_pci_addr: str
372 :raises RuntimeError: Failed to create VFs on PCI.
375 "'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\
376 format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
377 message = 'Failed to create {num} VFs on {pci} device on {host}'.\
378 format(num=numvfs, pci=pf_pci_addr, host=node['host'])
380 exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
383 def pci_driver_unbind(node, pci_addr):
384 """Unbind PCI device from current driver on node.
386 :param node: DUT node.
387 :param pci_addr: PCI device address.
390 :raises RuntimeError: If PCI device unbind failed.
393 "'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\
394 format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
395 message = 'Failed to unbind PCI device {pci} on {host}'.\
396 format(pci=pci_addr, host=node['host'])
398 exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
401 def pci_driver_bind(node, pci_addr, driver):
402 """Bind PCI device to driver on node.
404 :param node: DUT node.
405 :param pci_addr: PCI device address.
406 :param driver: Driver to bind.
410 :raises RuntimeError: If PCI device bind failed.
412 message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\
413 format(pci=pci_addr, driver=driver, host=node['host'])
416 "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
417 format(driver=driver, pci=pci_addr.replace(':', r'\:'))
419 exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
422 "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
423 format(pci=pci_addr, driver=driver)
425 exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
428 "'echo | tee /sys/bus/pci/devices/{pci}/driver_override'".\
429 format(pci=pci_addr.replace(':', r'\:'))
431 exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
434 def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
435 """Unbind Virtual Function from driver on node.
437 :param node: DUT node.
438 :param pf_pci_addr: PCI device address.
439 :param vf_id: Virtual Function ID.
441 :type pf_pci_addr: str
443 :raises RuntimeError: If Virtual Function unbind failed.
445 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
446 vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
447 format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
450 "'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\
451 format(vf_pci_addr=vf_pci_addr, vf_path=vf_path)
453 message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\
454 format(vf_pci_addr=vf_pci_addr, host=node['host'])
456 exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
459 def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
460 """Bind Virtual Function to driver on node.
462 :param node: DUT node.
463 :param pf_pci_addr: PCI device address.
464 :param vf_id: Virtual Function ID.
465 :param driver: Driver to bind.
467 :type pf_pci_addr: str
470 :raises RuntimeError: If PCI device bind failed.
472 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
473 vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
474 format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
476 message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\
477 format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host'])
480 "'echo {driver} | tee {vf_path}/driver_override'".\
481 format(driver=driver, vf_path=vf_path)
483 exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
486 "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
487 format(vf_pci_addr=vf_pci_addr, driver=driver)
489 exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
492 "'echo | tee {vf_path}/driver_override'".\
493 format(vf_path=vf_path)
495 exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
498 def get_pci_dev_driver(node, pci_addr):
499 """Get current PCI device driver on node.
502 # lspci -vmmks 0000:00:05.0
504 Class: Ethernet controller
506 Device: Virtio network device
507 SVendor: Red Hat, Inc
512 :param node: DUT node.
513 :param pci_addr: PCI device address.
516 :returns: Driver or None
517 :raises RuntimeError: If PCI rescan or lspci command execution failed.
518 :raises RuntimeError: If it is not possible to get the interface driver
519 information from the node.
525 logger.trace('Try number {0}: Get PCI device driver'.format(i))
527 cmd = 'lspci -vmmks {0}'.format(pci_addr)
528 ret_code, stdout, _ = ssh.exec_command(cmd)
530 raise RuntimeError("'{0}' failed on '{1}'"
531 .format(cmd, node['host']))
533 for line in stdout.splitlines():
539 name, value = line.split("\t", 1)
541 if name == "Driver:":
543 if name == 'Driver:':
547 logger.trace('Driver for PCI device {} not found, executing '
548 'pci rescan and retrying'.format(pci_addr))
549 cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
550 ret_code, _, _ = ssh.exec_command_sudo(cmd)
551 if int(ret_code) != 0:
552 raise RuntimeError("'{0}' failed on '{1}'"
553 .format(cmd, node['host']))
558 def verify_kernel_module(node, module, force_load=False):
559 """Verify if kernel module is loaded on node. If parameter force
560 load is set to True, then try to load the modules.
563 :param module: Module to verify.
564 :param force_load: If True then try to load module.
567 :type force_load: bool
568 :raises RuntimeError: If module is not loaded or failed to load.
570 command = 'grep -w {module} /proc/modules'.format(module=module)
571 message = 'Kernel module {module} is not loaded on host {host}'.\
572 format(module=module, host=node['host'])
575 exec_cmd_no_error(node, command, timeout=30, sudo=False,
579 # Module is not loaded and we want to load it
580 DUTSetup.load_kernel_module(node, module)
585 def verify_kernel_module_on_all_duts(nodes, module, force_load=False):
586 """Verify if kernel module is loaded on all DUTs. If parameter force
587 load is set to True, then try to load the modules.
589 :param node: DUT nodes.
590 :param module: Module to verify.
591 :param force_load: If True then try to load module.
594 :type force_load: bool
596 for node in nodes.values():
597 if node['type'] == NodeType.DUT:
598 DUTSetup.verify_kernel_module(node, module, force_load)
601 def verify_uio_driver_on_all_duts(nodes):
602 """Verify if uio driver kernel module is loaded on all DUTs. If module
603 is not present it will try to load it.
605 :param node: DUT nodes.
608 for node in nodes.values():
609 if node['type'] == NodeType.DUT:
610 uio_driver = Topology.get_uio_driver(node)
611 DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
614 def load_kernel_module(node, module):
615 """Load kernel module on node.
617 :param node: DUT node.
618 :param module: Module to load.
622 :raises RuntimeError: If loading failed.
624 command = 'modprobe {module}'.format(module=module)
625 message = 'Failed to load {module} on host {host}'.\
626 format(module=module, host=node['host'])
628 exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
631 def vpp_enable_traces_on_all_duts(nodes):
632 """Enable vpp packet traces on all DUTs in the given topology.
634 :param nodes: Nodes in the topology.
637 for node in nodes.values():
638 if node['type'] == NodeType.DUT:
639 DUTSetup.vpp_enable_traces_on_dut(node)
642 def vpp_enable_traces_on_dut(node):
643 """Enable vpp packet traces on the DUT node.
645 :param node: DUT node to set up.
650 vat.execute_script("enable_dpdk_traces.vat", node, json_out=False)
651 vat.execute_script("enable_vhost_user_traces.vat", node, json_out=False)
652 vat.execute_script("enable_memif_traces.vat", node, json_out=False)
655 def install_vpp_on_all_duts(nodes, vpp_pkg_dir, vpp_rpm_pkgs, vpp_deb_pkgs):
656 """Install VPP on all DUT nodes.
658 :param nodes: Nodes in the topology.
659 :param vpp_pkg_dir: Path to directory where VPP packages are stored.
660 :param vpp_rpm_pkgs: List of VPP rpm packages to be installed.
661 :param vpp_deb_pkgs: List of VPP deb packages to be installed.
663 :type vpp_pkg_dir: str
664 :type vpp_rpm_pkgs: list
665 :type vpp_deb_pkgs: list
666 :raises RuntimeError: If failed to remove or install VPP.
669 logger.debug("Installing VPP")
671 for node in nodes.values():
672 if node['type'] == NodeType.DUT:
673 logger.debug("Installing VPP on node {0}".format(node['host']))
678 cmd = "[[ -f /etc/redhat-release ]]"
679 return_code, _, _ = ssh.exec_command(cmd)
680 if not int(return_code):
681 # workaroud - uninstall existing vpp installation until
682 # start-testcase script is updated on all virl servers
683 rpm_pkgs_remove = "vpp*"
684 cmd_u = 'yum -y remove "{0}"'.format(rpm_pkgs_remove)
685 r_rcode, _, r_err = ssh.exec_command_sudo(cmd_u, timeout=90)
687 raise RuntimeError('Failed to remove previous VPP'
688 'installation on host {0}:\n{1}'
689 .format(node['host'], r_err))
691 rpm_pkgs = "*.rpm ".join(str(vpp_pkg_dir + pkg)
692 for pkg in vpp_rpm_pkgs) + "*.rpm"
693 cmd_i = "rpm -ivh {0}".format(rpm_pkgs)
694 ret_code, _, err = ssh.exec_command_sudo(cmd_i, timeout=90)
696 raise RuntimeError('Failed to install VPP on host {0}:'
697 '\n{1}'.format(node['host'], err))
699 ssh.exec_command_sudo("rpm -qai vpp*")
700 logger.info("VPP installed on node {0}".
701 format(node['host']))
703 # workaroud - uninstall existing vpp installation until
704 # start-testcase script is updated on all virl servers
705 deb_pkgs_remove = "vpp*"
706 cmd_u = 'apt-get purge -y "{0}"'.format(deb_pkgs_remove)
707 r_rcode, _, r_err = ssh.exec_command_sudo(cmd_u, timeout=90)
709 raise RuntimeError('Failed to remove previous VPP'
710 'installation on host {0}:\n{1}'
711 .format(node['host'], r_err))
712 deb_pkgs = "*.deb ".join(str(vpp_pkg_dir + pkg)
713 for pkg in vpp_deb_pkgs) + "*.deb"
714 cmd_i = "dpkg -i --force-all {0}".format(deb_pkgs)
715 ret_code, _, err = ssh.exec_command_sudo(cmd_i, timeout=90)
717 raise RuntimeError('Failed to install VPP on host {0}:'
718 '\n{1}'.format(node['host'], err))
720 ssh.exec_command_sudo("dpkg -l | grep vpp")
721 logger.info("VPP installed on node {0}".
722 format(node['host']))
727 def verify_vpp_on_dut(node):
728 """Verify that VPP is installed on DUT node.
730 :param node: DUT node.
732 :raises RuntimeError: If failed to restart VPP, get VPP version
733 or get VPP interfaces.
736 logger.debug("Verify VPP on node {0}".format(node['host']))
738 DUTSetup.vpp_show_version_verbose(node)
739 DUTSetup.vpp_show_interfaces(node)
742 def verify_vpp_on_all_duts(nodes):
743 """Verify that VPP is installed on all DUT nodes.
745 :param nodes: Nodes in the topology.
749 logger.debug("Verify VPP on all DUTs")
751 DUTSetup.start_vpp_service_on_all_duts(nodes)
753 for node in nodes.values():
754 if node['type'] == NodeType.DUT:
755 DUTSetup.verify_vpp_on_dut(node)
759 def get_huge_page_size(node):
760 """Get default size of huge pages in system.
762 :param node: Node in the topology.
764 :returns: Default size of free huge pages in system.
766 :raises RuntimeError: If reading failed for three times.
772 ret_code, stdout, _ = ssh.exec_command_sudo(
773 "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
776 huge_size = int(stdout)
778 logger.trace('Reading huge page size information failed')
782 raise RuntimeError('Getting huge page size information failed.')
786 def get_huge_page_free(node, huge_size):
787 """Get number of free huge pages in system.
789 :param node: Node in the topology.
790 :param huge_size: Size of hugepages.
793 :returns: Number of free huge pages in system.
795 :raises RuntimeError: If reading failed for three times.
797 # TODO: add numa aware option
802 ret_code, stdout, _ = ssh.exec_command_sudo(
803 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
807 huge_free = int(stdout)
809 logger.trace('Reading free huge pages information failed')
813 raise RuntimeError('Getting free huge pages information failed.')
817 def get_huge_page_total(node, huge_size):
818 """Get total number of huge pages in system.
820 :param node: Node in the topology.
821 :param huge_size: Size of hugepages.
825 :returns: Total number of huge pages in system.
827 :raises RuntimeError: If reading failed for three times.
829 # TODO: add numa aware option
834 ret_code, stdout, _ = ssh.exec_command_sudo(
835 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
839 huge_total = int(stdout)
841 logger.trace('Reading total huge pages information failed')
845 raise RuntimeError('Getting total huge pages information failed.')
849 def check_huge_page(node, huge_mnt, mem_size, allocate=False):
850 """Check if there is enough HugePages in system. If allocate is set to
851 true, try to allocate more HugePages.
853 :param node: Node in the topology.
854 :param huge_mnt: HugePage mount point.
855 :param mem_size: Requested memory in MB.
856 :param allocate: Whether to allocate more memory if not enough.
862 :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
863 or increasing map count failed.
865 # TODO: split function into smaller parts.
869 # Get huge pages information
870 huge_size = DUTSetup.get_huge_page_size(node)
871 huge_free = DUTSetup.get_huge_page_free(node, huge_size)
872 huge_total = DUTSetup.get_huge_page_total(node, huge_size)
874 # Check if memory reqested is available on host
875 if (mem_size * 1024) > (huge_free * huge_size):
876 # If we want to allocate hugepage dynamically
878 mem_needed = (mem_size * 1024) - (huge_free * huge_size)
879 huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
880 max_map_count = huge_to_allocate*4
881 # Increase maximum number of memory map areas a process may have
882 ret_code, _, _ = ssh.exec_command_sudo(
883 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
884 format(max_map_count))
885 if int(ret_code) != 0:
886 raise RuntimeError('Increase map count failed on {host}'.
887 format(host=node['host']))
888 # Increase hugepage count
889 ret_code, _, _ = ssh.exec_command_sudo(
890 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
891 format(huge_to_allocate))
892 if int(ret_code) != 0:
893 raise RuntimeError('Mount huge pages failed on {host}'.
894 format(host=node['host']))
895 # If we do not want to allocate dynamicaly end with error
897 raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
898 format(huge_free, huge_free * huge_size))
899 # Check if huge pages mount point exist
901 ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
902 if int(ret_code) == 0:
903 for line in stdout.splitlines():
904 # Try to find something like:
905 # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
907 if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
910 # If huge page mount point not exist create one
912 ret_code, _, _ = ssh.exec_command_sudo(
913 'mkdir -p {mnt}'.format(mnt=huge_mnt))
914 if int(ret_code) != 0:
915 raise RuntimeError('Create mount dir failed on {host}'.
916 format(host=node['host']))
917 ret_code, _, _ = ssh.exec_command_sudo(
918 'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
919 format(mnt=huge_mnt))
920 if int(ret_code) != 0:
921 raise RuntimeError('Mount huge pages failed on {host}'.
922 format(host=node['host']))