Fix various pylint violations
[csit.git] / resources / libraries / python / DUTSetup.py
index 9c78cfe..8f9e94d 100644 (file)
@@ -42,7 +42,7 @@ class DUTSetup(object):
                                   'ActiveEnterTimestamp {name}` | '
                                   'awk \'{{print $2 $3}}\')"'.
                                   format(name=service))
-        if int(ret_code) != 0:
+        if int(ret_code):
             raise RuntimeError('DUT {host} failed to get logs from unit {name}'.
                                format(host=node['host'], name=service))
 
@@ -75,7 +75,7 @@ class DUTSetup(object):
         ret_code, _, _ = \
             ssh.exec_command_sudo('service {name} restart'.
                                   format(name=service), timeout=120)
-        if int(ret_code) != 0:
+        if int(ret_code):
             raise RuntimeError('DUT {host} failed to start service {name}'.
                                format(host=node['host'], name=service))
 
@@ -178,7 +178,7 @@ class DUTSetup(object):
             ssh.exec_command('sudo -Sn bash {0}/{1}/dut_setup.sh'.
                              format(Constants.REMOTE_FW_DIR,
                                     Constants.RESOURCES_LIB_SH), timeout=120)
-        if int(ret_code) != 0:
+        if int(ret_code):
             raise RuntimeError('DUT test setup script failed at node {name}'.
                                format(name=node['host']))
 
@@ -200,14 +200,14 @@ class DUTSetup(object):
             logger.trace('Try {}: Get VPP PID'.format(i))
             ret_code, stdout, stderr = ssh.exec_command('pidof vpp')
 
-            if int(ret_code) != 0:
+            if int(ret_code):
                 raise RuntimeError('Not possible to get PID of VPP process '
                                    'on node: {0}\n {1}'.
                                    format(node['host'], stdout + stderr))
 
             if len(stdout.splitlines()) == 1:
                 return int(stdout)
-            elif len(stdout.splitlines()) == 0:
+            elif not stdout.splitlines():
                 logger.debug("No VPP PID found on node {0}".
                              format(node['host']))
                 continue
@@ -274,7 +274,7 @@ class DUTSetup(object):
         # Try to read number of VFs from PCI address of QAT device
         for _ in range(3):
             ret_code, stdout, _ = ssh.exec_command(cmd)
-            if int(ret_code) == 0:
+            if not int(ret_code):
                 try:
                     sriov_numvfs = int(stdout)
                 except ValueError:
@@ -328,7 +328,7 @@ class DUTSetup(object):
                 format(numvfs, cryptodev.replace(':', r'\:'), timeout=180)
             ret_code, _, _ = ssh.exec_command_sudo("sh -c '{0}'".format(cmd))
 
-            if int(ret_code) != 0:
+            if int(ret_code):
                 raise RuntimeError('Failed to initialize {0} VFs on QAT device '
                                    ' on host {1}'.format(numvfs, node['host']))
 
@@ -351,7 +351,7 @@ class DUTSetup(object):
             "sh -c 'echo {0} | tee /sys/bus/pci/devices/{1}/driver/unbind'"
             .format(pci_addr, pci_addr.replace(':', r'\:')), timeout=180)
 
-        if int(ret_code) != 0:
+        if int(ret_code):
             raise RuntimeError('Failed to unbind PCI device {0} from driver on '
                                'host {1}'.format(pci_addr, node['host']))
 
@@ -376,7 +376,7 @@ class DUTSetup(object):
             "sh -c 'echo {0} | tee /sys/bus/pci/drivers/{1}/bind'".format(
                 pci_addr, driver), timeout=180)
 
-        if int(ret_code) != 0:
+        if int(ret_code):
             raise RuntimeError('Failed to bind PCI device {0} to {1} driver on '
                                'host {2}'.format(pci_addr, driver,
                                                  node['host']))
@@ -385,32 +385,40 @@ class DUTSetup(object):
     def get_pci_dev_driver(node, pci_addr):
         """Get current PCI device driver on node.
 
+        .. note::
+            # lspci -vmmks 0000:00:05.0
+            Slot:   00:05.0
+            Class:  Ethernet controller
+            Vendor: Red Hat, Inc
+            Device: Virtio network device
+            SVendor:        Red Hat, Inc
+            SDevice:        Device 0001
+            PhySlot:        5
+            Driver: virtio-pci
+
         :param node: DUT node.
         :param pci_addr: PCI device address.
         :type node: dict
         :type pci_addr: str
         :returns: Driver or None
         :raises RuntimeError: If PCI rescan or lspci command execution failed.
+        :raises RuntimeError: If it is not possible to get the interface driver
+            information from the node.
         """
         ssh = SSH()
         ssh.connect(node)
 
         for i in range(3):
-            logger.trace('Try {0}: Get interface driver'.format(i))
-            cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
-            ret_code, _, _ = ssh.exec_command_sudo(cmd)
-            if int(ret_code) != 0:
-                raise RuntimeError("'{0}' failed on '{1}'"
-                                   .format(cmd, node['host']))
+            logger.trace('Try number {0}: Get PCI device driver'.format(i))
 
             cmd = 'lspci -vmmks {0}'.format(pci_addr)
             ret_code, stdout, _ = ssh.exec_command(cmd)
-            if int(ret_code) != 0:
+            if int(ret_code):
                 raise RuntimeError("'{0}' failed on '{1}'"
                                    .format(cmd, node['host']))
 
             for line in stdout.splitlines():
-                if len(line) == 0:
+                if not line:
                     continue
                 name = None
                 value = None
@@ -421,31 +429,38 @@ class DUTSetup(object):
                         return None
                 if name == 'Driver:':
                     return value
-        else:
-            return None
+
+            if i < 2:
+                logger.trace('Driver for PCI device {} not found, executing '
+                             'pci rescan and retrying'.format(pci_addr))
+                cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
+                ret_code, _, _ = ssh.exec_command_sudo(cmd)
+                if int(ret_code) != 0:
+                    raise RuntimeError("'{0}' failed on '{1}'"
+                                       .format(cmd, node['host']))
+
+        return None
 
     @staticmethod
     def kernel_module_verify(node, module, force_load=False):
-        """Verify if kernel module is loaded on all DUTs. If parameter force
+        """Verify if kernel module is loaded on node. If parameter force
         load is set to True, then try to load the modules.
 
-        :param node: DUT node.
+        :param node: Node.
         :param module: Module to verify.
         :param force_load: If True then try to load module.
         :type node: dict
         :type module: str
         :type force_load: bool
-        :returns: nothing
         :raises RuntimeError: If module is not loaded or failed to load.
         """
-
         ssh = SSH()
         ssh.connect(node)
 
         cmd = 'grep -w {0} /proc/modules'.format(module)
         ret_code, _, _ = ssh.exec_command(cmd)
 
-        if int(ret_code) != 0:
+        if int(ret_code):
             if force_load:
                 # Module is not loaded and we want to load it
                 DUTSetup.kernel_module_load(node, module)
@@ -453,6 +468,35 @@ class DUTSetup(object):
                 raise RuntimeError('Kernel module {0} is not loaded on host '
                                    '{1}'.format(module, node['host']))
 
+    @staticmethod
+    def kernel_module_verify_on_all_duts(nodes, module, force_load=False):
+        """Verify if kernel module is loaded on all DUTs. If parameter force
+        load is set to True, then try to load the modules.
+
+        :param nodes: DUT nodes.
+        :param module: Module to verify.
+        :param force_load: If True then try to load module.
+        :type nodes: dict
+        :type module: str
+        :type force_load: bool
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                DUTSetup.kernel_module_verify(node, module, force_load)
+
+    @staticmethod
+    def verify_uio_driver_on_all_duts(nodes):
+        """Verify if uio driver kernel module is loaded on all DUTs. If module
+        is not present it will try to load it.
+
+        :param nodes: DUT nodes.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                uio_driver = Topology.get_uio_driver(node)
+                DUTSetup.kernel_module_verify(node, uio_driver, force_load=True)
+
     @staticmethod
     def kernel_module_load(node, module):
         """Load kernel module on node.
@@ -470,7 +514,7 @@ class DUTSetup(object):
 
         ret_code, _, _ = ssh.exec_command_sudo("modprobe {0}".format(module))
 
-        if int(ret_code) != 0:
+        if int(ret_code):
             raise RuntimeError('Failed to load {0} kernel module on host {1}'.
                                format(module, node['host']))
 
@@ -524,13 +568,13 @@ class DUTSetup(object):
 
                 cmd = "[[ -f /etc/redhat-release ]]"
                 return_code, _, _ = ssh.exec_command(cmd)
-                if int(return_code) == 0:
+                if not int(return_code):
                     # workaroud - uninstall existing vpp installation until
                     # start-testcase script is updated on all virl servers
                     rpm_pkgs_remove = "vpp*"
                     cmd_u = 'yum -y remove "{0}"'.format(rpm_pkgs_remove)
                     r_rcode, _, r_err = ssh.exec_command_sudo(cmd_u, timeout=90)
-                    if int(r_rcode) != 0:
+                    if int(r_rcode):
                         raise RuntimeError('Failed to remove previous VPP'
                                            'installation on host {0}:\n{1}'
                                            .format(node['host'], r_err))
@@ -539,7 +583,7 @@ class DUTSetup(object):
                                              for pkg in vpp_rpm_pkgs) + "*.rpm"
                     cmd_i = "rpm -ivh {0}".format(rpm_pkgs)
                     ret_code, _, err = ssh.exec_command_sudo(cmd_i, timeout=90)
-                    if int(ret_code) != 0:
+                    if int(ret_code):
                         raise RuntimeError('Failed to install VPP on host {0}:'
                                            '\n{1}'.format(node['host'], err))
                     else:
@@ -552,7 +596,7 @@ class DUTSetup(object):
                     deb_pkgs_remove = "vpp*"
                     cmd_u = 'apt-get purge -y "{0}"'.format(deb_pkgs_remove)
                     r_rcode, _, r_err = ssh.exec_command_sudo(cmd_u, timeout=90)
-                    if int(r_rcode) != 0:
+                    if int(r_rcode):
                         raise RuntimeError('Failed to remove previous VPP'
                                            'installation on host {0}:\n{1}'
                                            .format(node['host'], r_err))
@@ -560,7 +604,7 @@ class DUTSetup(object):
                                              for pkg in vpp_deb_pkgs) + "*.deb"
                     cmd_i = "dpkg -i --force-all {0}".format(deb_pkgs)
                     ret_code, _, err = ssh.exec_command_sudo(cmd_i, timeout=90)
-                    if int(ret_code) != 0:
+                    if int(ret_code):
                         raise RuntimeError('Failed to install VPP on host {0}:'
                                            '\n{1}'.format(node['host'], err))
                     else:
@@ -600,3 +644,169 @@ class DUTSetup(object):
 
         DUTSetup.vpp_show_version_verbose(node)
         DUTSetup.vpp_show_interfaces(node)
+
+    @staticmethod
+    def get_huge_page_size(node):
+        """Get default size of huge pages in system.
+
+        :param node: Node in the topology.
+        :type node: dict
+        :returns: Default size of free huge pages in system.
+        :rtype: int
+        :raises RuntimeError: If reading failed for three times.
+        """
+        ssh = SSH()
+        ssh.connect(node)
+
+        for _ in range(3):
+            ret_code, stdout, _ = ssh.exec_command_sudo(
+                "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
+            if ret_code == 0:
+                try:
+                    huge_size = int(stdout)
+                except ValueError:
+                    logger.trace('Reading huge page size information failed')
+                else:
+                    break
+        else:
+            raise RuntimeError('Getting huge page size information failed.')
+        return huge_size
+
+    @staticmethod
+    def get_huge_page_free(node, huge_size):
+        """Get number of free huge pages in system.
+
+        :param node: Node in the topology.
+        :param huge_size: Size of hugepages.
+        :type node: dict
+        :type huge_size: int
+        :returns: Number of free huge pages in system.
+        :rtype: int
+        :raises RuntimeError: If reading failed for three times.
+        """
+        # TODO: add numa aware option
+        ssh = SSH()
+        ssh.connect(node)
+
+        for _ in range(3):
+            ret_code, stdout, _ = ssh.exec_command_sudo(
+                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
+                format(huge_size))
+            if ret_code == 0:
+                try:
+                    huge_free = int(stdout)
+                except ValueError:
+                    logger.trace('Reading free huge pages information failed')
+                else:
+                    break
+        else:
+            raise RuntimeError('Getting free huge pages information failed.')
+        return huge_free
+
+    @staticmethod
+    def get_huge_page_total(node, huge_size):
+        """Get total number of huge pages in system.
+
+        :param node: Node in the topology.
+        :param huge_size: Size of hugepages.
+        :type node: dict
+        :type huge_size: int
+
+        :returns: Total number of huge pages in system.
+        :rtype: int
+        :raises RuntimeError: If reading failed for three times.
+        """
+        # TODO: add numa aware option
+        ssh = SSH()
+        ssh.connect(node)
+
+        for _ in range(3):
+            ret_code, stdout, _ = ssh.exec_command_sudo(
+                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
+                format(huge_size))
+            if ret_code == 0:
+                try:
+                    huge_total = int(stdout)
+                except ValueError:
+                    logger.trace('Reading total huge pages information failed')
+                else:
+                    break
+        else:
+            raise RuntimeError('Getting total huge pages information failed.')
+        return huge_total
+
+    @staticmethod
+    def check_huge_page(node, huge_mnt, mem_size, allocate=False):
+        """Check if there is enough HugePages in system. If allocate is set to
+        true, try to allocate more HugePages.
+
+        :param node: Node in the topology.
+        :param huge_mnt: HugePage mount point.
+        :param mem_size: Requested memory in MB.
+        :param allocate: Whether to allocate more memory if not enough.
+        :type node: dict
+        :type huge_mnt: str
+        :type mem_size: str
+        :type allocate: bool
+
+        :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
+        or increasing map count failed.
+        """
+        # TODO: split function into smaller parts.
+        ssh = SSH()
+        ssh.connect(node)
+
+        # Get huge pages information
+        huge_size = DUTSetup.get_huge_page_size(node)
+        huge_free = DUTSetup.get_huge_page_free(node, huge_size)
+        huge_total = DUTSetup.get_huge_page_total(node, huge_size)
+
+        # Check if memory requested is available on host
+        if (mem_size * 1024) > (huge_free * huge_size):
+            # If we want to allocate hugepage dynamically
+            if allocate:
+                mem_needed = (mem_size * 1024) - (huge_free * huge_size)
+                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
+                max_map_count = huge_to_allocate*4
+                # Increase maximum number of memory map areas a process may have
+                ret_code, _, _ = ssh.exec_command_sudo(
+                    'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
+                    format(max_map_count))
+                if int(ret_code) != 0:
+                    raise RuntimeError('Increase map count failed on {host}'.
+                                       format(host=node['host']))
+                # Increase hugepage count
+                ret_code, _, _ = ssh.exec_command_sudo(
+                    'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
+                    format(huge_to_allocate))
+                if int(ret_code) != 0:
+                    raise RuntimeError('Mount huge pages failed on {host}'.
+                                       format(host=node['host']))
+            # If we do not want to allocate dynamically, end with error
+            else:
+                raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
+                                   format(huge_free, huge_free * huge_size))
+        # Check if huge pages mount point exists
+        has_huge_mnt = False
+        ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
+        if int(ret_code) == 0:
+            for line in stdout.splitlines():
+                # Try to find something like:
+                # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
+                mount = line.split()
+                if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
+                    has_huge_mnt = True
+                    break
+        # If huge page mount point does not exist, create one
+        if not has_huge_mnt:
+            ret_code, _, _ = ssh.exec_command_sudo(
+                'mkdir -p {mnt}'.format(mnt=huge_mnt))
+            if int(ret_code) != 0:
+                raise RuntimeError('Create mount dir failed on {host}'.
+                                   format(host=node['host']))
+            ret_code, _, _ = ssh.exec_command_sudo(
+                'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
+                format(mnt=huge_mnt))
+            if int(ret_code) != 0:
+                raise RuntimeError('Mount huge pages failed on {host}'.
+                                   format(host=node['host']))