DUTSetup: FIX Format symbol
[csit.git] / resources / libraries / python / DUTSetup.py
index a20b2d7..16acfba 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 """DUT setup library."""
 
+from time import sleep
 from robot.api import logger
 
-from resources.libraries.python.constants import Constants
-from resources.libraries.python.ssh import SSH, exec_cmd_no_error
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
 from resources.libraries.python.topology import NodeType, Topology
 
 
-class DUTSetup(object):
+class DUTSetup:
     """Contains methods for setting up DUTs."""
 
     @staticmethod
@@ -32,20 +33,16 @@ class DUTSetup(object):
         :type node: dict
         :type service: str
         """
-        if DUTSetup.running_in_container(node):
-            command = ('echo $(< /var/log/supervisord.log);'
-                       'echo $(< /tmp/*supervisor*.log)')
-        else:
-            command = ('journalctl --no-pager --unit={name} '
-                       '--since="$(echo `systemctl show -p '
-                       'ActiveEnterTimestamp {name}` | '
-                       'awk \'{{print $2 $3}}\')"'.
-                       format(name=service))
-        message = 'Node {host} failed to get logs from unit {name}'.\
-            format(host=node['host'], name=service)
+        command = u"cat /tmp/*supervisor*.log"\
+            if DUTSetup.running_in_container(node) \
+            else f"journalctl --no-pager _SYSTEMD_INVOCATION_ID=$(systemctl " \
+            f"show -p InvocationID --value {service})"
+
+        message = f"Node {node[u'host']} failed to get logs from unit {service}"
 
-        exec_cmd_no_error(node, command, timeout=30, sudo=True,
-                          message=message)
+        exec_cmd_no_error(
+            node, command, timeout=30, sudo=True, message=message
+        )
 
     @staticmethod
     def get_service_logs_on_all_duts(nodes, service):
@@ -57,9 +54,42 @@ class DUTSetup(object):
         :type service: str
         """
         for node in nodes.values():
-            if node['type'] == NodeType.DUT:
+            if node[u"type"] == NodeType.DUT:
                 DUTSetup.get_service_logs(node, service)
 
+    @staticmethod
+    def restart_service(node, service):
+        """Restart the named service on node.
+
+        :param node: Node in the topology.
+        :param service: Service unit name.
+        :type node: dict
+        :type service: str
+        """
+        command = f"supervisorctl restart {service}" \
+            if DUTSetup.running_in_container(node) \
+            else f"service {service} restart"
+        message = f"Node {node[u'host']} failed to restart service {service}"
+
+        exec_cmd_no_error(
+            node, command, timeout=180, sudo=True, message=message
+        )
+
+        DUTSetup.get_service_logs(node, service)
+
+    @staticmethod
+    def restart_service_on_all_duts(nodes, service):
+        """Restart the named service on all DUTs.
+
+        :param nodes: Nodes in the topology.
+        :param service: Service unit name.
+        :type nodes: dict
+        :type service: str
+        """
+        for node in nodes.values():
+            if node[u"type"] == NodeType.DUT:
+                DUTSetup.restart_service(node, service)
+
     @staticmethod
     def start_service(node, service):
         """Start up the named service on node.
@@ -69,14 +99,15 @@ class DUTSetup(object):
         :type node: dict
         :type service: str
         """
-        if DUTSetup.running_in_container(node):
-            command = 'supervisorctl restart {name}'.format(name=service)
-        else:
-            command = 'service {name} restart'.format(name=service)
-        message = 'Node {host} failed to start service {name}'.\
-            format(host=node['host'], name=service)
+        # TODO: change command to start once all parent functions are updated.
+        command = f"supervisorctl restart {service}" \
+            if DUTSetup.running_in_container(node) \
+            else f"service {service} restart"
+        message = f"Node {node[u'host']} failed to start service {service}"
 
-        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
+        exec_cmd_no_error(
+            node, command, timeout=180, sudo=True, message=message
+        )
 
         DUTSetup.get_service_logs(node, service)
 
@@ -84,13 +115,13 @@ class DUTSetup(object):
     def start_service_on_all_duts(nodes, service):
         """Start up the named service on all DUTs.
 
-        :param node: Nodes in the topology.
+        :param nodes: Nodes in the topology.
         :param service: Service unit name.
-        :type node: dict
+        :type nodes: dict
         :type service: str
         """
         for node in nodes.values():
-            if node['type'] == NodeType.DUT:
+            if node[u"type"] == NodeType.DUT:
                 DUTSetup.start_service(node, service)
 
     @staticmethod
@@ -102,64 +133,87 @@ class DUTSetup(object):
         :type node: dict
         :type service: str
         """
-        if DUTSetup.running_in_container(node):
-            command = 'supervisorctl stop {name}'.format(name=service)
-        else:
-            command = 'service {name} stop'.format(name=service)
-        message = 'Node {host} failed to stop service {name}'.\
-            format(host=node['host'], name=service)
+        DUTSetup.get_service_logs(node, service)
 
-        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
+        command = f"supervisorctl stop {service}" \
+            if DUTSetup.running_in_container(node) \
+            else f"service {service} stop"
+        message = f"Node {node[u'host']} failed to stop service {service}"
 
-        DUTSetup.get_service_logs(node, service)
+        exec_cmd_no_error(
+            node, command, timeout=180, sudo=True, message=message
+        )
 
     @staticmethod
     def stop_service_on_all_duts(nodes, service):
         """Stop the named service on all DUTs.
 
-        :param node: Nodes in the topology.
+        :param nodes: Nodes in the topology.
         :param service: Service unit name.
-        :type node: dict
+        :type nodes: dict
         :type service: str
         """
         for node in nodes.values():
-            if node['type'] == NodeType.DUT:
+            if node[u"type"] == NodeType.DUT:
                 DUTSetup.stop_service(node, service)
 
     @staticmethod
-    def setup_dut(node):
-        """Run script over SSH to setup the DUT node.
+    def kill_program(node, program, namespace=None):
+        """Kill program on the specified topology node.
 
-        :param node: DUT node to set up.
+        :param node: Topology node.
+        :param program: Program name.
+        :param namespace: Namespace program is running in.
         :type node: dict
-
-        :raises Exception: If the DUT setup fails.
+        :type program: str
+        :type namespace: str
         """
-        command = 'bash {0}/{1}/dut_setup.sh'.\
-            format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH)
-        message = 'DUT test setup script failed at node {name}'.\
-            format(name=node['host'])
-
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        host = node[u"host"]
+        cmd_timeout = 5
+        if namespace in (None, u"default"):
+            shell_cmd = u"sh -c"
+        else:
+            shell_cmd = f"ip netns exec {namespace} sh -c"
+
+        pgrep_cmd = f"{shell_cmd} \'pgrep -c {program}\'"
+        _, stdout, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
+                                sudo=True)
+        if int(stdout) == 0:
+            logger.trace(f"{program} is not running on {host}")
+            return
+        exec_cmd(node, f"{shell_cmd} \'pkill {program}\'",
+                 timeout=cmd_timeout, sudo=True)
+        for attempt in range(5):
+            _, stdout, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
+                                    sudo=True)
+            if int(stdout) == 0:
+                logger.trace(f"Attempt {attempt}: {program} is dead on {host}")
+                return
+            sleep(1)
+        logger.trace(f"SIGKILLing {program} on {host}")
+        exec_cmd(node, f"{shell_cmd} \'pkill -9 {program}\'",
+                 timeout=cmd_timeout, sudo=True)
 
     @staticmethod
-    def setup_all_duts(nodes):
-        """Run script over SSH to setup all DUT nodes.
+    def verify_program_installed(node, program):
+        """Verify that program is installed on the specified topology node.
 
-        :param nodes: Topology nodes.
-        :type nodes: dict
+        :param node: Topology node.
+        :param program: Program name.
+        :type node: dict
+        :type program: str
         """
-        for node in nodes.values():
-            if node['type'] == NodeType.DUT:
-                DUTSetup.setup_dut(node)
+        cmd = f"command -v {program}"
+        exec_cmd_no_error(node, cmd, message=f"{program} is not installed")
 
     @staticmethod
-    def get_vpp_pid(node):
-        """Get PID of running VPP process.
+    def get_pid(node, process):
+        """Get PID of running process.
 
         :param node: DUT node.
+        :param process: Process name.
         :type node: dict
+        :type process: str
         :returns: PID
         :rtype: int
         :raises RuntimeError: If it is not possible to get the PID.
@@ -167,30 +221,28 @@ class DUTSetup(object):
         ssh = SSH()
         ssh.connect(node)
 
+        retval = None
         for i in range(3):
-            logger.trace('Try {}: Get VPP PID'.format(i))
-            ret_code, stdout, stderr = ssh.exec_command('pidof vpp')
+            logger.trace(f"Try {i}: Get {process} PID")
+            ret_code, stdout, stderr = ssh.exec_command(f"pidof {process}")
 
             if int(ret_code):
-                raise RuntimeError('Not possible to get PID of VPP process '
-                                   'on node: {0}\n {1}'.
-                                   format(node['host'], stdout + stderr))
-
-            if len(stdout.splitlines()) == 1:
-                return int(stdout)
-            elif not stdout.splitlines():
-                logger.debug("No VPP PID found on node {0}".
-                             format(node['host']))
+                raise RuntimeError(
+                    f"Not possible to get PID of {process} process on node: "
+                    f"{node[u'host']}\n {stdout + stderr}"
+                )
+
+            pid_list = stdout.split()
+            if len(pid_list) == 1:
+                return [int(stdout)]
+            if not pid_list:
+                logger.debug(f"No {process} PID found on node {node[u'host']}")
                 continue
-            else:
-                logger.debug("More then one VPP PID found on node {0}".
-                             format(node['host']))
-                ret_list = list()
-                for line in stdout.splitlines():
-                    ret_list.append(int(line))
-                return ret_list
+            logger.debug(f"More than one {process} PID found " \
+                         f"on node {node[u'host']}")
+            retval = [int(pid) for pid in pid_list]
 
-        return None
+        return retval
 
     @staticmethod
     def get_vpp_pids(nodes):
@@ -203,22 +255,24 @@ class DUTSetup(object):
         """
         pids = dict()
         for node in nodes.values():
-            if node['type'] == NodeType.DUT:
-                pids[node['host']] = DUTSetup.get_vpp_pid(node)
+            if node[u"type"] == NodeType.DUT:
+                pids[node[u"host"]] = DUTSetup.get_pid(node, u"vpp")
         return pids
 
     @staticmethod
-    def crypto_device_verify(node, force_init=False, numvfs=32):
+    def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
         """Verify if Crypto QAT device virtual functions are initialized on all
         DUTs. If parameter force initialization is set to True, then try to
         initialize or remove VFs on QAT.
 
         :param node: DUT node.
-        :param force_init: If True then try to initialize to specific value.
+        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
         :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
+        :param force_init: If True then try to initialize to specific value.
         :type node: dict
-        :type force_init: bool
+        :type crypto_type: str
         :type numvfs: int
+        :type force_init: bool
         :returns: nothing
         :raises RuntimeError: If QAT VFs are not created and force init is set
                               to False.
@@ -229,37 +283,52 @@ class DUTSetup(object):
         if sriov_numvfs != numvfs:
             if force_init:
                 # QAT is not initialized and we want to initialize with numvfs
-                DUTSetup.crypto_device_init(node, numvfs)
+                DUTSetup.crypto_device_init(node, crypto_type, numvfs)
             else:
-                raise RuntimeError('QAT device failed to create VFs on {host}'.
-                                   format(host=node['host']))
+                raise RuntimeError(
+                    f"QAT device failed to create VFs on {node[u'host']}"
+                )
 
     @staticmethod
-    def crypto_device_init(node, numvfs):
+    def crypto_device_init(node, crypto_type, numvfs):
         """Init Crypto QAT device virtual functions on DUT.
 
         :param node: DUT node.
+        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
         :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
         :type node: dict
+        :type crypto_type: str
         :type numvfs: int
         :returns: nothing
         :raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
         """
+        if crypto_type == u"HW_DH895xcc":
+            kernel_mod = u"qat_dh895xcc"
+            kernel_drv = u"dh895xcc"
+        elif crypto_type == u"HW_C3xxx":
+            kernel_mod = u"qat_c3xxx"
+            kernel_drv = u"c3xxx"
+        else:
+            raise RuntimeError(
+                f"Unsupported crypto device type on {node[u'host']}"
+            )
+
         pci_addr = Topology.get_cryptodev(node)
 
         # QAT device must be re-bound to kernel driver before initialization.
-        DUTSetup.verify_kernel_module(node, 'qat_dh895xcc', force_load=True)
+        DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)
 
         # Stop VPP to prevent deadlock.
         DUTSetup.stop_service(node, Constants.VPP_UNIT)
 
         current_driver = DUTSetup.get_pci_dev_driver(
-            node, pci_addr.replace(':', r'\:'))
+            node, pci_addr.replace(u":", r"\:")
+        )
         if current_driver is not None:
             DUTSetup.pci_driver_unbind(node, pci_addr)
 
         # Bind to kernel driver.
-        DUTSetup.pci_driver_bind(node, pci_addr, 'dh895xcc')
+        DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)
 
         # Initialize QAT VFs.
         if numvfs > 0:
@@ -276,16 +345,16 @@ class DUTSetup(object):
         :type pf_pci_addr: str
         :type vf_id: int
         :returns: Virtual Function PCI address.
-        :rtype: int
+        :rtype: str
         :raises RuntimeError: If failed to get Virtual Function PCI address.
         """
-        command = "sh -c "\
-            "'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\
-            format(pci=pf_pci_addr, vf_id=vf_id)
-        message = 'Failed to get virtual function PCI address.'
+        command = f"sh -c \"basename $(readlink " \
+            f"/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id})\""
+        message = u"Failed to get virtual function PCI address."
 
-        stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
-                                      message=message)
+        stdout, _ = exec_cmd_no_error(
+            node, command, timeout=30, sudo=True, message=message
+        )
 
         return stdout.strip()
 
@@ -301,19 +370,20 @@ class DUTSetup(object):
         :rtype: int
         :raises RuntimeError: If PCI device is not SR-IOV capable.
         """
-        command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\
-            format(pci=pf_pci_addr.replace(':', r'\:'))
-        message = 'PCI device {pci} is not a SR-IOV device.'.\
-            format(pci=pf_pci_addr)
+        pci = pf_pci_addr.replace(u":", r"\:")
+        command = f"cat /sys/bus/pci/devices/{pci}/sriov_numvfs"
+        message = f"PCI device {pf_pci_addr} is not a SR-IOV device."
 
         for _ in range(3):
-            stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
-                                          message=message)
+            stdout, _ = exec_cmd_no_error(
+                node, command, timeout=30, sudo=True, message=message
+            )
             try:
                 sriov_numvfs = int(stdout)
             except ValueError:
-                logger.trace('Reading sriov_numvfs info failed on {host}'.
-                             format(host=node['host']))
+                logger.trace(
+                    f"Reading sriov_numvfs info failed on {node[u'host']}"
+                )
             else:
                 return sriov_numvfs
 
@@ -330,14 +400,29 @@ class DUTSetup(object):
         :type numvfs: int
         :raises RuntimeError: Failed to create VFs on PCI.
         """
-        command = "sh -c "\
-            "'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\
-            format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
-        message = 'Failed to create {num} VFs on {pci} device on {host}'.\
-            format(num=numvfs, pci=pf_pci_addr, host=node['host'])
-
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        cmd = f"test -f /sys/bus/pci/devices/{pf_pci_addr}/sriov_numvfs"
+        sriov_unsupported, _, _ = exec_cmd(node, cmd)
+        # if sriov_numvfs doesn't exist, then sriov_unsupported != 0
+        if int(sriov_unsupported):
+            if numvfs == 0:
+                # sriov is not supported and we want 0 VFs
+                # no need to do anything
+                return
+
+            raise RuntimeError(
+                f"Can't configure {numvfs} VFs on {pf_pci_addr} device "
+                f"on {node[u'host']} since it doesn't support SR-IOV."
+            )
+
+        pci = pf_pci_addr.replace(u":", r"\:")
+        command = f"sh -c \"echo {numvfs} | " \
+            f"tee /sys/bus/pci/devices/{pci}/sriov_numvfs\""
+        message = f"Failed to create {numvfs} VFs on {pf_pci_addr} device " \
+            f"on {node[u'host']}"
+
+        exec_cmd_no_error(
+            node, command, timeout=120, sudo=True, message=message
+        )
 
     @staticmethod
     def pci_driver_unbind(node, pci_addr):
@@ -349,14 +434,26 @@ class DUTSetup(object):
         :type pci_addr: str
         :raises RuntimeError: If PCI device unbind failed.
         """
-        command = "sh -c "\
-            "'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\
-            format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
-        message = 'Failed to unbind PCI device {pci} on {host}'.\
-            format(pci=pci_addr, host=node['host'])
+        pci = pci_addr.replace(u":", r"\:")
+        command = f"sh -c \"echo {pci_addr} | " \
+            f"tee /sys/bus/pci/devices/{pci}/driver/unbind\""
+        message = f"Failed to unbind PCI device {pci_addr} on {node[u'host']}"
 
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        exec_cmd_no_error(
+            node, command, timeout=120, sudo=True, message=message
+        )
+
+    @staticmethod
+    def pci_driver_unbind_list(node, *pci_addrs):
+        """Unbind PCI devices from current driver on node.
+
+        :param node: DUT node.
+        :param pci_addrs: PCI device addresses.
+        :type node: dict
+        :type pci_addrs: list
+        """
+        for pci_addr in pci_addrs:
+            DUTSetup.pci_driver_unbind(node, pci_addr)
 
     @staticmethod
     def pci_driver_bind(node, pci_addr, driver):
@@ -370,29 +467,29 @@ class DUTSetup(object):
         :type driver: str
         :raises RuntimeError: If PCI device bind failed.
         """
-        message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\
-            format(pci=pci_addr, driver=driver, host=node['host'])
+        message = f"Failed to bind PCI device {pci_addr} to {driver} " \
+            f"on host {node[u'host']}"
+        pci = pci_addr.replace(u":", r"\:")
+        command = f"sh -c \"echo {driver} | " \
+            f"tee /sys/bus/pci/devices/{pci}/driver_override\""
 
-        command = "sh -c "\
-            "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
-            format(driver=driver, pci=pci_addr.replace(':', r'\:'))
+        exec_cmd_no_error(
+            node, command, timeout=120, sudo=True, message=message
+        )
 
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        command = f"sh -c \"echo {pci_addr} | " \
+            f"tee /sys/bus/pci/drivers/{driver}/bind\""
 
-        command = "sh -c "\
-            "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
-            format(pci=pci_addr, driver=driver)
+        exec_cmd_no_error(
+            node, command, timeout=120, sudo=True, message=message
+        )
 
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        command = f"sh -c \"echo  | " \
+            f"tee /sys/bus/pci/devices/{pci}/driver_override\""
 
-        command = "sh -c "\
-            "'echo  | tee /sys/bus/pci/devices/{pci}/driver_override'".\
-            format(pci=pci_addr.replace(':', r'\:'))
-
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        exec_cmd_no_error(
+            node, command, timeout=120, sudo=True, message=message
+        )
 
     @staticmethod
     def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
@@ -407,18 +504,15 @@ class DUTSetup(object):
         :raises RuntimeError: If Virtual Function unbind failed.
         """
         vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
-        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
-            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
-
-        command = "sh -c "\
-            "'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\
-            format(vf_pci_addr=vf_pci_addr, vf_path=vf_path)
+        pf_pci = pf_pci_addr.replace(u":", r"\:")
+        vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}"
 
-        message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\
-            format(vf_pci_addr=vf_pci_addr, host=node['host'])
+        command = f"sh -c \"echo {vf_pci_addr} | tee {vf_path}/driver/unbind\""
+        message = f"Failed to unbind VF {vf_pci_addr} on {node[u'host']}"
 
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        exec_cmd_no_error(
+            node, command, timeout=120, sudo=True, message=message
+        )
 
     @staticmethod
     def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
@@ -435,92 +529,52 @@ class DUTSetup(object):
         :raises RuntimeError: If PCI device bind failed.
         """
         vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
-        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
-            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
-
-        message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\
-            format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host'])
+        pf_pci = pf_pci_addr.replace(u":", r'\:')
+        vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}"
 
-        command = "sh -c "\
-            "'echo {driver} | tee {vf_path}/driver_override'".\
-            format(driver=driver, vf_path=vf_path)
+        message = f"Failed to bind VF {vf_pci_addr} to {driver} " \
+            f"on {node[u'host']}"
+        command = f"sh -c \"echo {driver} | tee {vf_path}/driver_override\""
 
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        exec_cmd_no_error(
+            node, command, timeout=120, sudo=True, message=message
+        )
 
-        command = "sh -c "\
-            "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
-            format(vf_pci_addr=vf_pci_addr, driver=driver)
+        command = f"sh -c \"echo {vf_pci_addr} | " \
+            f"tee /sys/bus/pci/drivers/{driver}/bind\""
 
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        exec_cmd_no_error(
+            node, command, timeout=120, sudo=True, message=message
+        )
 
-        command = "sh -c "\
-            "'echo  | tee {vf_path}/driver_override'".\
-            format(vf_path=vf_path)
+        command = f"sh -c \"echo  | tee {vf_path}/driver_override\""
 
-        exec_cmd_no_error(node, command, timeout=120, sudo=True,
-                          message=message)
+        exec_cmd_no_error(
+            node, command, timeout=120, sudo=True, message=message
+        )
 
     @staticmethod
     def get_pci_dev_driver(node, pci_addr):
         """Get current PCI device driver on node.
 
-        .. note::
-            # lspci -vmmks 0000:00:05.0
-            Slot:   00:05.0
-            Class:  Ethernet controller
-            Vendor: Red Hat, Inc
-            Device: Virtio network device
-            SVendor:        Red Hat, Inc
-            SDevice:        Device 0001
-            PhySlot:        5
-            Driver: virtio-pci
-
         :param node: DUT node.
         :param pci_addr: PCI device address.
         :type node: dict
         :type pci_addr: str
         :returns: Driver or None
-        :raises RuntimeError: If PCI rescan or lspci command execution failed.
         :raises RuntimeError: If it is not possible to get the interface driver
             information from the node.
         """
-        ssh = SSH()
-        ssh.connect(node)
-
-        for i in range(3):
-            logger.trace('Try number {0}: Get PCI device driver'.format(i))
-
-            cmd = 'lspci -vmmks {0}'.format(pci_addr)
-            ret_code, stdout, _ = ssh.exec_command(cmd)
-            if int(ret_code):
-                raise RuntimeError("'{0}' failed on '{1}'"
-                                   .format(cmd, node['host']))
-
-            for line in stdout.splitlines():
-                if not line:
-                    continue
-                name = None
-                value = None
-                try:
-                    name, value = line.split("\t", 1)
-                except ValueError:
-                    if name == "Driver:":
-                        return None
-                if name == 'Driver:':
-                    return value
-
-            if i < 2:
-                logger.trace('Driver for PCI device {} not found, executing '
-                             'pci rescan and retrying'.format(pci_addr))
-                cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
-                ret_code, _, _ = ssh.exec_command_sudo(cmd)
-                if int(ret_code) != 0:
-                    raise RuntimeError("'{0}' failed on '{1}'"
-                                       .format(cmd, node['host']))
-
-        return None
+        driver_path = f"/sys/bus/pci/devices/{pci_addr}/driver"
+        cmd = f"test -d {driver_path}"
+        ret_code, ret_val, _ = exec_cmd(node, cmd)
+        if int(ret_code):
+            # the directory doesn't exist which means the device is not bound
+            # to any driver
+            return None
+        cmd = f"basename $(readlink -f {driver_path})"
+        ret_val, _ = exec_cmd_no_error(node, cmd)
+        return ret_val.strip()
 
     @staticmethod
     def verify_kernel_module(node, module, force_load=False):
@@ -535,13 +589,14 @@ class DUTSetup(object):
         :type force_load: bool
         :raises RuntimeError: If module is not loaded or failed to load.
         """
-        command = 'grep -w {module} /proc/modules'.format(module=module)
-        message = 'Kernel module {module} is not loaded on host {host}'.\
-            format(module=module, host=node['host'])
+        command = f"grep -w {module} /proc/modules"
+        message = f"Kernel module {module} is not loaded " \
+            f"on host {node[u'host']}"
 
         try:
-            exec_cmd_no_error(node, command, timeout=30, sudo=False,
-                              message=message)
+            exec_cmd_no_error(
+                node, command, timeout=30, sudo=False, message=message
+            )
         except RuntimeError:
             if force_load:
                 # Module is not loaded and we want to load it
@@ -554,15 +609,15 @@ class DUTSetup(object):
         """Verify if kernel module is loaded on all DUTs. If parameter force
         load is set to True, then try to load the modules.
 
-        :param node: DUT nodes.
+        :param nodes: DUT nodes.
         :param module: Module to verify.
         :param force_load: If True then try to load module.
-        :type node: dict
+        :type nodes: dict
         :type module: str
         :type force_load: bool
         """
         for node in nodes.values():
-            if node['type'] == NodeType.DUT:
+            if node[u"type"] == NodeType.DUT:
                 DUTSetup.verify_kernel_module(node, module, force_load)
 
     @staticmethod
@@ -570,11 +625,11 @@ class DUTSetup(object):
         """Verify if uio driver kernel module is loaded on all DUTs. If module
         is not present it will try to load it.
 
-        :param node: DUT nodes.
-        :type node: dict
+        :param nodes: DUT nodes.
+        :type nodes: dict
         """
         for node in nodes.values():
-            if node['type'] == NodeType.DUT:
+            if node[u"type"] == NodeType.DUT:
                 uio_driver = Topology.get_uio_driver(node)
                 DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
 
@@ -589,15 +644,15 @@ class DUTSetup(object):
         :returns: nothing
         :raises RuntimeError: If loading failed.
         """
-        command = 'modprobe {module}'.format(module=module)
-        message = 'Failed to load {module} on host {host}'.\
-            format(module=module, host=node['host'])
+        command = f"modprobe {module}"
+        message = f"Failed to load {module} on host {node[u'host']}"
 
         exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
 
     @staticmethod
     def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
-        """Install VPP on all DUT nodes.
+        """Install VPP on all DUT nodes. Start the VPP service in case of
+        systemd is not available or does not support autostart.
 
         :param nodes: Nodes in the topology.
         :param vpp_pkg_dir: Path to directory where VPP packages are stored.
@@ -606,29 +661,47 @@ class DUTSetup(object):
         :raises RuntimeError: If failed to remove or install VPP.
         """
         for node in nodes.values():
-            message = 'Failed to install VPP on host {host}!'.\
-                format(host=node['host'])
-            if node['type'] == NodeType.DUT:
-                command = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
+            message = f"Failed to install VPP on host {node[u'host']}!"
+            if node[u"type"] == NodeType.DUT:
+                command = u"ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true"
                 exec_cmd_no_error(node, command, sudo=True)
 
-                command = '. /etc/lsb-release; echo "${DISTRIB_ID}"'
+                command = u". /etc/lsb-release; echo \"${DISTRIB_ID}\""
                 stdout, _ = exec_cmd_no_error(node, command)
 
-                if stdout.strip() == 'Ubuntu':
-                    exec_cmd_no_error(node, 'apt-get purge -y "*vpp*" || true',
-                                      timeout=120, sudo=True)
-                    exec_cmd_no_error(node, 'dpkg -i --force-all {dir}*.deb'.
-                                      format(dir=vpp_pkg_dir), timeout=120,
-                                      sudo=True, message=message)
-                    exec_cmd_no_error(node, 'dpkg -l | grep vpp', sudo=True)
+                if stdout.strip() == u"Ubuntu":
+                    exec_cmd_no_error(
+                        node, u"apt-get purge -y '*vpp*' || true",
+                        timeout=120, sudo=True
+                    )
+                    # workaround to avoid installation of vpp-api-python
+                    exec_cmd_no_error(
+                        node, f"rm -f {vpp_pkg_dir}vpp-api-python.deb",
+                        timeout=120, sudo=True
+                    )
+                    exec_cmd_no_error(
+                        node, f"dpkg -i --force-all {vpp_pkg_dir}*.deb",
+                        timeout=120, sudo=True, message=message
+                    )
+                    exec_cmd_no_error(node, u"dpkg -l | grep vpp", sudo=True)
+                    if DUTSetup.running_in_container(node):
+                        DUTSetup.restart_service(node, Constants.VPP_UNIT)
                 else:
-                    exec_cmd_no_error(node, 'yum -y remove "*vpp*" || true',
-                                      timeout=120, sudo=True)
-                    exec_cmd_no_error(node, 'rpm -ivh {dir}*.rpm'.
-                                      format(dir=vpp_pkg_dir), timeout=120,
-                                      sudo=True, message=message)
-                    exec_cmd_no_error(node, 'rpm -qai *vpp*', sudo=True)
+                    exec_cmd_no_error(
+                        node, u"yum -y remove '*vpp*' || true",
+                        timeout=120, sudo=True
+                    )
+                    # workaround to avoid installation of vpp-api-python
+                    exec_cmd_no_error(
+                        node, f"rm -f {vpp_pkg_dir}vpp-api-python.rpm",
+                        timeout=120, sudo=True
+                    )
+                    exec_cmd_no_error(
+                        node, f"rpm -ivh {vpp_pkg_dir}*.rpm",
+                        timeout=120, sudo=True, message=message
+                    )
+                    exec_cmd_no_error(node, u"rpm -qai '*vpp*'", sudo=True)
+                    DUTSetup.restart_service(node, Constants.VPP_UNIT)
 
     @staticmethod
     def running_in_container(node):
@@ -637,14 +710,15 @@ class DUTSetup(object):
         :param node: Topology node.
         :type node: dict
         :returns: True if running in docker container, false if not or failed
-        to detect.
+            to detect.
         :rtype: bool
         """
-        command = "fgrep docker /proc/1/cgroup"
-        message = 'Failed to get cgroup settings.'
+        command = u"fgrep docker /proc/1/cgroup"
+        message = u"Failed to get cgroup settings."
         try:
-            exec_cmd_no_error(node, command, timeout=30, sudo=False,
-                              message=message)
+            exec_cmd_no_error(
+                node, command, timeout=30, sudo=False, message=message
+            )
         except RuntimeError:
             return False
         return True
@@ -661,176 +735,106 @@ class DUTSetup(object):
         :rtype: str
         :raises RuntimeError: If getting output failed.
         """
-        command = "docker inspect --format='"\
-            "{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
-        message = 'Failed to get directory of {uuid} on host {host}'.\
-            format(uuid=uuid, host=node['host'])
+        command = f"docker inspect " \
+            f"--format='{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}"
+        message = f"Failed to get directory of {uuid} on host {node[u'host']}"
 
         stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
         return stdout.strip()
 
     @staticmethod
-    def get_huge_page_size(node):
-        """Get default size of huge pages in system.
+    def get_hugepages_info(node, hugesize=None):
+        """Get number of huge pages in system.
 
         :param node: Node in the topology.
+        :param hugesize: Size of hugepages. Default system huge size if None.
         :type node: dict
-        :returns: Default size of free huge pages in system.
-        :rtype: int
-        :raises RuntimeError: If reading failed for three times.
-        """
-        ssh = SSH()
-        ssh.connect(node)
-
-        for _ in range(3):
-            ret_code, stdout, _ = ssh.exec_command_sudo(
-                "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
-            if ret_code == 0:
-                try:
-                    huge_size = int(stdout)
-                except ValueError:
-                    logger.trace('Reading huge page size information failed')
-                else:
-                    break
-        else:
-            raise RuntimeError('Getting huge page size information failed.')
-        return huge_size
-
-    @staticmethod
-    def get_huge_page_free(node, huge_size):
-        """Get number of free huge pages in system.
-
-        :param node: Node in the topology.
-        :param huge_size: Size of hugepages.
-        :type node: dict
-        :type huge_size: int
-        :returns: Number of free huge pages in system.
-        :rtype: int
-        :raises RuntimeError: If reading failed for three times.
-        """
-        # TODO: add numa aware option
-        ssh = SSH()
-        ssh.connect(node)
-
-        for _ in range(3):
-            ret_code, stdout, _ = ssh.exec_command_sudo(
-                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
-                format(huge_size))
-            if ret_code == 0:
-                try:
-                    huge_free = int(stdout)
-                except ValueError:
-                    logger.trace('Reading free huge pages information failed')
-                else:
-                    break
-        else:
-            raise RuntimeError('Getting free huge pages information failed.')
-        return huge_free
-
-    @staticmethod
-    def get_huge_page_total(node, huge_size):
-        """Get total number of huge pages in system.
-
-        :param node: Node in the topology.
-        :param huge_size: Size of hugepages.
-        :type node: dict
-        :type huge_size: int
-
-        :returns: Total number of huge pages in system.
-        :rtype: int
-        :raises RuntimeError: If reading failed for three times.
+        :type hugesize: int
+        :returns: Number of huge pages in system.
+        :rtype: dict
+        :raises RuntimeError: If reading failed.
         """
-        # TODO: add numa aware option
-        ssh = SSH()
-        ssh.connect(node)
-
-        for _ in range(3):
-            ret_code, stdout, _ = ssh.exec_command_sudo(
-                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
-                format(huge_size))
-            if ret_code == 0:
-                try:
-                    huge_total = int(stdout)
-                except ValueError:
-                    logger.trace('Reading total huge pages information failed')
-                else:
-                    break
-        else:
-            raise RuntimeError('Getting total huge pages information failed.')
-        return huge_total
+        if not hugesize:
+            hugesize = "$(grep Hugepagesize /proc/meminfo | awk '{ print $2 }')"
+        command = f"cat /sys/kernel/mm/hugepages/hugepages-{hugesize}kB/*"
+        stdout, _ = exec_cmd_no_error(node, command)
+        try:
+            line = stdout.splitlines()
+            return {
+                "free_hugepages": int(line[0]),
+                "nr_hugepages": int(line[1]),
+                "nr_hugepages_mempolicy": int(line[2]),
+                "nr_overcommit_hugepages": int(line[3]),
+                "resv_hugepages": int(line[4]),
+                "surplus_hugepages": int(line[5])
+            }
+        except ValueError:
+            logger.trace(u"Reading huge pages information failed!")
 
     @staticmethod
-    def check_huge_page(node, huge_mnt, mem_size, allocate=False):
+    def check_huge_page(
+            node, huge_mnt, mem_size, hugesize=2048, allocate=False):
         """Check if there is enough HugePages in system. If allocate is set to
         true, try to allocate more HugePages.
 
         :param node: Node in the topology.
         :param huge_mnt: HugePage mount point.
-        :param mem_size: Requested memory in MB.
+        :param mem_size: Requested memory in MB.
+        :param hugesize: HugePage size in KB.
         :param allocate: Whether to allocate more memory if not enough.
         :type node: dict
         :type huge_mnt: str
-        :type mem_size: str
+        :type mem_size: int
+        :type hugesize: int
         :type allocate: bool
-
         :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
-        or increasing map count failed.
+            or increasing map count failed.
         """
-        # TODO: split function into smaller parts.
-        ssh = SSH()
-        ssh.connect(node)
-
-        # Get huge pages information
-        huge_size = DUTSetup.get_huge_page_size(node)
-        huge_free = DUTSetup.get_huge_page_free(node, huge_size)
-        huge_total = DUTSetup.get_huge_page_total(node, huge_size)
+        # Get huge pages information.
+        hugepages = DUTSetup.get_hugepages_info(node, hugesize=hugesize)
+
+        # Check if hugepages requested are available on node.
+        if hugepages[u"nr_overcommit_hugepages"]:
+            # If overcommit is used, we need to know how many additional pages
+            # we can allocate
+            huge_available = hugepages[u"nr_overcommit_hugepages"] - \
+                hugepages[u"surplus_hugepages"]
+        else:
+            # Falling back to free_hugepages, which was used for detection
+            # before overcommit support was added.
+            huge_available = hugepages[u"free_hugepages"]
 
-        # Check if memory reqested is available on host
-        if (mem_size * 1024) > (huge_free * huge_size):
-            # If we want to allocate hugepage dynamically
+        if ((mem_size * 1024) // hugesize) > huge_available:
+            # If we want to allocate hugepage dynamically.
             if allocate:
-                mem_needed = (mem_size * 1024) - (huge_free * huge_size)
-                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
-                max_map_count = huge_to_allocate*4
-                # Increase maximum number of memory map areas a process may have
-                ret_code, _, _ = ssh.exec_command_sudo(
-                    'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
-                    format(max_map_count))
-                if int(ret_code) != 0:
-                    raise RuntimeError('Increase map count failed on {host}'.
-                                       format(host=node['host']))
-                # Increase hugepage count
-                ret_code, _, _ = ssh.exec_command_sudo(
-                    'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
-                    format(huge_to_allocate))
-                if int(ret_code) != 0:
-                    raise RuntimeError('Mount huge pages failed on {host}'.
-                                       format(host=node['host']))
-            # If we do not want to allocate dynamicaly end with error
+                huge_needed = ((mem_size * 1024) // hugesize) - huge_available
+                huge_to_allocate = huge_needed + hugepages[u"nr_hugepages"]
+                max_map_count = huge_to_allocate * 4
+                # Check if huge pages mount point exist.
+                try:
+                    exec_cmd_no_error(node, u"fgrep 'hugetlbfs' /proc/mounts")
+                except RuntimeError:
+                    exec_cmd_no_error(node, f"mkdir -p {huge_mnt}", sudo=True)
+                    exec_cmd_no_error(
+                        node,
+                        f"mount -t hugetlbfs -o pagesize={hugesize}k none "
+                        f"{huge_mnt}",
+                        sudo=True)
+                # Increase maximum number of memory map areas for process.
+                exec_cmd_no_error(
+                    node,
+                    f"echo \"{max_map_count}\" | "
+                    f"sudo tee /proc/sys/vm/max_map_count",
+                    message=f"Increase map count failed on {node[u'host']}!"
+                )
+                # Increase hugepage count.
+                exec_cmd_no_error(
+                    node,
+                    f"echo \"{huge_to_allocate}\" | "
+                    f"sudo tee /proc/sys/vm/nr_hugepages",
+                    message=f"Mount huge pages failed on {node[u'host']}!"
+                )
+            # If we do not want to allocate dynamically, end with an error.
             else:
-                raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
-                                   format(huge_free, huge_free * huge_size))
-        # Check if huge pages mount point exist
-        has_huge_mnt = False
-        ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
-        if int(ret_code) == 0:
-            for line in stdout.splitlines():
-                # Try to find something like:
-                # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
-                mount = line.split()
-                if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
-                    has_huge_mnt = True
-                    break
-        # If huge page mount point not exist create one
-        if not has_huge_mnt:
-            ret_code, _, _ = ssh.exec_command_sudo(
-                'mkdir -p {mnt}'.format(mnt=huge_mnt))
-            if int(ret_code) != 0:
-                raise RuntimeError('Create mount dir failed on {host}'.
-                                   format(host=node['host']))
-            ret_code, _, _ = ssh.exec_command_sudo(
-                'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
-                format(mnt=huge_mnt))
-            if int(ret_code) != 0:
-                raise RuntimeError('Mount huge pages failed on {host}'.
-                                   format(host=node['host']))
+                raise RuntimeError(
+                    f"Not enough available huge pages: {huge_available}!"
+                )