FIX: VPP PIDs can also be separated by spaces, not only by line breaks
[csit.git] / resources / libraries / python / DUTSetup.py
index ca37d9e..631bff4 100644 (file)
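
pidof typically prints every PID on a single line separated by spaces, while
the old parsing assumed one PID per line. A minimal sketch of the difference
(the sample PIDs are illustrative):

    >>> stdout = '1234 1235 1236\n'
    >>> stdout.splitlines()
    ['1234 1235 1236']
    >>> stdout.split()
    ['1234', '1235', '1236']

str.split() with no argument splits on any run of whitespace, so it handles
both layouts.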
 
 """DUT setup library."""
 
-import os
-
 from robot.api import logger
 
-from resources.libraries.python.topology import NodeType, Topology
-from resources.libraries.python.ssh import SSH
 from resources.libraries.python.constants import Constants
-from resources.libraries.python.VatExecutor import VatExecutor
-from resources.libraries.python.VPPUtil import VPPUtil
+from resources.libraries.python.ssh import SSH, exec_cmd_no_error
+from resources.libraries.python.topology import NodeType, Topology
 
 
 class DUTSetup(object):
     """Contains methods for setting up DUTs."""
-    @staticmethod
-    def start_vpp_service_on_all_duts(nodes):
-        """Start up the VPP service on all nodes.
-
-        :param nodes: Nodes in the topology.
-        :type nodes: dict
-        """
-        ssh = SSH()
-        for node in nodes.values():
-            if node['type'] == NodeType.DUT:
-                ssh.connect(node)
-                (ret_code, stdout, stderr) = \
-                    ssh.exec_command_sudo('service vpp restart', timeout=120)
-                if int(ret_code) != 0:
-                    raise Exception('DUT {0} failed to start VPP service'.
-                                    format(node['host']))
 
     @staticmethod
-    def vpp_show_version_verbose(node):
-        """Run "show version verbose" CLI command.
+    def get_service_logs(node, service):
+        """Get specific service unit logs from node.
 
-        :param node: Node to run command on.
+        :param node: Node in the topology.
+        :param service: Service unit name.
         :type node: dict
+        :type service: str
         """
-        vat = VatExecutor()
-        vat.execute_script("show_version_verbose.vat", node, json_out=False)
+        if DUTSetup.running_in_container(node):
+            command = ('echo $(< /var/log/supervisord.log);'
+                       'echo $(< /tmp/*supervisor*.log)')
+        else:
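+            # Show only entries logged since the unit last entered active
+            # state.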
+            command = ('journalctl --no-pager --unit={name} '
+                       '--since="$(echo `systemctl show -p '
+                       'ActiveEnterTimestamp {name}` | '
+                       'awk \'{{print $2 $3}}\')"'.
+                       format(name=service))
+        message = 'Node {host} failed to get logs from unit {name}'.\
+            format(host=node['host'], name=service)
 
-        try:
-            vat.script_should_have_passed()
-        except AssertionError:
-            raise RuntimeError('Failed to get VPP version on host: {}'.
-                               format(node['host']))
+        exec_cmd_no_error(node, command, timeout=30, sudo=True,
+                          message=message)
 
     @staticmethod
-    def show_vpp_version_on_all_duts(nodes):
-        """Show VPP version verbose on all DUTs.
+    def get_service_logs_on_all_duts(nodes, service):
+        """Get specific service unit logs from all DUTs.
 
-        :param nodes: VPP nodes
+        :param nodes: Nodes in the topology.
+        :param service: Service unit name.
         :type nodes: dict
+        :type service: str
         """
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
-                DUTSetup.vpp_show_version_verbose(node)
+                DUTSetup.get_service_logs(node, service)
 
     @staticmethod
-    def vpp_show_interfaces(node):
-        """Run "show interface" CLI command.
+    def start_service(node, service):
+        """Start up the named service on node.
 
-        :param node: Node to run command on.
+        :param node: Node in the topology.
+        :param service: Service unit name.
         :type node: dict
+        :type service: str
         """
-        vat = VatExecutor()
-        vat.execute_script("show_interface.vat", node, json_out=False)
+        if DUTSetup.running_in_container(node):
+            command = 'supervisorctl restart {name}'.format(name=service)
+        else:
+            command = 'service {name} restart'.format(name=service)
+        message = 'Node {host} failed to start service {name}'.\
+            format(host=node['host'], name=service)
 
-        try:
-            vat.script_should_have_passed()
-        except AssertionError:
-            raise RuntimeError('Failed to get VPP interfaces on host: {}'.
-                               format(node['host']))
+        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
+
+        DUTSetup.get_service_logs(node, service)
 
     @staticmethod
-    def vpp_api_trace_save(node):
-        """Run "api trace save" CLI command.
+    def start_service_on_all_duts(nodes, service):
+        """Start up the named service on all DUTs.
 
-        :param node: Node to run command on.
+        :param nodes: Nodes in the topology.
+        :param service: Service unit name.
-        :type node: dict
+        :type nodes: dict
+        :type service: str
         """
-        vat = VatExecutor()
-        vat.execute_script("api_trace_save.vat", node, json_out=False)
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                DUTSetup.start_service(node, service)
 
     @staticmethod
-    def vpp_api_trace_dump(node):
-        """Run "api trace custom-dump" CLI command.
+    def stop_service(node, service):
+        """Stop the named service on node.
 
-        :param node: Node to run command on.
+        :param node: Node in the topology.
+        :param service: Service unit name.
         :type node: dict
+        :type service: str
         """
-        vat = VatExecutor()
-        vat.execute_script("api_trace_dump.vat", node, json_out=False)
+        if DUTSetup.running_in_container(node):
+            command = 'supervisorctl stop {name}'.format(name=service)
+        else:
+            command = 'service {name} stop'.format(name=service)
+        message = 'Node {host} failed to stop service {name}'.\
+            format(host=node['host'], name=service)
+
+        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
+
+        DUTSetup.get_service_logs(node, service)
 
     @staticmethod
-    def setup_all_duts(nodes):
-        """Prepare all DUTs in given topology for test execution."""
+    def stop_service_on_all_duts(nodes, service):
+        """Stop the named service on all DUTs.
+
+        :param nodes: Nodes in the topology.
+        :param service: Service unit name.
+        :type nodes: dict
+        :type service: str
+        """
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
-                DUTSetup.setup_dut(node)
+                DUTSetup.stop_service(node, service)
 
     @staticmethod
     def setup_dut(node):
@@ -122,18 +135,24 @@ class DUTSetup(object):
 
         :raises Exception: If the DUT setup fails.
         """
-        ssh = SSH()
-        ssh.connect(node)
+        command = 'bash {0}/{1}/dut_setup.sh'.\
+            format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH)
+        message = 'DUT test setup script failed at node {name}'.\
+            format(name=node['host'])
 
-        (ret_code, stdout, stderr) = \
-            ssh.exec_command('sudo -Sn bash {0}/{1}/dut_setup.sh'.
-                             format(Constants.REMOTE_FW_DIR,
-                                    Constants.RESOURCES_LIB_SH), timeout=120)
-        if int(ret_code) != 0:
-            logger.debug('DUT {0} setup script failed: "{1}"'.
-                         format(node['host'], stdout + stderr))
-            raise Exception('DUT test setup script failed at node {}'.
-                            format(node['host']))
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
+
+    @staticmethod
+    def setup_all_duts(nodes):
+        """Run script over SSH to setup all DUT nodes.
+
+        :param nodes: Topology nodes.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                DUTSetup.setup_dut(node)
 
     @staticmethod
     def get_vpp_pid(node):
@@ -143,9 +162,8 @@ class DUTSetup(object):
         :type node: dict
         :returns: PID
         :rtype: int
-        :raises RuntimeError if it is not possible to get the PID.
+        :raises RuntimeError: If it is not possible to get the PID.
         """
-
         ssh = SSH()
         ssh.connect(node)
 
@@ -153,24 +171,22 @@ class DUTSetup(object):
             logger.trace('Try {}: Get VPP PID'.format(i))
             ret_code, stdout, stderr = ssh.exec_command('pidof vpp')
 
-            if int(ret_code) != 0:
+            if int(ret_code):
                 raise RuntimeError('Not possible to get PID of VPP process '
                                    'on node: {0}\n {1}'.
                                    format(node['host'], stdout + stderr))
 
-            if len(stdout.splitlines()) == 1:
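+            # pidof prints all PIDs on a single line separated by spaces,
+            # so split on any whitespace.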
+            pid_list = stdout.split()
+            if len(pid_list) == 1:
                 return int(stdout)
-            elif len(stdout.splitlines()) == 0:
+            elif not pid_list:
                 logger.debug("No VPP PID found on node {0}".
                              format(node['host']))
                 continue
             else:
                 logger.debug("More then one VPP PID found on node {0}".
                              format(node['host']))
-                ret_list = list()
-                for line in stdout.splitlines():
-                    ret_list.append(int(line))
-                return ret_list
+                return [int(pid) for pid in pid_list]
 
         return None
 
@@ -183,29 +199,17 @@ class DUTSetup(object):
         :returns: PIDs
         :rtype: dict
         """
-
         pids = dict()
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
                 pids[node['host']] = DUTSetup.get_vpp_pid(node)
         return pids
 
-    @staticmethod
-    def vpp_show_crypto_device_mapping(node):
-        """Run "show crypto device mapping" CLI command.
-
-        :param node: Node to run command on.
-        :type node: dict
-        """
-        vat = VatExecutor()
-        vat.execute_script("show_crypto_device_mapping.vat", node,
-                           json_out=False)
-
     @staticmethod
     def crypto_device_verify(node, force_init=False, numvfs=32):
         """Verify if Crypto QAT device virtual functions are initialized on all
         DUTs. If parameter force initialization is set to True, then try to
-        initialize or disable QAT.
+        initialize or remove VFs on QAT.
 
         :param node: DUT node.
         :param force_init: If True then try to initialize to specific value.
@@ -214,37 +218,19 @@ class DUTSetup(object):
         :type force_init: bool
         :type numvfs: int
         :returns: nothing
-        :raises RuntimeError: If QAT is not initialized or failed to initialize.
+        :raises RuntimeError: If QAT VFs are not created and force init is set
+                              to False.
         """
+        pci_addr = Topology.get_cryptodev(node)
+        sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)
 
-        ssh = SSH()
-        ssh.connect(node)
-
-        cryptodev = Topology.get_cryptodev(node)
-        cmd = 'cat /sys/bus/pci/devices/{0}/sriov_numvfs'.\
-            format(cryptodev.replace(':', r'\:'))
-
-        # Try to read number of VFs from PCI address of QAT device
-        for _ in range(3):
-            ret_code, stdout, _ = ssh.exec_command(cmd)
-            if int(ret_code) == 0:
-                try:
-                    sriov_numvfs = int(stdout)
-                except ValueError:
-                    logger.trace('Reading sriov_numvfs info failed on {0}'.
-                                 format(node['host']))
-                else:
-                    if sriov_numvfs != numvfs:
-                        if force_init:
-                            # QAT is not initialized and we want to initialize
-                            # with numvfs
-                            DUTSetup.crypto_device_init(node, numvfs)
-                        else:
-                            raise RuntimeError('QAT device {0} is not '
-                                               'initialized to {1} on host {2}'
-                                               .format(cryptodev, numvfs,
-                                                       node['host']))
-                    break
+        if sriov_numvfs != numvfs:
+            if force_init:
+                # QAT is not initialized and we want to initialize with numvfs
+                DUTSetup.crypto_device_init(node, numvfs)
+            else:
+                raise RuntimeError('QAT device {pci} does not have {num} VFs '
+                                   'initialized on host {host}'.
+                                   format(pci=pci_addr, num=numvfs,
+                                          host=node['host']))
 
     @staticmethod
     def crypto_device_init(node, numvfs):
@@ -257,33 +243,99 @@ class DUTSetup(object):
         :returns: nothing
         :raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
         """
-        cryptodev = Topology.get_cryptodev(node)
+        pci_addr = Topology.get_cryptodev(node)
 
-        # QAT device must be re-bound to kernel driver before initialization
-        driver = 'dh895xcc'
-        kernel_module = 'qat_dh895xcc'
-        current_driver = DUTSetup.get_pci_dev_driver(
-            node, cryptodev.replace(':', r'\:'))
+        # QAT device must be re-bound to kernel driver before initialization.
+        DUTSetup.verify_kernel_module(node, 'qat_dh895xcc', force_load=True)
 
-        DUTSetup.kernel_module_verify(node, kernel_module, force_load=True)
+        # Stop VPP to prevent deadlock.
+        DUTSetup.stop_service(node, Constants.VPP_UNIT)
 
-        VPPUtil.stop_vpp_service(node)
+        current_driver = DUTSetup.get_pci_dev_driver(
+            node, pci_addr.replace(':', r'\:'))
         if current_driver is not None:
-            DUTSetup.pci_driver_unbind(node, cryptodev)
-        DUTSetup.pci_driver_bind(node, cryptodev, driver)
+            DUTSetup.pci_driver_unbind(node, pci_addr)
 
-        ssh = SSH()
-        ssh.connect(node)
+        # Bind to kernel driver.
+        DUTSetup.pci_driver_bind(node, pci_addr, 'dh895xcc')
 
-        # Initialize QAT VFs
+        # Initialize QAT VFs.
         if numvfs > 0:
-            cmd = 'echo "{0}" | tee /sys/bus/pci/devices/{1}/sriov_numvfs'.\
-                format(numvfs, cryptodev.replace(':', r'\:'), timeout=180)
-            ret_code, _, _ = ssh.exec_command_sudo("sh -c '{0}'".format(cmd))
+            DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)
 
-            if int(ret_code) != 0:
-                raise RuntimeError('Failed to initialize {0} VFs on QAT device '
-                                   ' on host {1}'.format(numvfs, node['host']))
+    @staticmethod
+    def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
+        """Get PCI address of Virtual Function.
+
+        :param node: DUT node.
+        :param pf_pci_addr: Physical Function PCI address.
+        :param vf_id: Virtual Function number.
+        :type node: dict
+        :type pf_pci_addr: str
+        :type vf_id: int
+        :returns: Virtual Function PCI address.
+        :rtype: str
+        :raises RuntimeError: If failed to get Virtual Function PCI address.
+        """
+        command = "sh -c "\
+            "'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\
+            format(pci=pf_pci_addr, vf_id=vf_id)
+        message = 'Failed to get virtual function PCI address.'
+
+        stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
+                                      message=message)
+
+        return stdout.strip()
+
+    @staticmethod
+    def get_sriov_numvfs(node, pf_pci_addr):
+        """Get number of SR-IOV VFs.
+
+        :param node: DUT node.
+        :param pf_pci_addr: Physical Function PCI device address.
+        :type node: dict
+        :type pf_pci_addr: str
+        :returns: Number of VFs.
+        :rtype: int
+        :raises RuntimeError: If PCI device is not SR-IOV capable.
+        """
+        command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\
+            format(pci=pf_pci_addr.replace(':', r'\:'))
+        message = 'PCI device {pci} is not an SR-IOV device.'.\
+            format(pci=pf_pci_addr)
+
+        for _ in range(3):
+            stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
+                                          message=message)
+            try:
+                sriov_numvfs = int(stdout)
+            except ValueError:
+                logger.trace('Reading sriov_numvfs info failed on {host}'.
+                             format(host=node['host']))
+            else:
+                return sriov_numvfs
+
+    @staticmethod
+    def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
+        """Init or reset SR-IOV virtual functions by setting its number on PCI
+        device on DUT. Setting to zero removes all VFs.
+
+        :param node: DUT node.
+        :param pf_pci_addr: Physical Function PCI device address.
+        :param numvfs: Number of VFs to initialize, 0 - removes the VFs.
+        :type node: dict
+        :type pf_pci_addr: str
+        :type numvfs: int
+        :raises RuntimeError: If failed to create VFs on PCI device.
+        """
+        command = "sh -c "\
+            "'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\
+            format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
+        message = 'Failed to create {num} VFs on {pci} device on {host}'.\
+            format(num=numvfs, pci=pf_pci_addr, host=node['host'])
+
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
     @staticmethod
     def pci_driver_unbind(node, pci_addr):
@@ -293,20 +345,16 @@ class DUTSetup(object):
         :param pci_addr: PCI device address.
         :type node: dict
         :type pci_addr: str
-        :returns: nothing
         :raises RuntimeError: If PCI device unbind failed.
         """
+        command = "sh -c "\
+            "'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\
+            format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
+        message = 'Failed to unbind PCI device {pci} on {host}'.\
+            format(pci=pci_addr, host=node['host'])
 
-        ssh = SSH()
-        ssh.connect(node)
-
-        ret_code, _, _ = ssh.exec_command_sudo(
-            "sh -c 'echo {0} | tee /sys/bus/pci/devices/{1}/driver/unbind'"
-            .format(pci_addr, pci_addr.replace(':', r'\:')), timeout=180)
-
-        if int(ret_code) != 0:
-            raise RuntimeError('Failed to unbind PCI device {0} from driver on '
-                               'host {1}'.format(pci_addr, node['host']))
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
     @staticmethod
     def pci_driver_bind(node, pci_addr, driver):
@@ -318,52 +366,138 @@ class DUTSetup(object):
         :type node: dict
         :type pci_addr: str
         :type driver: str
-        :returns: nothing
         :raises RuntimeError: If PCI device bind failed.
         """
+        message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\
+            format(pci=pci_addr, driver=driver, host=node['host'])
 
-        ssh = SSH()
-        ssh.connect(node)
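+        # Set driver_override so the kernel allows binding this device to
+        # the chosen driver, bind it, then clear the override again.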
+        command = "sh -c "\
+            "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
+            format(driver=driver, pci=pci_addr.replace(':', r'\:'))
+
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
-        ret_code, _, _ = ssh.exec_command_sudo(
-            "sh -c 'echo {0} | tee /sys/bus/pci/drivers/{1}/bind'".format(
-                pci_addr, driver), timeout=180)
+        command = "sh -c "\
+            "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
+            format(pci=pci_addr, driver=driver)
 
-        if int(ret_code) != 0:
-            raise RuntimeError('Failed to bind PCI device {0} to {1} driver on '
-                               'host {2}'.format(pci_addr, driver,
-                                                 node['host']))
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
+
+        command = "sh -c "\
+            "'echo  | tee /sys/bus/pci/devices/{pci}/driver_override'".\
+            format(pci=pci_addr.replace(':', r'\:'))
+
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
+
+    @staticmethod
+    def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
+        """Unbind Virtual Function from driver on node.
+
+        :param node: DUT node.
+        :param pf_pci_addr: PCI device address.
+        :param vf_id: Virtual Function ID.
+        :type node: dict
+        :type pf_pci_addr: str
+        :type vf_id: int
+        :raises RuntimeError: If Virtual Function unbind failed.
+        """
+        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
+        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
+            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
+
+        command = "sh -c "\
+            "'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\
+            format(vf_pci_addr=vf_pci_addr, vf_path=vf_path)
+
+        message = 'Failed to unbind VF {vf_pci_addr} on {host}'.\
+            format(vf_pci_addr=vf_pci_addr, host=node['host'])
+
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
+
+    @staticmethod
+    def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
+        """Bind Virtual Function to driver on node.
+
+        :param node: DUT node.
+        :param pf_pci_addr: PCI device address.
+        :param vf_id: Virtual Function ID.
+        :param driver: Driver to bind.
+        :type node: dict
+        :type pf_pci_addr: str
+        :type vf_id: int
+        :type driver: str
+        :raises RuntimeError: If PCI device bind failed.
+        """
+        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
+        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
+            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
+
+        message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\
+            format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host'])
+
+        command = "sh -c "\
+            "'echo {driver} | tee {vf_path}/driver_override'".\
+            format(driver=driver, vf_path=vf_path)
+
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
+
+        command = "sh -c "\
+            "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
+            format(vf_pci_addr=vf_pci_addr, driver=driver)
+
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
+
+        command = "sh -c "\
+            "'echo  | tee {vf_path}/driver_override'".\
+            format(vf_path=vf_path)
+
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
     @staticmethod
     def get_pci_dev_driver(node, pci_addr):
         """Get current PCI device driver on node.
 
+        .. note::
+            # lspci -vmmks 0000:00:05.0
+            Slot:   00:05.0
+            Class:  Ethernet controller
+            Vendor: Red Hat, Inc
+            Device: Virtio network device
+            SVendor:        Red Hat, Inc
+            SDevice:        Device 0001
+            PhySlot:        5
+            Driver: virtio-pci
+
         :param node: DUT node.
         :param pci_addr: PCI device address.
         :type node: dict
         :type pci_addr: str
         :returns: Driver or None
         :raises RuntimeError: If PCI rescan or lspci command execution failed.
+        :raises RuntimeError: If it is not possible to get the interface driver
+            information from the node.
         """
         ssh = SSH()
         ssh.connect(node)
 
         for i in range(3):
-            logger.trace('Try {0}: Get interface driver'.format(i))
-            cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
-            ret_code, _, _ = ssh.exec_command_sudo(cmd)
-            if int(ret_code) != 0:
-                raise RuntimeError("'{0}' failed on '{1}'"
-                                   .format(cmd, node['host']))
+            logger.trace('Try number {0}: Get PCI device driver'.format(i))
 
             cmd = 'lspci -vmmks {0}'.format(pci_addr)
             ret_code, stdout, _ = ssh.exec_command(cmd)
-            if int(ret_code) != 0:
+            if int(ret_code):
                 raise RuntimeError("'{0}' failed on '{1}'"
                                    .format(cmd, node['host']))
 
             for line in stdout.splitlines():
-                if len(line) == 0:
+                if not line:
                     continue
                 name = None
                 value = None
@@ -374,179 +508,327 @@ class DUTSetup(object):
                         return None
                 if name == 'Driver:':
                     return value
-        else:
-            return None
+
+            if i < 2:
+                logger.trace('Driver for PCI device {} not found, executing '
+                             'PCI rescan and retrying'.format(pci_addr))
+                cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
+                ret_code, _, _ = ssh.exec_command_sudo(cmd)
+                if int(ret_code) != 0:
+                    raise RuntimeError("'{0}' failed on '{1}'"
+                                       .format(cmd, node['host']))
+
+        return None
 
     @staticmethod
-    def kernel_module_verify(node, module, force_load=False):
-        """Verify if kernel module is loaded on all DUTs. If parameter force
+    def verify_kernel_module(node, module, force_load=False):
+        """Verify if kernel module is loaded on node. If parameter force
-        load is set to True, then try to load the modules.
+        load is set to True, then try to load the module.
 
-        :param node: DUT node.
+        :param node: Node.
         :param module: Module to verify.
         :param force_load: If True then try to load module.
         :type node: dict
         :type module: str
         :type force_load: bool
-        :returns: nothing
         :raises RuntimeError: If module is not loaded or failed to load.
         """
+        command = 'grep -w {module} /proc/modules'.format(module=module)
+        message = 'Kernel module {module} is not loaded on host {host}'.\
+            format(module=module, host=node['host'])
 
-        ssh = SSH()
-        ssh.connect(node)
-
-        cmd = 'grep -w {0} /proc/modules'.format(module)
-        ret_code, _, _ = ssh.exec_command(cmd)
-
-        if int(ret_code) != 0:
+        try:
+            exec_cmd_no_error(node, command, timeout=30, sudo=False,
+                              message=message)
+        except RuntimeError:
             if force_load:
                 # Module is not loaded and we want to load it
-                DUTSetup.kernel_module_load(node, module)
+                DUTSetup.load_kernel_module(node, module)
             else:
-                raise RuntimeError('Kernel module {0} is not loaded on host '
-                                   '{1}'.format(module, node['host']))
+                raise
 
     @staticmethod
-    def kernel_module_load(node, module):
-        """Load kernel module on node.
+    def verify_kernel_module_on_all_duts(nodes, module, force_load=False):
+        """Verify if kernel module is loaded on all DUTs. If parameter force
+        load is set to True, then try to load the module.
 
-        :param node: DUT node.
-        :param module: Module to load.
+        :param nodes: DUT nodes.
+        :param module: Module to verify.
+        :param force_load: If True then try to load module.
-        :type node: dict
+        :type nodes: dict
         :type module: str
-        :returns: nothing
-        :raises RuntimeError: If loading failed.
+        :type force_load: bool
         """
-
-        ssh = SSH()
-        ssh.connect(node)
-
-        ret_code, _, _ = ssh.exec_command_sudo("modprobe {0}".format(module))
-
-        if int(ret_code) != 0:
-            raise RuntimeError('Failed to load {0} kernel module on host {1}'.
-                               format(module, node['host']))
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                DUTSetup.verify_kernel_module(node, module, force_load)
 
     @staticmethod
-    def vpp_enable_traces_on_all_duts(nodes):
-        """Enable vpp packet traces on all DUTs in the given topology.
+    def verify_uio_driver_on_all_duts(nodes):
+        """Verify if uio driver kernel module is loaded on all DUTs. If module
+        is not present it will try to load it.
 
-        :param nodes: Nodes in the topology.
-        :type nodes: dict
+        :param nodes: DUT nodes.
+        :type nodes: dict
         """
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
-                DUTSetup.vpp_enable_traces_on_dut(node)
+                uio_driver = Topology.get_uio_driver(node)
+                DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
 
     @staticmethod
-    def vpp_enable_traces_on_dut(node):
-        """Enable vpp packet traces on the DUT node.
+    def load_kernel_module(node, module):
+        """Load kernel module on node.
 
-        :param node: DUT node to set up.
+        :param node: DUT node.
+        :param module: Module to load.
         :type node: dict
+        :type module: str
+        :raises RuntimeError: If loading failed.
         """
+        command = 'modprobe {module}'.format(module=module)
+        message = 'Failed to load {module} on host {host}'.\
+            format(module=module, host=node['host'])
 
-        vat = VatExecutor()
-        vat.execute_script("enable_dpdk_traces.vat", node, json_out=False)
-        vat.execute_script("enable_vhost_user_traces.vat", node, json_out=False)
+        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
 
     @staticmethod
-    def install_vpp_on_all_duts(nodes, vpp_pkg_dir, vpp_rpm_pkgs, vpp_deb_pkgs):
+    def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
         """Install VPP on all DUT nodes.
 
         :param nodes: Nodes in the topology.
         :param vpp_pkg_dir: Path to directory where VPP packages are stored.
-        :param vpp_rpm_pkgs: List of VPP rpm packages to be installed.
-        :param vpp_deb_pkgs: List of VPP deb packages to be installed.
         :type nodes: dict
         :type vpp_pkg_dir: str
-        :type vpp_rpm_pkgs: list
-        :type vpp_deb_pkgs: list
-        :raises: RuntimeError if failed to remove or install VPP
+        :raises RuntimeError: If failed to remove or install VPP.
         """
-
-        logger.debug("Installing VPP")
-
         for node in nodes.values():
+            message = 'Failed to install VPP on host {host}!'.\
+                format(host=node['host'])
             if node['type'] == NodeType.DUT:
-                logger.debug("Installing VPP on node {0}".format(node['host']))
-
-                ssh = SSH()
-                ssh.connect(node)
-
-                if os.path.isfile("/etc/redhat-release"):
-                    # workaroud - uninstall existing vpp installation until
-                    # start-testcase script is updated on all virl servers
-                    rpm_pkgs_remove = " ".join(vpp_rpm_pkgs)
-                    r_rcode, _, r_err = ssh.exec_command_sudo(
-                        "rpm -e {0}".format(rpm_pkgs_remove), timeout=90)
-                    if int(r_rcode) != 0:
-                        raise RuntimeError('Failed to remove previous VPP'
-                                           'installation on host {0}:\n{1}'
-                                           .format(node['host']), r_err)
-
-                    rpm_pkgs = "*.rpm ".join(str(vpp_pkg_dir + pkg)
-                                             for pkg in vpp_rpm_pkgs) + "*.rpm"
-                    ret_code, _, err = ssh.exec_command_sudo(
-                        "rpm -ivh {0}".format(rpm_pkgs), timeout=90)
-                    if int(ret_code) != 0:
-                        raise RuntimeError('Failed to install VPP on host {0}:'
-                                           '\n{1}'.format(node['host']), err)
-                    else:
-                        ssh.exec_command_sudo("rpm -qai vpp*")
-                        logger.info("VPP installed on node {0}".
-                                    format(node['host']))
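+                # Mask VPP sysctl config so package installation does not
+                # override the current hugepage settings.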
+                command = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
+                exec_cmd_no_error(node, command, sudo=True)
+
+                command = '. /etc/lsb-release; echo "${DISTRIB_ID}"'
+                stdout, _ = exec_cmd_no_error(node, command)
+
+                if stdout.strip() == 'Ubuntu':
+                    exec_cmd_no_error(node, 'apt-get purge -y "*vpp*" || true',
+                                      timeout=120, sudo=True)
+                    exec_cmd_no_error(node, 'dpkg -i --force-all {dir}*.deb'.
+                                      format(dir=vpp_pkg_dir), timeout=120,
+                                      sudo=True, message=message)
+                    exec_cmd_no_error(node, 'dpkg -l | grep vpp', sudo=True)
                 else:
-                    # workaroud - uninstall existing vpp installation until
-                    # start-testcase script is updated on all virl servers
-                    deb_pkgs_remove = " ".join(vpp_deb_pkgs)
-                    r_rcode, _, r_err = ssh.exec_command_sudo(
-                        "dpkg --purge {0}".format(deb_pkgs_remove), timeout=90)
-                    if int(r_rcode) != 0:
-                        raise RuntimeError('Failed to remove previous VPP'
-                                           'installation on host {0}:\n{1}'
-                                           .format(node['host']), r_err)
-                    deb_pkgs = "*.deb ".join(str(vpp_pkg_dir + pkg)
-                                             for pkg in vpp_deb_pkgs) + "*.deb"
-                    ret_code, _, err = ssh.exec_command_sudo(
-                        "dpkg -i --force-all {0}".format(deb_pkgs), timeout=90)
-                    if int(ret_code) != 0:
-                        raise RuntimeError('Failed to install VPP on host {0}:'
-                                           '\n{1}'.format(node['host']), err)
-                    else:
-                        ssh.exec_command_sudo("dpkg -l | grep vpp")
-                        logger.info("VPP installed on node {0}".
-                                    format(node['host']))
-
-                ssh.disconnect(node)
+                    exec_cmd_no_error(node, 'yum -y remove "*vpp*" || true',
+                                      timeout=120, sudo=True)
+                    exec_cmd_no_error(node, 'rpm -ivh {dir}*.rpm'.
+                                      format(dir=vpp_pkg_dir), timeout=120,
+                                      sudo=True, message=message)
+                    exec_cmd_no_error(node, 'rpm -qai *vpp*', sudo=True)
 
     @staticmethod
-    def verify_vpp_on_all_duts(nodes):
-        """Verify that VPP is installed on all DUT nodes.
+    def running_in_container(node):
+        """This method tests if topology node is running inside container.
 
-        :param nodes: Nodes in the topology.
-        :type nodes: dict
+        :param node: Topology node.
+        :type node: dict
+        :returns: True if running in docker container, False if not or if
+            detection failed.
+        :rtype: bool
+        """
+        command = "fgrep docker /proc/1/cgroup"
+        message = 'Failed to get cgroup settings.'
+        try:
+            exec_cmd_no_error(node, command, timeout=30, sudo=False,
+                              message=message)
+        except RuntimeError:
+            return False
+        return True
+
+    @staticmethod
+    def get_docker_mergeddir(node, uuid):
+        """Get Docker overlay for MergedDir diff.
+
+        :param node: DUT node.
+        :param uuid: Docker UUID.
+        :type node: dict
+        :type uuid: str
+        :returns: Docker container MergedDir.
+        :rtype: str
+        :raises RuntimeError: If getting output failed.
         """
+        command = "docker inspect --format='"\
+            "{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
+        message = 'Failed to get directory of {uuid} on host {host}'.\
+            format(uuid=uuid, host=node['host'])
 
-        logger.debug("Verify VPP on all DUTs")
+        stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
+        return stdout.strip()
 
-        DUTSetup.start_vpp_service_on_all_duts(nodes)
+    @staticmethod
+    def get_huge_page_size(node):
+        """Get default size of huge pages in system.
 
-        for node in nodes.values():
-            if node['type'] == NodeType.DUT:
-                DUTSetup.verify_vpp_on_dut(node)
+        :param node: Node in the topology.
+        :type node: dict
+        :returns: Default huge page size in kB.
+        :rtype: int
+        :raises RuntimeError: If reading failed three times.
+        """
+        ssh = SSH()
+        ssh.connect(node)
+
+        for _ in range(3):
+            ret_code, stdout, _ = ssh.exec_command_sudo(
+                "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
+            if ret_code == 0:
+                try:
+                    huge_size = int(stdout)
+                except ValueError:
+                    logger.trace('Reading huge page size information failed')
+                else:
+                    break
+        else:
+            raise RuntimeError('Getting huge page size information failed.')
+        return huge_size
 
     @staticmethod
-    def verify_vpp_on_dut(node):
-        """Verify that VPP is installed on DUT node.
+    def get_huge_page_free(node, huge_size):
+        """Get number of free huge pages in system.
 
-        :param node: DUT node.
+        :param node: Node in the topology.
+        :param huge_size: Size of hugepages.
         :type node: dict
-        :raises: RuntimeError if failed to restart VPP, get VPP version or
-        get VPP interfaces
+        :type huge_size: int
+        :returns: Number of free huge pages in system.
+        :rtype: int
+        :raises RuntimeError: If reading failed three times.
         """
+        # TODO: add numa aware option
+        ssh = SSH()
+        ssh.connect(node)
 
-        logger.debug("Verify VPP on node {0}".format(node['host']))
+        for _ in range(3):
+            ret_code, stdout, _ = ssh.exec_command_sudo(
+                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
+                format(huge_size))
+            if ret_code == 0:
+                try:
+                    huge_free = int(stdout)
+                except ValueError:
+                    logger.trace('Reading free huge pages information failed')
+                else:
+                    break
+        else:
+            raise RuntimeError('Getting free huge pages information failed.')
+        return huge_free
 
-        DUTSetup.vpp_show_version_verbose(node)
-        DUTSetup.vpp_show_interfaces(node)
+    @staticmethod
+    def get_huge_page_total(node, huge_size):
+        """Get total number of huge pages in system.
+
+        :param node: Node in the topology.
+        :param huge_size: Size of hugepages.
+        :type node: dict
+        :type huge_size: int
+        :returns: Total number of huge pages in system.
+        :rtype: int
+        :raises RuntimeError: If reading failed three times.
+        """
+        # TODO: add numa aware option
+        ssh = SSH()
+        ssh.connect(node)
+
+        for _ in range(3):
+            ret_code, stdout, _ = ssh.exec_command_sudo(
+                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
+                format(huge_size))
+            if ret_code == 0:
+                try:
+                    huge_total = int(stdout)
+                except ValueError:
+                    logger.trace('Reading total huge pages information failed')
+                else:
+                    break
+        else:
+            raise RuntimeError('Getting total huge pages information failed.')
+        return huge_total
+
+    @staticmethod
+    def check_huge_page(node, huge_mnt, mem_size, allocate=False):
+        """Check if there is enough HugePages in system. If allocate is set to
+        true, try to allocate more HugePages.
+
+        :param node: Node in the topology.
+        :param huge_mnt: HugePage mount point.
+        :param mem_size: Requested memory in MB.
+        :param allocate: Whether to allocate more memory if not enough.
+        :type node: dict
+        :type huge_mnt: str
+        :type mem_size: int
+        :type allocate: bool
+        :raises RuntimeError: If mounting hugetlbfs failed, not enough
+            HugePages available, or increasing map count failed.
+        """
+        # TODO: split function into smaller parts.
+        ssh = SSH()
+        ssh.connect(node)
+
+        # Get huge pages information
+        huge_size = DUTSetup.get_huge_page_size(node)
+        huge_free = DUTSetup.get_huge_page_free(node, huge_size)
+        huge_total = DUTSetup.get_huge_page_total(node, huge_size)
+
+        # Check if requested memory (MB) is available as free huge pages (kB).
+        if (mem_size * 1024) > (huge_free * huge_size):
+            # If we want to allocate hugepage dynamically
+            if allocate:
+                mem_needed = (mem_size * 1024) - (huge_free * huge_size)
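+                # Allocate twice the shortfall (in pages) on top of the
+                # current total.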
+                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
+                max_map_count = huge_to_allocate * 4
+                # Increase maximum number of memory map areas a process may have
+                ret_code, _, _ = ssh.exec_command_sudo(
+                    'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
+                    format(max_map_count))
+                if int(ret_code) != 0:
+                    raise RuntimeError('Increase map count failed on {host}'.
+                                       format(host=node['host']))
+                # Increase hugepage count
+                ret_code, _, _ = ssh.exec_command_sudo(
+                    'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
+                    format(huge_to_allocate))
+                if int(ret_code) != 0:
+                    raise RuntimeError('Mount huge pages failed on {host}'.
+                                       format(host=node['host']))
+            # If we do not want to allocate dynamically, end with an error
+            else:
+                raise RuntimeError('Not enough free huge pages: {0}, {1} kB'.
+                                   format(huge_free, huge_free * huge_size))
+        # Check if huge pages mount point exists
+        has_huge_mnt = False
+        ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
+        if int(ret_code) == 0:
+            for line in stdout.splitlines():
+                # Try to find something like:
+                # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
+                mount = line.split()
+                if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
+                    has_huge_mnt = True
+                    break
+        # If huge page mount point does not exist, create one
+        if not has_huge_mnt:
+            ret_code, _, _ = ssh.exec_command_sudo(
+                'mkdir -p {mnt}'.format(mnt=huge_mnt))
+            if int(ret_code) != 0:
+                raise RuntimeError('Create mount dir failed on {host}'.
+                                   format(host=node['host']))
+            ret_code, _, _ = ssh.exec_command_sudo(
+                'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
+                format(mnt=huge_mnt))
+            if int(ret_code) != 0:
+                raise RuntimeError('Mount huge pages failed on {host}'.
+                                   format(host=node['host']))