Fix pylint error in PapiExecutor
[csit.git] / resources / libraries / python / DUTSetup.py
index 84862d4..67edefb 100644
 
 from robot.api import logger
 
-from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.Constants import Constants
 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
-from resources.libraries.python.constants import Constants
-from resources.libraries.python.VatExecutor import VatExecutor
-from resources.libraries.python.VPPUtil import VPPUtil
+from resources.libraries.python.topology import NodeType, Topology
 
 
 class DUTSetup(object):
@@ -27,28 +25,31 @@ class DUTSetup(object):
 
     @staticmethod
     def get_service_logs(node, service):
-        """Get specific service unit logs by journalctl from node.
+        """Get specific service unit logs from node.
 
         :param node: Node in the topology.
         :param service: Service unit name.
         :type node: dict
         :type service: str
         """
-        ssh = SSH()
-        ssh.connect(node)
-        ret_code, _, _ = \
-            ssh.exec_command_sudo('journalctl --no-pager --unit={name} '
-                                  '--since="$(echo `systemctl show -p '
-                                  'ActiveEnterTimestamp {name}` | '
-                                  'awk \'{{print $2 $3}}\')"'.
-                                  format(name=service))
-        if int(ret_code):
-            raise RuntimeError('DUT {host} failed to get logs from unit {name}'.
-                               format(host=node['host'], name=service))
+        if DUTSetup.running_in_container(node):
+            command = ('echo $(< /var/log/supervisord.log);'
+                       'echo $(< /tmp/*supervisor*.log)')
+        else:
+            command = ('journalctl --no-pager --unit={name} '
+                       '--since="$(echo `systemctl show -p '
+                       'ActiveEnterTimestamp {name}` | '
+                       'awk \'{{print $2 $3}}\')"'.
+                       format(name=service))
+        message = 'Node {host} failed to get logs from unit {name}'.\
+            format(host=node['host'], name=service)
+
+        exec_cmd_no_error(node, command, timeout=30, sudo=True,
+                          message=message)
 
     @staticmethod
     def get_service_logs_on_all_duts(nodes, service):
-        """Get specific service unit logs by journalctl from all DUTs.
+        """Get specific service unit logs from all DUTs.
 
         :param nodes: Nodes in the topology.
         :param service: Service unit name.
@@ -68,99 +69,62 @@ class DUTSetup(object):
         :type node: dict
         :type service: str
         """
-        ssh = SSH()
-        ssh.connect(node)
-        # We are doing restart. With this we do not care if service
-        # was running or not.
-        ret_code, _, _ = \
-            ssh.exec_command_sudo('service {name} restart'.
-                                  format(name=service), timeout=120)
-        if int(ret_code):
-            raise RuntimeError('DUT {host} failed to start service {name}'.
-                               format(host=node['host'], name=service))
-
-        DUTSetup.get_service_logs(node, service)
+        if DUTSetup.running_in_container(node):
+            command = 'supervisorctl restart {name}'.format(name=service)
+        else:
+            command = 'service {name} restart'.format(name=service)
+        message = 'Node {host} failed to start service {name}'.\
+            format(host=node['host'], name=service)
 
-    @staticmethod
-    def start_vpp_service_on_all_duts(nodes):
-        """Start up the VPP service on all nodes.
+        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
 
-        :param nodes: Nodes in the topology.
-        :type nodes: dict
-        """
-        for node in nodes.values():
-            if node['type'] == NodeType.DUT:
-                DUTSetup.start_service(node, Constants.VPP_UNIT)
+        DUTSetup.get_service_logs(node, service)
 
     @staticmethod
-    def vpp_show_version_verbose(node):
-        """Run "show version verbose" CLI command.
+    def start_service_on_all_duts(nodes, service):
+        """Start up the named service on all DUTs.
 
-        :param node: Node to run command on.
+        :param nodes: Nodes in the topology.
+        :param service: Service unit name.
         :type node: dict
-        """
-        vat = VatExecutor()
-        vat.execute_script("show_version_verbose.vat", node, json_out=False)
-
-        try:
-            vat.script_should_have_passed()
-        except AssertionError:
-            raise RuntimeError('Failed to get VPP version on host: {name}'.
-                               format(name=node['host']))
-
-    @staticmethod
-    def show_vpp_version_on_all_duts(nodes):
-        """Show VPP version verbose on all DUTs.
-
-        :param nodes: VPP nodes
-        :type nodes: dict
+        :type service: str
         """
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
-                DUTSetup.vpp_show_version_verbose(node)
+                DUTSetup.start_service(node, service)
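# Illustrative usage (not part of this change): callers of the removed
# start_vpp_service_on_all_duts(nodes) keyword would now pass the unit name
# explicitly, for example:
#     DUTSetup.start_service_on_all_duts(nodes, Constants.VPP_UNIT)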
 
     @staticmethod
-    def vpp_show_interfaces(node):
-        """Run "show interface" CLI command.
+    def stop_service(node, service):
+        """Stop the named service on node.
 
-        :param node: Node to run command on.
+        :param node: Node in the topology.
+        :param service: Service unit name.
         :type node: dict
+        :type service: str
         """
-        vat = VatExecutor()
-        vat.execute_script("show_interface.vat", node, json_out=False)
-
-        try:
-            vat.script_should_have_passed()
-        except AssertionError:
-            raise RuntimeError('Failed to get VPP interfaces on host: {name}'.
-                               format(name=node['host']))
+        if DUTSetup.running_in_container(node):
+            command = 'supervisorctl stop {name}'.format(name=service)
+        else:
+            command = 'service {name} stop'.format(name=service)
+        message = 'Node {host} failed to stop service {name}'.\
+            format(host=node['host'], name=service)
 
-    @staticmethod
-    def vpp_api_trace_save(node):
-        """Run "api trace save" CLI command.
+        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
 
-        :param node: Node to run command on.
-        :type node: dict
-        """
-        vat = VatExecutor()
-        vat.execute_script("api_trace_save.vat", node, json_out=False)
+        DUTSetup.get_service_logs(node, service)
 
     @staticmethod
-    def vpp_api_trace_dump(node):
-        """Run "api trace custom-dump" CLI command.
+    def stop_service_on_all_duts(nodes, service):
+        """Stop the named service on all DUTs.
 
-        :param node: Node to run command on.
+        :param nodes: Nodes in the topology.
+        :param service: Service unit name.
         :type node: dict
+        :type service: str
         """
-        vat = VatExecutor()
-        vat.execute_script("api_trace_dump.vat", node, json_out=False)
-
-    @staticmethod
-    def setup_all_duts(nodes):
-        """Prepare all DUTs in given topology for test execution."""
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
-                DUTSetup.setup_dut(node)
+                DUTSetup.stop_service(node, service)
 
     @staticmethod
     def setup_dut(node):
@@ -171,16 +135,24 @@ class DUTSetup(object):
 
         :raises Exception: If the DUT setup fails.
         """
-        ssh = SSH()
-        ssh.connect(node)
+        command = 'bash {0}/{1}/dut_setup.sh'.\
+            format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH)
+        message = 'DUT test setup script failed at node {name}'.\
+            format(name=node['host'])
 
-        ret_code, _, _ = \
-            ssh.exec_command('sudo -Sn bash {0}/{1}/dut_setup.sh'.
-                             format(Constants.REMOTE_FW_DIR,
-                                    Constants.RESOURCES_LIB_SH), timeout=120)
-        if int(ret_code):
-            raise RuntimeError('DUT test setup script failed at node {name}'.
-                               format(name=node['host']))
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
+
+    @staticmethod
+    def setup_all_duts(nodes):
+        """Run script over SSH to setup all DUT nodes.
+
+        :param nodes: Topology nodes.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                DUTSetup.setup_dut(node)
 
     @staticmethod
     def get_vpp_pid(node):
@@ -192,7 +164,6 @@ class DUTSetup(object):
         :rtype: int
         :raises RuntimeError: If it is not possible to get the PID.
         """
-
         ssh = SSH()
         ssh.connect(node)
 
@@ -205,19 +176,17 @@ class DUTSetup(object):
                                    'on node: {0}\n {1}'.
                                    format(node['host'], stdout + stderr))
 
-            if len(stdout.splitlines()) == 1:
+            pid_list = stdout.split()
+            if len(pid_list) == 1:
                 return int(stdout)
-            elif not stdout.splitlines():
+            elif not pid_list:
                 logger.debug("No VPP PID found on node {0}".
                              format(node['host']))
                 continue
             else:
                 logger.debug("More than one VPP PID found on node {0}".
                              format(node['host']))
-                ret_list = list()
-                for line in stdout.splitlines():
-                    ret_list.append(int(line))
-                return ret_list
+                return [int(pid) for pid in pid_list]
 
         return None
 
@@ -230,7 +199,6 @@ class DUTSetup(object):
         :returns: PIDs
         :rtype: dict
         """
-
         pids = dict()
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
@@ -238,28 +206,19 @@ class DUTSetup(object):
         return pids
 
     @staticmethod
-    def vpp_show_crypto_device_mapping(node):
-        """Run "show crypto device mapping" CLI command.
-
-        :param node: Node to run command on.
-        :type node: dict
-        """
-        vat = VatExecutor()
-        vat.execute_script("show_crypto_device_mapping.vat", node,
-                           json_out=False)
-
-    @staticmethod
-    def crypto_device_verify(node, force_init=False, numvfs=32):
+    def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
         """Verify if Crypto QAT device virtual functions are initialized on all
         DUTs. If parameter force initialization is set to True, then try to
         initialize or remove VFs on QAT.
 
         :param node: DUT node.
-        :param force_init: If True then try to initialize to specific value.
+        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
         :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
+        :param force_init: If True then try to initialize to specific value.
         :type node: dict
-        :type force_init: bool
+        :type crypto_type: string
         :type numvfs: int
+        :type force_init: bool
         :returns: nothing
         :raises RuntimeError: If QAT VFs are not created and force init is set
                               to False.
@@ -270,29 +229,41 @@ class DUTSetup(object):
         if sriov_numvfs != numvfs:
             if force_init:
                 # QAT is not initialized and we want to initialize with numvfs
-                DUTSetup.crypto_device_init(node, numvfs)
+                DUTSetup.crypto_device_init(node, crypto_type, numvfs)
             else:
                 raise RuntimeError('QAT device failed to create VFs on {host}'.
                                    format(host=node['host']))
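# Illustrative call with the new signature (not part of this change); the
# crypto_type values are the two handled by crypto_device_init below:
#     DUTSetup.crypto_device_verify(node, "HW_DH895xcc", numvfs=32,
#                                   force_init=True)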
 
     @staticmethod
-    def crypto_device_init(node, numvfs):
+    def crypto_device_init(node, crypto_type, numvfs):
         """Init Crypto QAT device virtual functions on DUT.
 
         :param node: DUT node.
+        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
         :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
         :type node: dict
+        :type crypto_type: string
         :type numvfs: int
         :returns: nothing
         :raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
         """
+        if crypto_type == "HW_DH895xcc":
+            kernel_mod = "qat_dh895xcc"
+            kernel_drv = "dh895xcc"
+        elif crypto_type == "HW_C3xxx":
+            kernel_mod = "qat_c3xxx"
+            kernel_drv = "c3xxx"
+        else:
+            raise RuntimeError('Unsupported crypto device type on {host}'.
+                               format(host=node['host']))
+
         pci_addr = Topology.get_cryptodev(node)
 
         # QAT device must be re-bound to kernel driver before initialization.
         pci_addr = Topology.get_cryptodev(node)
 
         # QAT device must be re-bound to kernel driver before initialization.
-        DUTSetup.verify_kernel_module(node, 'qat_dh895xcc', force_load=True)
+        DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)
 
         # Stop VPP to prevent deadlock.
 
+        DUTSetup.stop_service(node, Constants.VPP_UNIT)
 
         current_driver = DUTSetup.get_pci_dev_driver(
             node, pci_addr.replace(':', r'\:'))
 
             DUTSetup.pci_driver_unbind(node, pci_addr)
 
         # Bind to kernel driver.
             DUTSetup.pci_driver_unbind(node, pci_addr)
 
         # Bind to kernel driver.
-        DUTSetup.pci_driver_bind(node, pci_addr, 'dh895xcc')
+        DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)
 
         # Initialize QAT VFs.
         if numvfs > 0:
 
         message = 'Failed to create {num} VFs on {pci} device on {host}'.\
             format(num=numvfs, pci=pf_pci_addr, host=node['host'])
 
         message = 'Failed to create {num} VFs on {pci} device on {host}'.\
             format(num=numvfs, pci=pf_pci_addr, host=node['host'])
 
-        exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
     @staticmethod
     def pci_driver_unbind(node, pci_addr):
 
         message = 'Failed to unbind PCI device {pci} on {host}'.\
             format(pci=pci_addr, host=node['host'])
 
         message = 'Failed to unbind PCI device {pci} on {host}'.\
             format(pci=pci_addr, host=node['host'])
 
-        exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
     @staticmethod
     def pci_driver_bind(node, pci_addr, driver):
 
             "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
             format(driver=driver, pci=pci_addr.replace(':', r'\:'))
 
             "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
             format(driver=driver, pci=pci_addr.replace(':', r'\:'))
 
-        exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
         command = "sh -c "\
             "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
             format(pci=pci_addr, driver=driver)
 
 
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
         command = "sh -c "\
             "'echo  | tee /sys/bus/pci/devices/{pci}/driver_override'".\
             format(pci=pci_addr.replace(':', r'\:'))
 
 
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
     @staticmethod
     def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
 
         message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\
             format(vf_pci_addr=vf_pci_addr, host=node['host'])
 
         message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\
             format(vf_pci_addr=vf_pci_addr, host=node['host'])
 
-        exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
     @staticmethod
     def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
 
             "'echo {driver} | tee {vf_path}/driver_override'".\
             format(driver=driver, vf_path=vf_path)
 
             "'echo {driver} | tee {vf_path}/driver_override'".\
             format(driver=driver, vf_path=vf_path)
 
-        exec_cmd_no_error(node, command, timeout=60, sudo=True, message=message)
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
         command = "sh -c "\
             "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
             format(vf_pci_addr=vf_pci_addr, driver=driver)
 
 
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
         command = "sh -c "\
             "'echo  | tee {vf_path}/driver_override'".\
             format(vf_path=vf_path)
 
 
+        exec_cmd_no_error(node, command, timeout=120, sudo=True,
+                          message=message)
 
     @staticmethod
     def get_pci_dev_driver(node, pci_addr):
 
         exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
 
     @staticmethod
         exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
 
     @staticmethod
-    def vpp_enable_traces_on_all_duts(nodes):
-        """Enable vpp packet traces on all DUTs in the given topology.
-
-        :param nodes: Nodes in the topology.
-        :type nodes: dict
-        """
-        for node in nodes.values():
-            if node['type'] == NodeType.DUT:
-                DUTSetup.vpp_enable_traces_on_dut(node)
-
-    @staticmethod
-    def vpp_enable_traces_on_dut(node):
-        """Enable vpp packet traces on the DUT node.
-
-        :param node: DUT node to set up.
-        :type node: dict
-        """
-
-        vat = VatExecutor()
-        vat.execute_script("enable_dpdk_traces.vat", node, json_out=False)
-        vat.execute_script("enable_vhost_user_traces.vat", node, json_out=False)
-        vat.execute_script("enable_memif_traces.vat", node, json_out=False)
-
-    @staticmethod
-    def install_vpp_on_all_duts(nodes, vpp_pkg_dir, vpp_rpm_pkgs, vpp_deb_pkgs):
+    def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
         """Install VPP on all DUT nodes.
 
         :param nodes: Nodes in the topology.
         :param vpp_pkg_dir: Path to directory where VPP packages are stored.
-        :param vpp_rpm_pkgs: List of VPP rpm packages to be installed.
-        :param vpp_deb_pkgs: List of VPP deb packages to be installed.
         :type nodes: dict
         :type vpp_pkg_dir: str
-        :type vpp_rpm_pkgs: list
-        :type vpp_deb_pkgs: list
         :raises RuntimeError: If failed to remove or install VPP.
         """
-
-        logger.debug("Installing VPP")
-
         for node in nodes.values():
+            message = 'Failed to install VPP on host {host}!'.\
+                format(host=node['host'])
             if node['type'] == NodeType.DUT:
-                logger.debug("Installing VPP on node {0}".format(node['host']))
-
-                ssh = SSH()
-                ssh.connect(node)
-
-                cmd = "[[ -f /etc/redhat-release ]]"
-                return_code, _, _ = ssh.exec_command(cmd)
-                if not int(return_code):
-                    # workaroud - uninstall existing vpp installation until
-                    # start-testcase script is updated on all virl servers
-                    rpm_pkgs_remove = "vpp*"
-                    cmd_u = 'yum -y remove "{0}"'.format(rpm_pkgs_remove)
-                    r_rcode, _, r_err = ssh.exec_command_sudo(cmd_u, timeout=90)
-                    if int(r_rcode):
-                        raise RuntimeError('Failed to remove previous VPP'
-                                           'installation on host {0}:\n{1}'
-                                           .format(node['host'], r_err))
-
-                    rpm_pkgs = "*.rpm ".join(str(vpp_pkg_dir + pkg)
-                                             for pkg in vpp_rpm_pkgs) + "*.rpm"
-                    cmd_i = "rpm -ivh {0}".format(rpm_pkgs)
-                    ret_code, _, err = ssh.exec_command_sudo(cmd_i, timeout=90)
-                    if int(ret_code):
-                        raise RuntimeError('Failed to install VPP on host {0}:'
-                                           '\n{1}'.format(node['host'], err))
-                    else:
-                        ssh.exec_command_sudo("rpm -qai vpp*")
-                        logger.info("VPP installed on node {0}".
-                                    format(node['host']))
+                command = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
+                exec_cmd_no_error(node, command, sudo=True)
+
+                command = '. /etc/lsb-release; echo "${DISTRIB_ID}"'
+                stdout, _ = exec_cmd_no_error(node, command)
+
+                if stdout.strip() == 'Ubuntu':
+                    exec_cmd_no_error(node, 'apt-get purge -y "*vpp*" || true',
+                                      timeout=120, sudo=True)
+                    exec_cmd_no_error(node, 'dpkg -i --force-all {dir}*.deb'.
+                                      format(dir=vpp_pkg_dir), timeout=120,
+                                      sudo=True, message=message)
+                    exec_cmd_no_error(node, 'dpkg -l | grep vpp', sudo=True)
                 else:
-                    # workaroud - uninstall existing vpp installation until
-                    # start-testcase script is updated on all virl servers
-                    deb_pkgs_remove = "vpp*"
-                    cmd_u = 'apt-get purge -y "{0}"'.format(deb_pkgs_remove)
-                    r_rcode, _, r_err = ssh.exec_command_sudo(cmd_u, timeout=90)
-                    if int(r_rcode):
-                        raise RuntimeError('Failed to remove previous VPP'
-                                           'installation on host {0}:\n{1}'
-                                           .format(node['host'], r_err))
-                    deb_pkgs = "*.deb ".join(str(vpp_pkg_dir + pkg)
-                                             for pkg in vpp_deb_pkgs) + "*.deb"
-                    cmd_i = "dpkg -i --force-all {0}".format(deb_pkgs)
-                    ret_code, _, err = ssh.exec_command_sudo(cmd_i, timeout=90)
-                    if int(ret_code):
-                        raise RuntimeError('Failed to install VPP on host {0}:'
-                                           '\n{1}'.format(node['host'], err))
-                    else:
-                        ssh.exec_command_sudo("dpkg -l | grep vpp")
-                        logger.info("VPP installed on node {0}".
-                                    format(node['host']))
-
-                ssh.disconnect(node)
+                    exec_cmd_no_error(node, 'yum -y remove "*vpp*" || true',
+                                      timeout=120, sudo=True)
+                    exec_cmd_no_error(node, 'rpm -ivh {dir}*.rpm'.
+                                      format(dir=vpp_pkg_dir), timeout=120,
+                                      sudo=True, message=message)
+                    exec_cmd_no_error(node, 'rpm -qai *vpp*', sudo=True)
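# Illustrative call (not part of this change); the package directory is a
# hypothetical example and should end with a slash, since the install commands
# glob '{dir}*.deb' / '{dir}*.rpm' directly:
#     DUTSetup.install_vpp_on_all_duts(nodes, vpp_pkg_dir='/tmp/vpp_pkgs/')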
 
     @staticmethod
-    def verify_vpp_on_dut(node):
-        """Verify that VPP is installed on DUT node.
+    def running_in_container(node):
+        """Check if topology node is running inside a container.
 
-        :param node: DUT node.
+        :param node: Topology node.
         :type node: dict
-        :raises RuntimeError: If failed to restart VPP, get VPP version
-            or get VPP interfaces.
+        :returns: True if running in docker container, False if not or if
+            detection failed.
+        :rtype: bool
         """
-
-        logger.debug("Verify VPP on node {0}".format(node['host']))
-
-        DUTSetup.vpp_show_version_verbose(node)
-        DUTSetup.vpp_show_interfaces(node)
+        command = "fgrep docker /proc/1/cgroup"
+        message = 'Failed to get cgroup settings.'
+        try:
+            exec_cmd_no_error(node, command, timeout=30, sudo=False,
+                              message=message)
+        except RuntimeError:
+            return False
+        return True
 
     @staticmethod
 
     @staticmethod
-    def verify_vpp_on_all_duts(nodes):
-        """Verify that VPP is installed on all DUT nodes.
+    def get_docker_mergeddir(node, uuid):
+        """Get Docker overlay for MergedDir diff.
 
-        :param nodes: Nodes in the topology.
-        :type nodes: dict
+        :param node: DUT node.
+        :param uuid: Docker UUID.
+        :type node: dict
+        :type uuid: str
+        :returns: Docker container MergedDir.
+        :rtype: str
+        :raises RuntimeError: If getting output failed.
         """
+        command = "docker inspect --format='"\
+            "{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
+        message = 'Failed to get directory of {uuid} on host {host}'.\
+            format(uuid=uuid, host=node['host'])
 
-        logger.debug("Verify VPP on all DUTs")
-
-        DUTSetup.start_vpp_service_on_all_duts(nodes)
-
-        for node in nodes.values():
-            if node['type'] == NodeType.DUT:
-                DUTSetup.verify_vpp_on_dut(node)
-
+        stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
+        return stdout.strip()
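# Illustrative sketch (not part of this change): the returned MergedDir path can
# be used to reach a container's files from the host; the uuid value is assumed
# to be obtained elsewhere (e.g. from 'docker ps -q'):
#     mergeddir = DUTSetup.get_docker_mergeddir(node, uuid)
#     exec_cmd_no_error(node, 'ls {dir}/etc'.format(dir=mergeddir), sudo=True)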
 
     @staticmethod
     def get_huge_page_size(node):