feat(core): Add dpdk log level
[csit.git] / resources / libraries / python / QemuUtils.py
index 5821455..35959ce 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 """QEMU utilities library."""
 
 
 """QEMU utilities library."""
 
-from time import time, sleep
 import json
 
 import json
 
+from re import match
+from string import Template
+from time import sleep
+
 from robot.api import logger
 
 from robot.api import logger
 
-from resources.libraries.python.ssh import SSH, SSHTimeout
-from resources.libraries.python.constants import Constants
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.DpdkUtil import DpdkUtil
+from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.OptionString import OptionString
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
 from resources.libraries.python.topology import NodeType, Topology
 from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.VhostUser import VirtioFeaturesFlags
+from resources.libraries.python.VhostUser import VirtioFeatureMask
+from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
+
+__all__ = [u"QemuUtils"]
 
 
 
 
-class QemuUtils(object):
+class QemuUtils:
     """QEMU utilities."""
 
     """QEMU utilities."""
 
-    def __init__(self, qemu_id=1):
-        self._qemu_id = qemu_id
-        # Path to QEMU binary. Use x86_64 by default
-        self._qemu_path = '/usr/bin/'
-        self._qemu_bin = 'qemu-system-x86_64'
-        # QEMU Machine Protocol socket
-        self._qmp_sock = '/tmp/qmp{0}.sock'.format(self._qemu_id)
-        # QEMU Guest Agent socket
-        self._qga_sock = '/tmp/qga{0}.sock'.format(self._qemu_id)
-        # QEMU PID file
-        self._pid_file = '/tmp/qemu{0}.pid'.format(self._qemu_id)
-        self._qemu_opt = {}
-        # Default 1 CPU.
-        self._qemu_opt['smp'] = '-smp 1,sockets=1,cores=1,threads=1'
-        # Daemonize the QEMU process after initialization. Default one
-        # management interface.
-        self._qemu_opt['options'] = '-cpu host -daemonize -enable-kvm ' \
-            '-machine pc,accel=kvm,usb=off,mem-merge=off ' \
-            '-net nic,macaddr=52:54:00:00:{0:02x}:ff -balloon none'\
-            .format(self._qemu_id)
-        self._qemu_opt['ssh_fwd_port'] = 10021 + qemu_id
-        # Default serial console port
-        self._qemu_opt['serial_port'] = 4555 + qemu_id
-        # Default 512MB virtual RAM
-        self._qemu_opt['mem_size'] = 512
-        # Default huge page mount point, required for Vhost-user interfaces.
-        self._qemu_opt['huge_mnt'] = '/mnt/huge'
-        # Default do not allocate huge pages.
-        self._qemu_opt['huge_allocate'] = False
-        # Default image for CSIT virl setup
-        self._qemu_opt['disk_image'] = '/var/lib/vm/vhost-nested.img'
-        # VM node info dict
+    # Use one instance of class per test case.
+    ROBOT_LIBRARY_SCOPE = u"TEST CASE"
+
+    def __init__(
+            self, node, qemu_id=1, smp=1, mem=512, vnf=None,
+            img=Constants.QEMU_VM_IMAGE, page_size=u""):
+        """Initialize QemuUtils class.
+
+        :param node: Node to run QEMU on.
+        :param qemu_id: QEMU identifier.
+        :param smp: Number of virtual SMP units (cores).
+        :param mem: Amount of memory.
+        :param vnf: Network function workload.
+        :param img: QEMU disk image or kernel image path.
+        :param page_size: Hugepage Size.
+        :type node: dict
+        :type qemu_id: int
+        :type smp: int
+        :type mem: int
+        :type vnf: str
+        :type img: str
+        :type page_size: str
+        """
+        self._nic_id = 0
+        self._node = node
+        self._arch = Topology.get_node_arch(self._node)
+        self._opt = dict()
+
+        # Architecture specific options
+        if self._arch == u"aarch64":
+            self._opt[u"machine_args"] = \
+                u"virt,accel=kvm,usb=off,mem-merge=off,gic-version=3"
+            self._opt[u"console"] = u"ttyAMA0"
+        else:
+            self._opt[u"machine_args"] = u"pc,accel=kvm,usb=off,mem-merge=off"
+            self._opt[u"console"] = u"ttyS0"
+        self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/build/app"
         self._vm_info = {
         self._vm_info = {
-            'type': NodeType.VM,
-            'port': self._qemu_opt['ssh_fwd_port'],
-            'username': 'cisco',
-            'password': 'cisco',
-            'interfaces': {},
+            u"host": node[u"host"],
+            u"type": NodeType.VM,
+            u"port": 10021 + qemu_id,
+            u"serial": 4555 + qemu_id,
+            u"username": 'testuser',
+            u"password": 'Csit1234',
+            u"interfaces": {},
         }
         }
-        # Virtio queue count
-        self._qemu_opt['queues'] = 1
-        self._vhost_id = 0
-        self._ssh = None
-        self._node = None
-        self._socks = [self._qmp_sock, self._qga_sock]
-
-    def qemu_set_path(self, path):
-        """Set binary path for QEMU.
-
-        :param path: Absolute path in filesystem.
-        :type path: str
+        if node[u"port"] != 22:
+            self._vm_info[u"host_port"] = node[u"port"]
+            self._vm_info[u"host_username"] = node[u"username"]
+            self._vm_info[u"host_password"] = node[u"password"]
+        # Input Options.
+        self._opt[u"qemu_id"] = qemu_id
+        self._opt[u"mem"] = int(mem)
+        self._opt[u"smp"] = int(smp)
+        self._opt[u"img"] = img
+        self._opt[u"vnf"] = vnf
+        self._opt[u"page_size"] = page_size
+
+        # Temporary files.
+        self._temp = dict()
+        self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log"
+        self._temp[u"pidfile"] = f"/run/qemu_{qemu_id}.pid"
+        if img == Constants.QEMU_VM_IMAGE:
+            self._temp[u"qmp"] = f"/run/qmp_{qemu_id}.sock"
+            self._temp[u"qga"] = f"/run/qga_{qemu_id}.sock"
+        elif img == Constants.QEMU_VM_KERNEL:
+            self._opt[u"img"], _ = exec_cmd_no_error(
+                node, f"ls -1 {Constants.QEMU_VM_KERNEL}* | tail -1",
+                message=u"Qemu Kernel VM image not found!"
+            )
+            self._temp[u"ini"] = f"/etc/vm_init_{qemu_id}.conf"
+            self._opt[u"initrd"], _ = exec_cmd_no_error(
+                node, f"ls -1 {Constants.QEMU_VM_KERNEL_INITRD}* | tail -1",
+                message=u"Qemu Kernel initrd image not found!"
+            )
+        else:
+            raise RuntimeError(f"QEMU: Unknown VM image option: {img}")
+        # Computed parameters for QEMU command line.
+        self._params = OptionString(prefix=u"-")
+
+    def add_default_params(self):
+        """Set default QEMU command line parameters."""
+        mem_path = f"/dev/hugepages1G" \
+            if self._opt[u"page_size"] == u"1G" else u"/dev/hugepages"
+
+        self._params.add(u"daemonize")
+        self._params.add(u"nodefaults")
+        self._params.add_with_value(
+            u"name", f"vnf{self._opt.get(u'qemu_id')},debug-threads=on"
+        )
+        self._params.add(u"no-user-config")
+        self._params.add(u"nographic")
+        self._params.add(u"enable-kvm")
+        self._params.add_with_value(u"pidfile", self._temp.get(u"pidfile"))
+        self._params.add_with_value(u"cpu", u"host")
+
+        self._params.add_with_value(u"machine", self._opt.get(u"machine_args"))
+        self._params.add_with_value(
+            u"smp", f"{self._opt.get(u'smp')},sockets=1,"
+            f"cores={self._opt.get(u'smp')},threads=1"
+        )
+        self._params.add_with_value(
+            u"object", f"memory-backend-file,id=mem,"
+            f"size={self._opt.get(u'mem')}M,"
+            f"mem-path={mem_path},share=on"
+        )
+        self._params.add_with_value(u"m", f"{self._opt.get(u'mem')}M")
+        self._params.add_with_value(u"numa", u"node,memdev=mem")
+
+    def add_net_user(self, net="10.0.2.0/24"):
+        """Set management port forwarding."""
+        self._params.add_with_value(
+            u"netdev", f"user,id=mgmt,net={net},"
+            f"hostfwd=tcp::{self._vm_info[u'port']}-:22"
+        )
+        self._params.add_with_value(
+            u"device", f"virtio-net,netdev=mgmt"
+        )
+
+    def add_qmp_qga(self):
+        """Set QMP, QGA management."""
+        self._params.add_with_value(
+            u"chardev", f"socket,path={self._temp.get(u'qga')},"
+            f"server,nowait,id=qga0"
+        )
+        self._params.add_with_value(
+            u"device", u"isa-serial,chardev=qga0"
+        )
+        self._params.add_with_value(
+            u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait"
+        )
+
+    def add_serial(self):
+        """Set serial to file redirect."""
+        self._params.add_with_value(
+            u"chardev", f"socket,host=127.0.0.1,"
+            f"port={self._vm_info[u'serial']},id=gnc0,server,nowait"
+        )
+        self._params.add_with_value(
+            u"device", u"isa-serial,chardev=gnc0"
+        )
+        self._params.add_with_value(
+            u"serial", f"file:{self._temp.get(u'log')}"
+        )
+
+    def add_drive_cdrom(self, drive_file, index=None):
+        """Set CD-ROM drive.
+
+        :param drive_file: Path to drive image.
+        :param index: Drive index.
+        :type drive_file: str
+        :type index: int
         """
         """
-        self._qemu_path = path
-
-    def qemu_set_smp(self, cpus, cores, threads, sockets):
-        """Set SMP option for QEMU.
-
-        :param cpus: Number of CPUs.
-        :param cores: Number of CPU cores on one socket.
-        :param threads: Number of threads on one CPU core.
-        :param sockets: Number of discrete sockets in the system.
-        :type cpus: int
-        :type cores: int
-        :type threads: int
-        :type sockets: int
+        index = f"index={index}," if index else u""
+        self._params.add_with_value(
+            u"drive", f"file={drive_file},{index}media=cdrom"
+        )
+
+    def add_drive(self, drive_file, drive_format):
+        """Set drive with custom format.
+
+        :param drive_file: Path to drive image.
+        :param drive_format: Drive image format.
+        :type drive_file: str
+        :type drive_format: str
         """
         """
-        self._qemu_opt['smp'] = '-smp {},cores={},threads={},sockets={}'.format(
-            cpus, cores, threads, sockets)
-
-    def qemu_set_ssh_fwd_port(self, fwd_port):
-        """Set host port for guest SSH forwarding.
+        self._params.add_with_value(
+            u"drive", f"file={drive_file},format={drive_format},"
+            u"cache=none,if=virtio,file.locking=off"
+        )
+
+    def add_kernelvm_params(self):
+        """Set KernelVM QEMU parameters."""
+        hugepages = 3 if self._opt[u"page_size"] == u"1G" else 512
+
+        self._params.add_with_value(
+            u"serial", f"file:{self._temp.get(u'log')}"
+        )
+        self._params.add_with_value(
+            u"fsdev", u"local,id=root9p,path=/,security_model=none"
+        )
+        self._params.add_with_value(
+            u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot"
+        )
+        self._params.add_with_value(
+            u"kernel", f"{self._opt.get(u'img')}"
+        )
+        self._params.add_with_value(
+            u"initrd", f"{self._opt.get(u'initrd')}"
+        )
+        self._params.add_with_value(
+            u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
+            f"root=virtioroot console={self._opt.get(u'console')} "
+            f"tsc=reliable hugepages={hugepages} "
+            f"hugepagesz={self._opt.get(u'page_size')} "
+            f"init={self._temp.get(u'ini')} fastboot'"
+        )
+
+    def add_vhost_user_if(
+            self, socket, server=True, jumbo_frames=False, queue_size=None,
+            queues=1, virtio_feature_mask=None):
+        """Add Vhost-user interface.
 
 
-        :param fwd_port: Port number on host for guest SSH forwarding.
-        :type fwd_port: int
+        :param socket: Path of the unix socket.
+        :param server: If True the socket shall be a listening socket.
+        :param jumbo_frames: Set True if jumbo frames are used in the test.
+        :param queue_size: Vring queue size.
+        :param queues: Number of queues.
+        :param virtio_feature_mask: Mask of virtio features to be enabled.
+        :type socket: str
+        :type server: bool
+        :type jumbo_frames: bool
+        :type queue_size: int
+        :type queues: int
+        :type virtio_feature_mask: int
+        """
+        self._nic_id += 1
+        if jumbo_frames:
+            logger.debug(u"Jumbo frames temporarily disabled!")
+        self._params.add_with_value(
+            u"chardev", f"socket,id=char{self._nic_id},"
+            f"path={socket}{u',server' if server is True else u''}"
+        )
+        self._params.add_with_value(
+            u"netdev", f"vhost-user,id=vhost{self._nic_id},"
+            f"chardev=char{self._nic_id},queues={queues}"
+        )
+        mac = f"52:54:00:00:{self._opt.get(u'qemu_id'):02x}:" \
+            f"{self._nic_id:02x}"
+        queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
+            if queue_size else u""
+        gso = VirtioFeatureMask.is_feature_enabled(
+            virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_GSO)
+        csum = VirtioFeatureMask.is_feature_enabled(
+            virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_CSUM)
+
+        self._params.add_with_value(
+            u"device", f"virtio-net-pci,netdev=vhost{self._nic_id},mac={mac},"
+            f"addr={self._nic_id+5}.0,mq=on,vectors={2 * queues + 2},"
+            f"csum={u'on' if csum else u'off'},"
+            f"gso={u'on' if gso else u'off'},"
+            f"guest_tso4={u'on' if gso else u'off'},"
+            f"guest_tso6={u'on' if gso else u'off'},"
+            f"guest_ecn={u'on' if gso else u'off'},"
+            f"{queue_size}"
+        )
+
+        # Add interface MAC and socket to the node dict.
+        if_data = {u"mac_address": mac, u"socket": socket}
+        if_name = f"vhost{self._nic_id}"
+        self._vm_info[u"interfaces"][if_name] = if_data
+        # Add socket to temporary file list.
+        self._temp[if_name] = socket
+
+    def add_vfio_pci_if(self, pci):
+        """Add VFIO PCI interface.
+
+        :param pci: PCI address of interface.
+        :type pci: str
         """
         """
-        self._qemu_opt['ssh_fwd_port'] = fwd_port
-        self._vm_info['port'] = fwd_port
+        self._nic_id += 1
+        self._params.add_with_value(
+            u"device", f"vfio-pci,host={pci},addr={self._nic_id+5}.0"
+        )
 
 
-    def qemu_set_serial_port(self, port):
-        """Set serial console port.
+    def create_kernelvm_config_vpp(self, **kwargs):
+        """Create QEMU VPP config files.
 
 
-        :param port: Serial console port.
-        :type port: int
+        :param kwargs: Key-value pairs to replace content of VPP configuration
+            file.
+        :type kwargs: dict
         """
         """
-        self._qemu_opt['serial_port'] = port
+        startup = f"/etc/vpp/vm_startup_{self._opt.get(u'qemu_id')}.conf"
+        running = f"/etc/vpp/vm_running_{self._opt.get(u'qemu_id')}.exec"
+
+        self._temp[u"startup"] = startup
+        self._temp[u"running"] = running
+        self._opt[u"vnf_bin"] = f"/usr/bin/vpp -c {startup}"
+
+        # Create VPP startup configuration.
+        vpp_config = VppConfigGenerator()
+        vpp_config.set_node(self._node)
+        vpp_config.add_unix_nodaemon()
+        vpp_config.add_unix_cli_listen()
+        vpp_config.add_unix_exec(running)
+        vpp_config.add_socksvr()
+        vpp_config.add_main_heap_size(u"512M")
+        vpp_config.add_main_heap_page_size(self._opt[u"page_size"])
+        vpp_config.add_default_hugepage_size(self._opt[u"page_size"])
+        vpp_config.add_statseg_size(u"512M")
+        vpp_config.add_statseg_page_size(self._opt[u"page_size"])
+        vpp_config.add_statseg_per_node_counters(u"on")
+        vpp_config.add_buffers_per_numa(107520)
+        vpp_config.add_cpu_main_core(u"0")
+        if self._opt.get(u"smp") > 1:
+            vpp_config.add_cpu_corelist_workers(f"1-{self._opt.get(u'smp')-1}")
+        vpp_config.add_plugin(u"disable", u"default")
+        vpp_config.add_plugin(u"enable", u"ping_plugin.so")
+        if "2vfpt" in self._opt.get(u'vnf'):
+            vpp_config.add_plugin(u"enable", u"avf_plugin.so")
+        if "vhost" in self._opt.get(u'vnf'):
+            vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
+            vpp_config.add_dpdk_dev(u"0000:00:06.0", u"0000:00:07.0")
+            vpp_config.add_dpdk_dev_default_rxq(kwargs[u"queues"])
+            vpp_config.add_dpdk_log_level(u".*,debug")
+            if not kwargs[u"jumbo_frames"]:
+                vpp_config.add_dpdk_no_multi_seg()
+                vpp_config.add_dpdk_no_tx_checksum_offload()
+        if "ipsec" in self._opt.get(u'vnf'):
+            vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
+            vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
+            vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
+        if "nat" in self._opt.get(u'vnf'):
+            vpp_config.add_nat(value=u"endpoint-dependent")
+            vpp_config.add_plugin(u"enable", u"nat_plugin.so")
+        vpp_config.write_config(startup)
+
+        # Create VPP running configuration.
+        template = f"{Constants.RESOURCES_TPL}/vm/{self._opt.get(u'vnf')}.exec"
+        exec_cmd_no_error(self._node, f"rm -f {running}", sudo=True)
+
+        with open(template, u"rt") as src_file:
+            src = Template(src_file.read())
+            exec_cmd_no_error(
+                self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
+                f"sudo tee {running}"
+            )
 
 
-    def qemu_set_mem_size(self, mem_size):
-        """Set virtual RAM size.
+    def create_kernelvm_config_testpmd_io(self, **kwargs):
+        """Create QEMU testpmd-io command line.
 
 
-        :param mem_size: RAM size in Mega Bytes.
-        :type mem_size: int
+        :param kwargs: Key-value pairs to construct command line parameters.
+        :type kwargs: dict
+        """
+        pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
+        testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
+            eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
+            eal_driver=False,
+            eal_pci_whitelist0=u"0000:00:06.0",
+            eal_pci_whitelist1=u"0000:00:07.0",
+            eal_in_memory=True,
+            pmd_num_mbufs=32768,
+            pmd_fwd_mode=u"io",
+            pmd_nb_ports=u"2",
+            pmd_portmask=u"0x3",
+            pmd_max_pkt_len=pmd_max_pkt_len,
+            pmd_mbuf_size=u"16384",
+            pmd_rxq=kwargs[u"queues"],
+            pmd_txq=kwargs[u"queues"],
+            pmd_tx_offloads='0x0',
+            pmd_nb_cores=str(self._opt.get(u"smp") - 1)
+        )
+
+        self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
+
+    def create_kernelvm_config_testpmd_mac(self, **kwargs):
+        """Create QEMU testpmd-mac command line.
+
+        :param kwargs: Key-value pairs to construct command line parameters.
+        :type kwargs: dict
+        """
+        pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
+        testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
+            eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
+            eal_driver=False,
+            eal_pci_whitelist0=u"0000:00:06.0",
+            eal_pci_whitelist1=u"0000:00:07.0",
+            eal_in_memory=True,
+            pmd_num_mbufs=32768,
+            pmd_fwd_mode=u"mac",
+            pmd_nb_ports=u"2",
+            pmd_portmask=u"0x3",
+            pmd_max_pkt_len=pmd_max_pkt_len,
+            pmd_mbuf_size=u"16384",
+            pmd_eth_peer_0=f"0,{kwargs[u'vif1_mac']}",
+            pmd_eth_peer_1=f"1,{kwargs[u'vif2_mac']}",
+            pmd_rxq=kwargs[u"queues"],
+            pmd_txq=kwargs[u"queues"],
+            pmd_tx_offloads=u"0x0",
+            pmd_nb_cores=str(self._opt.get(u"smp") - 1)
+        )
+
+        self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
+
+    def create_kernelvm_config_iperf3(self):
+        """Create QEMU iperf3 command line."""
+        self._opt[u"vnf_bin"] = f"mkdir /run/sshd; /usr/sbin/sshd -D -d"
+
+    def create_kernelvm_init(self, **kwargs):
+        """Create QEMU init script.
+
+        :param kwargs: Key-value pairs to replace content of init startup file.
+        :type kwargs: dict
         """
         """
-        self._qemu_opt['mem_size'] = int(mem_size)
+        init = self._temp.get(u"ini")
+        exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)
+
+        with open(kwargs[u"template"], u"rt") as src_file:
+            src = Template(src_file.read())
+            exec_cmd_no_error(
+                self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
+                f"sudo tee {init}"
+            )
+            exec_cmd_no_error(self._node, f"chmod +x {init}", sudo=True)
 
 
-    def qemu_set_huge_mnt(self, huge_mnt):
-        """Set hugefile mount point.
+    def configure_kernelvm_vnf(self, **kwargs):
+        """Create KernelVM VNF configurations.
 
 
-        :param huge_mnt: System hugefile mount point.
-        :type huge_mnt: int
+        :param kwargs: Key-value pairs for templating configs.
+        :type kwargs: dict
         """
         """
-        self._qemu_opt['huge_mnt'] = huge_mnt
-
-    def qemu_set_huge_allocate(self):
-        """Set flag to allocate more huge pages if needed."""
-        self._qemu_opt['huge_allocate'] = True
+        if u"vpp" in self._opt.get(u"vnf"):
+            self.create_kernelvm_config_vpp(**kwargs)
+            self.create_kernelvm_init(
+                template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+                vnf_bin=self._opt.get(u"vnf_bin")
+            )
+        elif u"testpmd_io" in self._opt.get(u"vnf"):
+            self.create_kernelvm_config_testpmd_io(**kwargs)
+            self.create_kernelvm_init(
+                template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+                vnf_bin=self._opt.get(u"vnf_bin")
+            )
+        elif u"testpmd_mac" in self._opt.get(u"vnf"):
+            self.create_kernelvm_config_testpmd_mac(**kwargs)
+            self.create_kernelvm_init(
+                template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+                vnf_bin=self._opt.get(u"vnf_bin")
+            )
+        elif u"iperf3" in self._opt.get(u"vnf"):
+            qemu_id = self._opt.get(u'qemu_id') % 2
+            self.create_kernelvm_config_iperf3()
+            self.create_kernelvm_init(
+                template=f"{Constants.RESOURCES_TPL}/vm/init_iperf3.sh",
+                vnf_bin=self._opt.get(u"vnf_bin"),
+                ip_address_l=u"2.2.2.2/30" if qemu_id else u"1.1.1.1/30",
+                ip_address_r=u"2.2.2.1" if qemu_id else u"1.1.1.2",
+                ip_route_r=u"1.1.1.0/30" if qemu_id else u"2.2.2.0/30"
+            )
+        else:
+            raise RuntimeError(u"QEMU: Unsupported VNF!")
 
 
-    def qemu_set_disk_image(self, disk_image):
-        """Set disk image.
+    def get_qemu_pids(self):
+        """Get QEMU CPU pids.
 
 
-        :param disk_image: Path of the disk image.
-        :type disk_image: str
+        :returns: List of QEMU CPU pids.
+        :rtype: list of str
         """
         """
-        self._qemu_opt['disk_image'] = disk_image
+        command = f"grep -rwl 'CPU' /proc/$(sudo cat " \
+            f"{self._temp.get(u'pidfile')})/task/*/comm "
+        command += r"| xargs dirname | sed -e 's/\/.*\///g' | uniq"
+
+        stdout, _ = exec_cmd_no_error(self._node, command)
+        return stdout.splitlines()
 
     def qemu_set_affinity(self, *host_cpus):
         """Set qemu affinity by getting thread PIDs via QMP and taskset to list
 
     def qemu_set_affinity(self, *host_cpus):
         """Set qemu affinity by getting thread PIDs via QMP and taskset to list
-        of CPU cores.
+        of CPU cores. Function tries to execute 3 times to avoid race condition
+        in getting thread PIDs.
 
         :param host_cpus: List of CPU cores.
         :type host_cpus: list
         """
 
         :param host_cpus: List of CPU cores.
         :type host_cpus: list
         """
-        qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']
-
-        if len(qemu_cpus) != len(host_cpus):
-            logger.debug('Host CPU count {0}, Qemu Thread count {1}'.format(
-                len(host_cpus), len(qemu_cpus)))
-            raise ValueError('Host CPU count must match Qemu Thread count')
-
-        for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
-            cmd = 'taskset -pc {0} {1}'.format(host_cpu, qemu_cpu['thread_id'])
-            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
-            if int(ret_code) != 0:
-                logger.debug('Set affinity failed {0}'.format(stderr))
-                raise RuntimeError('Set affinity failed on {0}'.format(
-                    self._node['host']))
+        for _ in range(3):
+            try:
+                qemu_cpus = self.get_qemu_pids()
+
+                if len(qemu_cpus) != len(host_cpus):
+                    sleep(1)
+                    continue
+                for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
+                    command = f"taskset -pc {host_cpu} {qemu_cpu}"
+                    message = f"QEMU: Set affinity failed " \
+                        f"on {self._node[u'host']}!"
+                    exec_cmd_no_error(
+                        self._node, command, sudo=True, message=message
+                    )
+                break
+            except (RuntimeError, ValueError):
+                self.qemu_kill_all()
+                raise
+        else:
+            self.qemu_kill_all()
+            raise RuntimeError(u"Failed to set Qemu threads affinity!")
 
     def qemu_set_scheduler_policy(self):
         """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
 
     def qemu_set_scheduler_policy(self):
         """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
@@ -168,77 +528,18 @@ class QemuUtils(object):
 
         :raises RuntimeError: Set scheduler policy failed.
         """
 
         :raises RuntimeError: Set scheduler policy failed.
         """
-        qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']
-
-        for qemu_cpu in qemu_cpus:
-            cmd = 'chrt -r -p 1 {0}'.format(qemu_cpu['thread_id'])
-            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
-            if int(ret_code) != 0:
-                logger.debug('Set SCHED_RR failed {0}'.format(stderr))
-                raise RuntimeError('Set SCHED_RR failed on {0}'.format(
-                    self._node['host']))
-
-    def qemu_set_node(self, node):
-        """Set node to run QEMU on.
-
-        :param node: Node to run QEMU on.
-        :type node: dict
-        """
-        self._node = node
-        self._ssh = SSH()
-        self._ssh.connect(node)
-        self._vm_info['host'] = node['host']
-
-        arch = Topology.get_node_arch(node)
-        self._qemu_bin = 'qemu-system-{arch}'.format(arch=arch)
-
-    def qemu_add_vhost_user_if(self, socket, server=True, mac=None,
-                               jumbo_frames=False):
-        """Add Vhost-user interface.
+        try:
+            qemu_cpus = self.get_qemu_pids()
 
 
-        :param socket: Path of the unix socket.
-        :param server: If True the socket shall be a listening socket.
-        :param mac: Vhost-user interface MAC address (optional, otherwise is
-            used auto-generated MAC 52:54:00:00:xx:yy).
-        :param jumbo_frames: Set True if jumbo frames are used in the test.
-        :type socket: str
-        :type server: bool
-        :type mac: str
-        :type jumbo_frames: bool
-        """
-        self._vhost_id += 1
-        # Create unix socket character device.
-        chardev = ' -chardev socket,id=char{0},path={1}'.format(self._vhost_id,
-                                                                socket)
-        if server is True:
-            chardev += ',server'
-        self._qemu_opt['options'] += chardev
-        # Create Vhost-user network backend.
-        netdev = (' -netdev vhost-user,id=vhost{0},chardev=char{0},queues={1}'
-                  .format(self._vhost_id, self._qemu_opt['queues']))
-        self._qemu_opt['options'] += netdev
-        # If MAC is not specified use auto-generated MAC address based on
-        # template 52:54:00:00:<qemu_id>:<vhost_id>, e.g. vhost1 MAC of QEMU
-        #  with ID 1 is 52:54:00:00:01:01
-        if mac is None:
-            mac = '52:54:00:00:{0:02x}:{1:02x}'.\
-                format(self._qemu_id, self._vhost_id)
-        extend_options = 'mq=on,csum=off,gso=off,guest_tso4=off,'\
-            'guest_tso6=off,guest_ecn=off'
-        if jumbo_frames:
-            extend_options += ",mrg_rxbuf=on"
-        else:
-            extend_options += ",mrg_rxbuf=off"
-        # Create Virtio network device.
-        device = ' -device virtio-net-pci,netdev=vhost{0},mac={1},{2}'.format(
-            self._vhost_id, mac, extend_options)
-        self._qemu_opt['options'] += device
-        # Add interface MAC and socket to the node dict
-        if_data = {'mac_address': mac, 'socket': socket}
-        if_name = 'vhost{}'.format(self._vhost_id)
-        self._vm_info['interfaces'][if_name] = if_data
-        # Add socket to the socket list
-        self._socks.append(socket)
+            for qemu_cpu in qemu_cpus:
+                command = f"chrt -r -p 1 {qemu_cpu}"
+                message = f"QEMU: Set SCHED_RR failed on {self._node[u'host']}"
+                exec_cmd_no_error(
+                    self._node, command, sudo=True, message=message
+                )
+        except (RuntimeError, ValueError):
+            self.qemu_kill_all()
+            raise
 
     def _qemu_qmp_exec(self, cmd):
         """Execute QMP command.
 
     def _qemu_qmp_exec(self, cmd):
         """Execute QMP command.
@@ -252,39 +553,32 @@ class QemuUtils(object):
             response will contain the "error" keyword instead of "return".
         """
         # To enter command mode, the qmp_capabilities command must be issued.
             response will contain the "error" keyword instead of "return".
         """
         # To enter command mode, the qmp_capabilities command must be issued.
-        qmp_cmd = 'echo "{ \\"execute\\": \\"qmp_capabilities\\" }' \
-                  '{ \\"execute\\": \\"' + cmd + \
-                  '\\" }" | sudo -S socat - UNIX-CONNECT:' + self._qmp_sock
-
-        (ret_code, stdout, stderr) = self._ssh.exec_command(qmp_cmd)
-        if int(ret_code) != 0:
-            logger.debug('QMP execute failed {0}'.format(stderr))
-            raise RuntimeError('QMP execute "{0}"'
-                               ' failed on {1}'.format(cmd, self._node['host']))
-        logger.trace(stdout)
+        command = f"echo \"{{{{ \\\"execute\\\": " \
+            f"\\\"qmp_capabilities\\\" }}}}" \
+            f"{{{{ \\\"execute\\\": \\\"{cmd}\\\" }}}}\" | " \
+            f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qmp')}"
+        message = f"QMP execute '{cmd}' failed on {self._node[u'host']}"
+
+        stdout, _ = exec_cmd_no_error(
+            self._node, command, sudo=False, message=message
+        )
+
         # Skip capabilities negotiation messages.
         out_list = stdout.splitlines()
         if len(out_list) < 3:
         # Skip capabilities negotiation messages.
         out_list = stdout.splitlines()
         if len(out_list) < 3:
-            raise RuntimeError('Invalid QMP output on {0}'.format(
-                self._node['host']))
+            raise RuntimeError(f"Invalid QMP output on {self._node[u'host']}")
         return json.loads(out_list[2])
 
     def _qemu_qga_flush(self):
         return json.loads(out_list[2])
 
     def _qemu_qga_flush(self):
-        """Flush the QGA parser state
-        """
-        qga_cmd = '(printf "\xFF"; sleep 1) | sudo -S socat - UNIX-CONNECT:' + \
-                  self._qga_sock
-        #TODO: probably need something else
-        (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
-        if int(ret_code) != 0:
-            logger.debug('QGA execute failed {0}'.format(stderr))
-            raise RuntimeError('QGA execute "{0}" '
-                               'failed on {1}'.format(qga_cmd,
-                                                      self._node['host']))
-        logger.trace(stdout)
-        if not stdout:
-            return {}
-        return json.loads(stdout.split('\n', 1)[0])
+        """Flush the QGA parser state."""
+        command = f"(printf \"\xFF\"; sleep 1) | sudo -S socat " \
+            f"- UNIX-CONNECT:{self._temp.get(u'qga')}"
+        message = f"QGA flush failed on {self._node[u'host']}"
+        stdout, _ = exec_cmd_no_error(
+            self._node, command, sudo=False, message=message
+        )
+
+        return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()
 
     def _qemu_qga_exec(self, cmd):
         """Execute QGA command.
 
     def _qemu_qga_exec(self, cmd):
         """Execute QGA command.
@@ -294,417 +588,196 @@ class QemuUtils(object):
         :param cmd: QGA command to execute.
         :type cmd: str
         """
         :param cmd: QGA command to execute.
         :type cmd: str
         """
-        qga_cmd = '(echo "{ \\"execute\\": \\"' + \
-                  cmd + \
-                  '\\" }"; sleep 1) | sudo -S socat - UNIX-CONNECT:' + \
-                  self._qga_sock
-        (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
-        if int(ret_code) != 0:
-            logger.debug('QGA execute failed {0}'.format(stderr))
-            raise RuntimeError('QGA execute "{0}"'
-                               ' failed on {1}'.format(cmd, self._node['host']))
-        logger.trace(stdout)
-        if not stdout:
-            return {}
-        return json.loads(stdout.split('\n', 1)[0])
-
-    def _wait_until_vm_boot(self, timeout=60):
-        """Wait until QEMU VM is booted.
+        command = f"(echo \"{{{{ \\\"execute\\\": " \
+            f"\\\"{cmd}\\\" }}}}\"; sleep 1) | " \
+            f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qga')}"
+        message = f"QGA execute '{cmd}' failed on {self._node[u'host']}"
+        stdout, _ = exec_cmd_no_error(
+            self._node, command, sudo=False, message=message
+        )
+
+        return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()
+
+    def _wait_until_vm_boot(self):
+        """Wait until QEMU VM is booted."""
+        try:
+            getattr(self, f'_wait_{self._opt["vnf"]}')()
+        except AttributeError:
+            self._wait_default()
+
+    def _wait_default(self, retries=60):
+        """Wait until QEMU with VPP is booted.
+
+        :param retries: Number of retries.
+        :type retries: int
+        """
+        for _ in range(retries):
+            command = f"tail -1 {self._temp.get(u'log')}"
+            stdout = None
+            try:
+                stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
+                sleep(1)
+            except RuntimeError:
+                pass
+            if "vpp " in stdout and "built by" in stdout:
+                break
+            if u"Press enter to exit" in stdout:
+                break
+            if u"reboot: Power down" in stdout:
+                raise RuntimeError(
+                    f"QEMU: NF failed to run on {self._node[u'host']}!"
+                )
+        else:
+            raise RuntimeError(
+                f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+            )
+
+    def _wait_nestedvm(self, retries=12):
+        """Wait until QEMU with NestedVM is booted.
 
         First try to flush qga until there is output.
         Then ping QEMU guest agent each 5s until VM booted or timeout.
 
 
         First try to flush qga until there is output.
         Then ping QEMU guest agent each 5s until VM booted or timeout.
 
-        :param timeout: Waiting timeout in seconds (optional, default 60s).
-        :type timeout: int
+        :param retries: Number of retries with 5s between trials.
+        :type retries: int
         """
         """
-        start = time()
-        while True:
-            if time() - start > timeout:
-                raise RuntimeError('timeout, VM {0} not booted on {1}'.format(
-                    self._qemu_opt['disk_image'], self._node['host']))
+        for _ in range(retries):
             out = None
             try:
                 out = self._qemu_qga_flush()
             except ValueError:
             out = None
             try:
                 out = self._qemu_qga_flush()
             except ValueError:
-                logger.trace('QGA qga flush unexpected output {}'.format(out))
+                logger.trace(f"QGA qga flush unexpected output {out}")
             # Empty output - VM not booted yet
             if not out:
                 sleep(5)
             else:
                 break
             # Empty output - VM not booted yet
             if not out:
                 sleep(5)
             else:
                 break
-        while True:
-            if time() - start > timeout:
-                raise RuntimeError('timeout, VM {0} not booted on {1}'.format(
-                    self._qemu_opt['disk_image'], self._node['host']))
+        else:
+            raise RuntimeError(
+                f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+            )
+        for _ in range(retries):
             out = None
             try:
             out = None
             try:
-                out = self._qemu_qga_exec('guest-ping')
+                out = self._qemu_qga_exec(u"guest-ping")
             except ValueError:
             except ValueError:
-                logger.trace('QGA guest-ping unexpected output {}'.format(out))
-            # Empty output - VM not booted yet
+                logger.trace(f"QGA guest-ping unexpected output {out}")
+            # Empty output - VM not booted yet.
             if not out:
                 sleep(5)
             if not out:
                 sleep(5)
-            # Non-error return - VM booted
-            elif out.get('return') is not None:
+            # Non-error return - VM booted.
+            elif out.get(u"return") is not None:
                 break
                 break
-            # Skip error and wait
-            elif out.get('error') is not None:
+            # Skip error and wait.
+            elif out.get(u"error") is not None:
                 sleep(5)
             else:
                 # If there is an unexpected output from QGA guest-info, try
                 # again until timeout.
                 sleep(5)
             else:
                 # If there is an unexpected output from QGA guest-info, try
                 # again until timeout.
-                logger.trace('QGA guest-ping unexpected output {}'.format(out))
+                logger.trace(f"QGA guest-ping unexpected output {out}")
+        else:
+            raise RuntimeError(
+                f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+            )
+
+    def _wait_iperf3(self, retries=60):
+        """Wait until QEMU with iPerf3 is booted.
 
 
-        logger.trace('VM {0} booted on {1}'.format(self._qemu_opt['disk_image'],
-                                                   self._node['host']))
+        :param retries: Number of retries.
+        :type retries: int
+        """
+        grep = u"Server listening on 0.0.0.0 port 22."
+        cmd = f"fgrep '{grep}' {self._temp.get(u'log')}"
+        message = f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+        exec_cmd_no_error(
+            self._node, cmd=cmd, sudo=True, message=message, retries=retries,
+            include_reason=True
+        )
 
     def _update_vm_interfaces(self):
         """Update interface names in VM node dict."""
         # Send guest-network-get-interfaces command via QGA, output example:
         # {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"},
 
     def _update_vm_interfaces(self):
         """Update interface names in VM node dict."""
         # Send guest-network-get-interfaces command via QGA, output example:
         # {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"},
-        # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]}
-        out = self._qemu_qga_exec('guest-network-get-interfaces')
-        interfaces = out.get('return')
+        # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]}.
+        out = self._qemu_qga_exec(u"guest-network-get-interfaces")
+        interfaces = out.get(u"return")
         mac_name = {}
         if not interfaces:
         mac_name = {}
         if not interfaces:
-            raise RuntimeError('Get VM {0} interface list failed on {1}'.format(
-                self._qemu_opt['disk_image'], self._node['host']))
-        # Create MAC-name dict
+            raise RuntimeError(
+                f"Get VM interface list failed on {self._node[u'host']}"
+            )
+        # Create MAC-name dict.
         for interface in interfaces:
         for interface in interfaces:
-            if 'hardware-address' not in interface:
+            if u"hardware-address" not in interface:
                 continue
                 continue
-            mac_name[interface['hardware-address']] = interface['name']
-        # Match interface by MAC and save interface name
-        for interface in self._vm_info['interfaces'].values():
-            mac = interface.get('mac_address')
+            mac_name[interface[u"hardware-address"]] = interface[u"name"]
+        # Match interface by MAC and save interface name.
+        for interface in self._vm_info[u"interfaces"].values():
+            mac = interface.get(u"mac_address")
             if_name = mac_name.get(mac)
             if if_name is None:
             if_name = mac_name.get(mac)
             if if_name is None:
-                logger.trace('Interface name for MAC {} not found'.format(mac))
-            else:
-                interface['name'] = if_name
-
-    def _huge_page_check(self, allocate=False):
-        """Huge page check."""
-        huge_mnt = self._qemu_opt.get('huge_mnt')
-        mem_size = self._qemu_opt.get('mem_size')
-
-        # Get huge pages information
-        huge_size = self._get_huge_page_size()
-        huge_free = self._get_huge_page_free(huge_size)
-        huge_total = self._get_huge_page_total(huge_size)
-
-        # Check if memory reqested by qemu is available on host
-        if (mem_size * 1024) > (huge_free * huge_size):
-            # If we want to allocate hugepage dynamically
-            if allocate:
-                mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
-                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
-                max_map_count = huge_to_allocate*4
-                # Increase maximum number of memory map areas a process may have
-                cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
-                    max_map_count)
-                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
-                # Increase hugepage count
-                cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
-                    huge_to_allocate)
-                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
-                if int(ret_code) != 0:
-                    logger.debug('Mount huge pages failed {0}'.format(stderr))
-                    raise RuntimeError('Mount huge pages failed on {0}'.format(
-                        self._node['host']))
-            # If we do not want to allocate dynamicaly end with error
+                logger.trace(f"Interface name for MAC {mac} not found")
             else:
             else:
-                raise RuntimeError(
-                    'Not enough free huge pages: {0}, '
-                    '{1} MB'.format(huge_free, huge_free * huge_size)
-                )
-        # Check if huge pages mount point exist
-        has_huge_mnt = False
-        (_, output, _) = self._ssh.exec_command('cat /proc/mounts')
-        for line in output.splitlines():
-            # Try to find something like:
-            # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
-            mount = line.split()
-            if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
-                has_huge_mnt = True
-                break
-        # If huge page mount point not exist create one
-        if not has_huge_mnt:
-            cmd = 'mkdir -p {0}'.format(huge_mnt)
-            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
-            if int(ret_code) != 0:
-                logger.debug('Create mount dir failed: {0}'.format(stderr))
-                raise RuntimeError('Create mount dir failed on {0}'.format(
-                    self._node['host']))
-            cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
-                huge_mnt)
-            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
-            if int(ret_code) != 0:
-                logger.debug('Mount huge pages failed {0}'.format(stderr))
-                raise RuntimeError('Mount huge pages failed on {0}'.format(
-                    self._node['host']))
-
-    def _get_huge_page_size(self):
-        """Get default size of huge pages in system.
-
-        :returns: Default size of free huge pages in system.
-        :rtype: int
-        :raises RuntimeError: If reading failed for three times.
-        """
-        # TODO: remove to dedicated library
-        cmd_huge_size = "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'"
-        for _ in range(3):
-            (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_size)
-            if ret == 0:
-                try:
-                    huge_size = int(out)
-                except ValueError:
-                    logger.trace('Reading huge page size information failed')
-                else:
-                    break
-        else:
-            raise RuntimeError('Getting huge page size information failed.')
-        return huge_size
-
-    def _get_huge_page_free(self, huge_size):
-        """Get total number of huge pages in system.
-
-        :param huge_size: Size of hugepages.
-        :type huge_size: int
-        :returns: Number of free huge pages in system.
-        :rtype: int
-        :raises RuntimeError: If reading failed for three times.
-        """
-        # TODO: add numa aware option
-        # TODO: remove to dedicated library
-        cmd_huge_free = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\
-            'free_hugepages'.format(huge_size)
-        for _ in range(3):
-            (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_free)
-            if ret == 0:
-                try:
-                    huge_free = int(out)
-                except ValueError:
-                    logger.trace('Reading free huge pages information failed')
-                else:
-                    break
-        else:
-            raise RuntimeError('Getting free huge pages information failed.')
-        return huge_free
-
-    def _get_huge_page_total(self, huge_size):
-        """Get total number of huge pages in system.
-
-        :param huge_size: Size of hugepages.
-        :type huge_size: int
-        :returns: Total number of huge pages in system.
-        :rtype: int
-        :raises RuntimeError: If reading failed for three times.
-        """
-        # TODO: add numa aware option
-        # TODO: remove to dedicated library
-        cmd_huge_total = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\
-            'nr_hugepages'.format(huge_size)
-        for _ in range(3):
-            (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_total)
-            if ret == 0:
-                try:
-                    huge_total = int(out)
-                except ValueError:
-                    logger.trace('Reading total huge pages information failed')
-                else:
-                    break
-        else:
-            raise RuntimeError('Getting total huge pages information failed.')
-        return huge_total
+                interface[u"name"] = if_name
 
     def qemu_start(self):
         """Start QEMU and wait until VM boot.
 
 
     def qemu_start(self):
         """Start QEMU and wait until VM boot.
 
-        .. note:: First set at least node to run QEMU on.
-        .. warning:: Starts only one VM on the node.
-
         :returns: VM node info.
         :rtype: dict
         """
         :returns: VM node info.
         :rtype: dict
         """
-        # Qemu binary path
-        bin_path = '{0}{1}'.format(self._qemu_path, self._qemu_bin)
-
-        # SSH forwarding
-        ssh_fwd = '-net user,hostfwd=tcp::{0}-:22'.format(
-            self._qemu_opt.get('ssh_fwd_port'))
-        # Memory and huge pages
-        mem = '-object memory-backend-file,id=mem,size={0}M,mem-path={1},' \
-            'share=on -m {0} -numa node,memdev=mem'.format(
-                self._qemu_opt.get('mem_size'), self._qemu_opt.get('huge_mnt'))
-
-        # By default check only if hugepages are available.
-        # If 'huge_allocate' is set to true try to allocate as well.
-        self._huge_page_check(allocate=self._qemu_opt.get('huge_allocate'))
-
-        # Disk option
-        drive = '-drive file={0},format=raw,cache=none,if=virtio'.format(
-            self._qemu_opt.get('disk_image'))
-        # Setup QMP via unix socket
-        qmp = '-qmp unix:{0},server,nowait'.format(self._qmp_sock)
-        # Setup serial console
-        serial = '-chardev socket,host=127.0.0.1,port={0},id=gnc0,server,' \
-            'nowait -device isa-serial,chardev=gnc0'.format(
-                self._qemu_opt.get('serial_port'))
-        # Setup QGA via chardev (unix socket) and isa-serial channel
-        qga = '-chardev socket,path={0},server,nowait,id=qga0 ' \
-            '-device isa-serial,chardev=qga0'.format(self._qga_sock)
-        # Graphic setup
-        graphic = '-monitor none -display none -vga none'
-        # PID file
-        pid = '-pidfile {}'.format(self._pid_file)
-
-        # Run QEMU
-        cmd = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'.format(
-            bin_path, self._qemu_opt.get('smp'), mem, ssh_fwd,
-            self._qemu_opt.get('options'), drive, qmp, serial, qga, graphic,
-            pid)
+        cmd_opts = OptionString()
+        cmd_opts.add(f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch}")
+        cmd_opts.extend(self._params)
+        message = f"QEMU: Start failed on {self._node[u'host']}!"
         try:
         try:
-            (ret_code, _, _) = self._ssh.exec_command_sudo(cmd, timeout=300)
-            if int(ret_code) != 0:
-                raise RuntimeError('QEMU start failed on {0}'.format(
-                    self._node['host']))
-            # Wait until VM boot
+            DUTSetup.check_huge_page(
+                self._node, self._opt.get(u"mem-path"),
+                int(self._opt.get(u"mem"))
+            )
+
+            exec_cmd_no_error(
+                self._node, cmd_opts, timeout=300, sudo=True, message=message
+            )
             self._wait_until_vm_boot()
             self._wait_until_vm_boot()
-        except (RuntimeError, SSHTimeout):
+        except RuntimeError:
             self.qemu_kill_all()
             self.qemu_kill_all()
-            self.qemu_clear_socks()
             raise
             raise
-        logger.trace('QEMU started successfully.')
-        # Update interface names in VM node dict
-        self._update_vm_interfaces()
-        # Return VM node dict
         return self._vm_info
 
         return self._vm_info
 
-    def qemu_quit(self):
-        """Quit the QEMU emulator."""
-        out = self._qemu_qmp_exec('quit')
-        err = out.get('error')
-        if err is not None:
-            raise RuntimeError('QEMU quit failed on {0}, error: {1}'.format(
-                self._node['host'], json.dumps(err)))
-
-    def qemu_system_powerdown(self):
-        """Power down the system (if supported)."""
-        out = self._qemu_qmp_exec('system_powerdown')
-        err = out.get('error')
-        if err is not None:
-            raise RuntimeError(
-                'QEMU system powerdown failed on {0}, '
-                'error: {1}'.format(self._node['host'], json.dumps(err))
-            )
-
-    def qemu_system_reset(self):
-        """Reset the system."""
-        out = self._qemu_qmp_exec('system_reset')
-        err = out.get('error')
-        if err is not None:
-            raise RuntimeError(
-                'QEMU system reset failed on {0}, '
-                'error: {1}'.format(self._node['host'], json.dumps(err)))
-
     def qemu_kill(self):
         """Kill qemu process."""
     def qemu_kill(self):
         """Kill qemu process."""
-        # Note: in QEMU start phase there are 3 QEMU processes because we
-        # daemonize QEMU
-        self._ssh.exec_command_sudo('chmod +r {}'.format(self._pid_file))
-        self._ssh.exec_command_sudo('kill -SIGKILL $(cat {})'
-                                    .format(self._pid_file))
-        # Delete PID file
-        cmd = 'rm -f {}'.format(self._pid_file)
-        self._ssh.exec_command_sudo(cmd)
-
-    def qemu_kill_all(self, node=None):
-        """Kill all qemu processes on DUT node if specified.
-
-        :param node: Node to kill all QEMU processes on.
-        :type node: dict
-        """
-        if node:
-            self.qemu_set_node(node)
-        self._ssh.exec_command_sudo('pkill -SIGKILL qemu')
-
-    def qemu_clear_socks(self):
-        """Remove all sockets created by QEMU."""
-        # If serial console port still open kill process
-        cmd = 'fuser -k {}/tcp'.format(self._qemu_opt.get('serial_port'))
-        self._ssh.exec_command_sudo(cmd)
-        # Delete all created sockets
-        for sock in self._socks:
-            cmd = 'rm -f {}'.format(sock)
-            self._ssh.exec_command_sudo(cmd)
-
-    def qemu_system_status(self):
-        """Return current VM status.
-
-        VM should be in following status:
-
-            - debug: QEMU running on a debugger
-            - finish-migrate: paused to finish the migration process
-            - inmigrate: waiting for an incoming migration
-            - internal-error: internal error has occurred
-            - io-error: the last IOP has failed
-            - paused: paused
-            - postmigrate: paused following a successful migrate
-            - prelaunch: QEMU was started with -S and guest has not started
-            - restore-vm: paused to restore VM state
-            - running: actively running
-            - save-vm: paused to save the VM state
-            - shutdown: shut down (and -no-shutdown is in use)
-            - suspended: suspended (ACPI S3)
-            - watchdog: watchdog action has been triggered
-            - guest-panicked: panicked as a result of guest OS panic
-
-        :returns: VM status.
+        exec_cmd(
+            self._node, f"chmod +r {self._temp.get(u'pidfile')}", sudo=True
+        )
+        exec_cmd(
+            self._node, f"kill -SIGKILL $(cat {self._temp.get(u'pidfile')})",
+            sudo=True
+        )
+
+        for value in self._temp.values():
+            exec_cmd(self._node, f"cat {value}", sudo=True)
+            exec_cmd(self._node, f"rm -f {value}", sudo=True)
+
+    def qemu_kill_all(self):
+        """Kill all qemu processes on DUT node if specified."""
+        exec_cmd(self._node, u"pkill -SIGKILL qemu", sudo=True)
+
+        for value in self._temp.values():
+            exec_cmd(self._node, f"cat {value}", sudo=True)
+            exec_cmd(self._node, f"rm -f {value}", sudo=True)
+
+    def qemu_version(self):
+        """Return Qemu version.
+
+        :returns: Qemu version.
         :rtype: str
         """
         :rtype: str
         """
-        out = self._qemu_qmp_exec('query-status')
-        ret = out.get('return')
-        if ret is not None:
-            return ret.get('status')
-        else:
-            err = out.get('error')
-            raise RuntimeError(
-                'QEMU query-status failed on {0}, '
-                'error: {1}'.format(self._node['host'], json.dumps(err)))
-
-    @staticmethod
-    def build_qemu(node, force_install=False, apply_patch=False):
-        """Build QEMU from sources.
-
-        :param node: Node to build QEMU on.
-        :param force_install: If True, then remove previous build.
-        :param apply_patch: If True, then apply patches from qemu_patches dir.
-        :type node: dict
-        :type force_install: bool
-        :type apply_patch: bool
-        :raises RuntimeError: If building QEMU failed.
-        """
-        ssh = SSH()
-        ssh.connect(node)
-
-        directory = ' --directory={0}'.format(Constants.QEMU_INSTALL_DIR)
-        if apply_patch:
-            directory += '-patch'
-        else:
-            directory += '-base'
-        version = ' --version={0}'.format(Constants.QEMU_INSTALL_VERSION)
-        force = ' --force' if force_install else ''
-        patch = ' --patch' if apply_patch else ''
-        arch = Topology.get_node_arch(node)
-        target_list = ' --target-list={0}-softmmu'.format(arch)
-
-        (ret_code, stdout, stderr) = \
-            ssh.exec_command(
-                "sudo -E sh -c '{0}/{1}/qemu_build.sh{2}{3}{4}{5}{6}'"\
-                .format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH,
-                        version, directory, force, patch, target_list), 1000)
-
-        if int(ret_code) != 0:
-            logger.debug('QEMU build failed {0}'.format(stdout + stderr))
-            raise RuntimeError('QEMU build failed on {0}'.format(node['host']))
+        command = f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch} " \
+            f"--version"
+        try:
+            stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
+            return match(r"QEMU emulator version ([\d.]*)", stdout).group(1)
+        except RuntimeError:
+            self.qemu_kill_all()
+            raise