-# Copyright (c) 2016 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""QEMU utilities library."""
-from time import time, sleep
import json
-import re
+
+from re import match
+from string import Template
+from time import sleep
from robot.api import logger
-from resources.libraries.python.ssh import SSH
-from resources.libraries.python.constants import Constants
-from resources.libraries.python.topology import NodeType
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.DpdkUtil import DpdkUtil
+from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.OptionString import OptionString
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
+from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.VhostUser import VirtioFeaturesFlags
+from resources.libraries.python.VhostUser import VirtioFeatureMask
+from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
+
+__all__ = [u"QemuUtils"]
-class QemuUtils(object):
+class QemuUtils:
"""QEMU utilities."""
- __QEMU_BIN = '/opt/qemu/bin/qemu-system-x86_64'
- # QEMU Machine Protocol socket
- __QMP_SOCK = '/tmp/qmp.sock'
- # QEMU Guest Agent socket
- __QGA_SOCK = '/tmp/qga.sock'
-
- def __init__(self):
- self._qemu_opt = {}
- # Default 1 CPU.
- self._qemu_opt['smp'] = '-smp 1,sockets=1,cores=1,threads=1'
- # Daemonize the QEMU process after initialization. Default one
- # management interface.
- self._qemu_opt['options'] = '-daemonize -enable-kvm ' \
- '-machine pc-1.0,accel=kvm,usb=off,mem-merge=off ' \
- '-net nic,macaddr=52:54:00:00:02:01'
- self._qemu_opt['ssh_fwd_port'] = 10022
- # Default serial console port
- self._qemu_opt['serial_port'] = 4556
- # Default 512MB virtual RAM
- self._qemu_opt['mem_size'] = 512
- # Default huge page mount point, required for Vhost-user interfaces.
- self._qemu_opt['huge_mnt'] = '/mnt/huge'
- # Default image for CSIT virl setup
- self._qemu_opt['disk_image'] = '/var/lib/vm/vhost-nested.img'
- # VM node info dict
+ # Use one instance of class per test.
+ ROBOT_LIBRARY_SCOPE = u"TEST CASE"
+
+ def __init__(
+ self, node, qemu_id=1, smp=1, mem=512, vnf=None,
+ img=Constants.QEMU_VM_IMAGE, page_size=u""):
+ """Initialize QemuUtils class.
+
+ :param node: Node to run QEMU on.
+ :param qemu_id: QEMU identifier.
+ :param smp: Number of virtual SMP units (cores).
+ :param mem: Amount of memory.
+ :param vnf: Network function workload.
+ :param img: QEMU disk image or kernel image path.
+ :param page_size: Hugepage Size.
+ :type node: dict
+ :type qemu_id: int
+ :type smp: int
+ :type mem: int
+ :type vnf: str
+ :type img: str
+ :type page_size: str
+ """
+ self._nic_id = 0
+ self._node = node
+ self._arch = Topology.get_node_arch(self._node)
+ self._opt = dict()
+
+ # Architecture specific options
+ if self._arch == u"aarch64":
+ self._opt[u"machine_args"] = \
+ u"virt,accel=kvm,usb=off,mem-merge=off,gic-version=3"
+ self._opt[u"console"] = u"ttyAMA0"
+ else:
+ self._opt[u"machine_args"] = u"pc,accel=kvm,usb=off,mem-merge=off"
+ self._opt[u"console"] = u"ttyS0"
+ self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/build/app"
self._vm_info = {
- 'type': NodeType.VM,
- 'port': 10022,
- 'username': 'cisco',
- 'password': 'cisco',
- 'interfaces': {},
+ u"host": node[u"host"],
+ u"type": NodeType.VM,
+ u"port": 10021 + qemu_id,
+ u"serial": 4555 + qemu_id,
+ u"username": 'testuser',
+ u"password": 'Csit1234',
+ u"interfaces": {},
}
- self._vhost_id = 0
- self._ssh = None
- self._node = None
- self._socks = [self.__QMP_SOCK, self.__QGA_SOCK]
-
- def qemu_set_smp(self, cpus, cores, threads, sockets):
- """Set SMP option for QEMU
-
- :param cpus: Number of CPUs.
- :param cores: Number of CPU cores on one socket.
- :param threads: Number of threads on one CPU core.
- :param sockets: Number of discrete sockets in the system.
- :type cpus: int
- :type cores: int
- :type threads: int
- :type sockets: int
+ if node[u"port"] != 22:
+ self._vm_info[u"host_port"] = node[u"port"]
+ self._vm_info[u"host_username"] = node[u"username"]
+ self._vm_info[u"host_password"] = node[u"password"]
+ # Input Options.
+ self._opt[u"qemu_id"] = qemu_id
+ self._opt[u"mem"] = int(mem)
+ self._opt[u"smp"] = int(smp)
+ self._opt[u"img"] = img
+ self._opt[u"vnf"] = vnf
+ self._opt[u"page_size"] = page_size
+
+ # Temporary files.
+ self._temp = dict()
+ self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log"
+ self._temp[u"pidfile"] = f"/run/qemu_{qemu_id}.pid"
+ if img == Constants.QEMU_VM_IMAGE:
+ self._temp[u"qmp"] = f"/run/qmp_{qemu_id}.sock"
+ self._temp[u"qga"] = f"/run/qga_{qemu_id}.sock"
+ elif img == Constants.QEMU_VM_KERNEL:
+ self._opt[u"img"], _ = exec_cmd_no_error(
+ node, f"ls -1 {Constants.QEMU_VM_KERNEL}* | tail -1",
+ message=u"Qemu Kernel VM image not found!"
+ )
+ self._temp[u"ini"] = f"/etc/vm_init_{qemu_id}.conf"
+ self._opt[u"initrd"], _ = exec_cmd_no_error(
+ node, f"ls -1 {Constants.QEMU_VM_KERNEL_INITRD}* | tail -1",
+ message=u"Qemu Kernel initrd image not found!"
+ )
+ else:
+ raise RuntimeError(f"QEMU: Unknown VM image option: {img}")
+ # Computed parameters for QEMU command line.
+ self._params = OptionString(prefix=u"-")
+
+ def add_default_params(self):
+ """Set default QEMU command line parameters."""
+ mem_path = f"/dev/hugepages1G" \
+ if self._opt[u"page_size"] == u"1G" else u"/dev/hugepages"
+
+ self._params.add(u"daemonize")
+ self._params.add(u"nodefaults")
+ self._params.add_with_value(
+ u"name", f"vnf{self._opt.get(u'qemu_id')},debug-threads=on"
+ )
+ self._params.add(u"no-user-config")
+ self._params.add(u"nographic")
+ self._params.add(u"enable-kvm")
+ self._params.add_with_value(u"pidfile", self._temp.get(u"pidfile"))
+ self._params.add_with_value(u"cpu", u"host")
+
+ self._params.add_with_value(u"machine", self._opt.get(u"machine_args"))
+ self._params.add_with_value(
+ u"smp", f"{self._opt.get(u'smp')},sockets=1,"
+ f"cores={self._opt.get(u'smp')},threads=1"
+ )
+ self._params.add_with_value(
+ u"object", f"memory-backend-file,id=mem,"
+ f"size={self._opt.get(u'mem')}M,"
+ f"mem-path={mem_path},share=on"
+ )
+ self._params.add_with_value(u"m", f"{self._opt.get(u'mem')}M")
+ self._params.add_with_value(u"numa", u"node,memdev=mem")
+
+ def add_net_user(self, net="10.0.2.0/24"):
+ """Set management port forwarding."""
+ self._params.add_with_value(
+ u"netdev", f"user,id=mgmt,net={net},"
+ f"hostfwd=tcp::{self._vm_info[u'port']}-:22"
+ )
+ self._params.add_with_value(
+ u"device", f"virtio-net,netdev=mgmt"
+ )
+
+ def add_qmp_qga(self):
+ """Set QMP, QGA management."""
+ self._params.add_with_value(
+ u"chardev", f"socket,path={self._temp.get(u'qga')},"
+ f"server,nowait,id=qga0"
+ )
+ self._params.add_with_value(
+ u"device", u"isa-serial,chardev=qga0"
+ )
+ self._params.add_with_value(
+ u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait"
+ )
+
+ def add_serial(self):
+ """Set serial to file redirect."""
+ self._params.add_with_value(
+ u"chardev", f"socket,host=127.0.0.1,"
+ f"port={self._vm_info[u'serial']},id=gnc0,server,nowait"
+ )
+ self._params.add_with_value(
+ u"device", u"isa-serial,chardev=gnc0"
+ )
+ self._params.add_with_value(
+ u"serial", f"file:{self._temp.get(u'log')}"
+ )
+
+ def add_drive_cdrom(self, drive_file, index=None):
+ """Set CD-ROM drive.
+
+ :param drive_file: Path to drive image.
+ :param index: Drive index.
+ :type drive_file: str
+ :type index: int
"""
- self._qemu_opt['smp'] = '-smp {},cores={},threads={},sockets={}'.format(
- cpus, cores, threads, sockets)
-
- def qemu_set_ssh_fwd_port(self, fwd_port):
- """Set host port for guest SSH forwarding.
-
- :param fwd_port: Port number on host for guest SSH forwarding.
- :type fwd_port: int
+ index = f"index={index}," if index else u""
+ self._params.add_with_value(
+ u"drive", f"file={drive_file},{index}media=cdrom"
+ )
+
+ def add_drive(self, drive_file, drive_format):
+ """Set drive with custom format.
+
+ :param drive_file: Path to drive image.
+ :param drive_format: Drive image format.
+ :type drive_file: str
+ :type drive_format: str
"""
- self._qemu_opt['ssh_fwd_port'] = fwd_port
- self._vm_info['port'] = fwd_port
-
- def qemu_set_serial_port(self, port):
- """Set serial console port.
+ self._params.add_with_value(
+ u"drive", f"file={drive_file},format={drive_format},"
+ u"cache=none,if=virtio,file.locking=off"
+ )
+
+ def add_kernelvm_params(self):
+ """Set KernelVM QEMU parameters."""
+ hugepages = 3 if self._opt[u"page_size"] == u"1G" else 512
+
+ self._params.add_with_value(
+ u"serial", f"file:{self._temp.get(u'log')}"
+ )
+ self._params.add_with_value(
+ u"fsdev", u"local,id=root9p,path=/,security_model=none"
+ )
+ self._params.add_with_value(
+ u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot"
+ )
+ self._params.add_with_value(
+ u"kernel", f"{self._opt.get(u'img')}"
+ )
+ self._params.add_with_value(
+ u"initrd", f"{self._opt.get(u'initrd')}"
+ )
+ self._params.add_with_value(
+ u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
+ f"root=virtioroot console={self._opt.get(u'console')} "
+ f"tsc=reliable hugepages={hugepages} "
+ f"hugepagesz={self._opt.get(u'page_size')} "
+ f"init={self._temp.get(u'ini')} fastboot'"
+ )
+
+ def add_vhost_user_if(
+ self, socket, server=True, jumbo_frames=False, queue_size=None,
+ queues=1, virtio_feature_mask=None):
+ """Add Vhost-user interface.
- :param port: Serial console port.
- :type port: int
+ :param socket: Path of the unix socket.
+ :param server: If True the socket shall be a listening socket.
+ :param jumbo_frames: Set True if jumbo frames are used in the test.
+ :param queue_size: Vring queue size.
+ :param queues: Number of queues.
+ :param virtio_feature_mask: Mask of virtio features to be enabled.
+ :type socket: str
+ :type server: bool
+ :type jumbo_frames: bool
+ :type queue_size: int
+ :type queues: int
+ :type virtio_feature_mask: int
"""
- self._qemu_opt['serial_port'] = port
-
- def qemu_set_mem_size(self, mem_size):
- """Set virtual RAM size.
-
- :param mem_size: RAM size in Mega Bytes.
- :type mem_size: int
+ self._nic_id += 1
+ if jumbo_frames:
+ logger.debug(u"Jumbo frames temporarily disabled!")
+ self._params.add_with_value(
+ u"chardev", f"socket,id=char{self._nic_id},"
+ f"path={socket}{u',server' if server is True else u''}"
+ )
+ self._params.add_with_value(
+ u"netdev", f"vhost-user,id=vhost{self._nic_id},"
+ f"chardev=char{self._nic_id},queues={queues}"
+ )
+ mac = f"52:54:00:00:{self._opt.get(u'qemu_id'):02x}:" \
+ f"{self._nic_id:02x}"
+ queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
+ if queue_size else u""
+ gso = VirtioFeatureMask.is_feature_enabled(
+ virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_GSO)
+ csum = VirtioFeatureMask.is_feature_enabled(
+ virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_CSUM)
+
+ self._params.add_with_value(
+ u"device", f"virtio-net-pci,netdev=vhost{self._nic_id},mac={mac},"
+ f"addr={self._nic_id+5}.0,mq=on,vectors={2 * queues + 2},"
+ f"csum={u'on' if csum else u'off'},"
+ f"gso={u'on' if gso else u'off'},"
+ f"guest_tso4={u'on' if gso else u'off'},"
+ f"guest_tso6={u'on' if gso else u'off'},"
+ f"guest_ecn={u'on' if gso else u'off'},"
+ f"{queue_size}"
+ )
+
+ # Add interface MAC and socket to the node dict.
+ if_data = {u"mac_address": mac, u"socket": socket}
+ if_name = f"vhost{self._nic_id}"
+ self._vm_info[u"interfaces"][if_name] = if_data
+ # Add socket to temporary file list.
+ self._temp[if_name] = socket
+
+ def add_vfio_pci_if(self, pci):
+ """Add VFIO PCI interface.
+
+ :param pci: PCI address of interface.
+ :type pci: str
"""
- self._qemu_opt['mem_size'] = mem_size
+ self._nic_id += 1
+ self._params.add_with_value(
+ u"device", f"vfio-pci,host={pci},addr={self._nic_id+5}.0"
+ )
- def qemu_set_huge_mnt(self, huge_mnt):
- """Set hugefile mount point.
+ def create_kernelvm_config_vpp(self, **kwargs):
+ """Create QEMU VPP config files.
- :param huge_mnt: System hugefile mount point.
- :type huge_mnt: int
+ :param kwargs: Key-value pairs to replace content of VPP configuration
+ file.
+ :type kwargs: dict
+ """
+ startup = f"/etc/vpp/vm_startup_{self._opt.get(u'qemu_id')}.conf"
+ running = f"/etc/vpp/vm_running_{self._opt.get(u'qemu_id')}.exec"
+
+ self._temp[u"startup"] = startup
+ self._temp[u"running"] = running
+ self._opt[u"vnf_bin"] = f"/usr/bin/vpp -c {startup}"
+
+ # Create VPP startup configuration.
+ vpp_config = VppConfigGenerator()
+ vpp_config.set_node(self._node)
+ vpp_config.add_unix_nodaemon()
+ vpp_config.add_unix_cli_listen()
+ vpp_config.add_unix_exec(running)
+ vpp_config.add_socksvr()
+ vpp_config.add_main_heap_size(u"512M")
+ vpp_config.add_main_heap_page_size(self._opt[u"page_size"])
+ vpp_config.add_default_hugepage_size(self._opt[u"page_size"])
+ vpp_config.add_statseg_size(u"512M")
+ vpp_config.add_statseg_page_size(self._opt[u"page_size"])
+ vpp_config.add_statseg_per_node_counters(u"on")
+ vpp_config.add_buffers_per_numa(107520)
+ vpp_config.add_cpu_main_core(u"0")
+ if self._opt.get(u"smp") > 1:
+ vpp_config.add_cpu_corelist_workers(f"1-{self._opt.get(u'smp')-1}")
+ vpp_config.add_plugin(u"disable", u"default")
+ vpp_config.add_plugin(u"enable", u"ping_plugin.so")
+ if "2vfpt" in self._opt.get(u'vnf'):
+ vpp_config.add_plugin(u"enable", u"avf_plugin.so")
+ if "vhost" in self._opt.get(u'vnf'):
+ vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
+ vpp_config.add_dpdk_dev(u"0000:00:06.0", u"0000:00:07.0")
+ vpp_config.add_dpdk_dev_default_rxq(kwargs[u"queues"])
+ vpp_config.add_dpdk_log_level(u"debug")
+ if not kwargs[u"jumbo_frames"]:
+ vpp_config.add_dpdk_no_multi_seg()
+ vpp_config.add_dpdk_no_tx_checksum_offload()
+ if "ipsec" in self._opt.get(u'vnf'):
+ vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
+ vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
+ vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
+ if "nat" in self._opt.get(u'vnf'):
+ vpp_config.add_nat(value=u"endpoint-dependent")
+ vpp_config.add_plugin(u"enable", u"nat_plugin.so")
+ vpp_config.write_config(startup)
+
+ # Create VPP running configuration.
+ template = f"{Constants.RESOURCES_TPL}/vm/{self._opt.get(u'vnf')}.exec"
+ exec_cmd_no_error(self._node, f"rm -f {running}", sudo=True)
+
+ with open(template, u"rt") as src_file:
+ src = Template(src_file.read())
+ exec_cmd_no_error(
+ self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
+ f"sudo tee {running}"
+ )
+
+ def create_kernelvm_config_testpmd_io(self, **kwargs):
+ """Create QEMU testpmd-io command line.
+
+ :param kwargs: Key-value pairs to construct command line parameters.
+ :type kwargs: dict
+ """
+ pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
+ testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
+ eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
+ eal_driver=False,
+ eal_pci_whitelist0=u"0000:00:06.0",
+ eal_pci_whitelist1=u"0000:00:07.0",
+ eal_in_memory=True,
+ pmd_num_mbufs=32768,
+ pmd_fwd_mode=u"io",
+ pmd_nb_ports=u"2",
+ pmd_portmask=u"0x3",
+ pmd_max_pkt_len=pmd_max_pkt_len,
+ pmd_mbuf_size=u"16384",
+ pmd_rxq=kwargs[u"queues"],
+ pmd_txq=kwargs[u"queues"],
+ pmd_tx_offloads='0x0',
+ pmd_nb_cores=str(self._opt.get(u"smp") - 1)
+ )
+
+ self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
+
+ def create_kernelvm_config_testpmd_mac(self, **kwargs):
+ """Create QEMU testpmd-mac command line.
+
+ :param kwargs: Key-value pairs to construct command line parameters.
+ :type kwargs: dict
+ """
+ pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
+ testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
+ eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
+ eal_driver=False,
+ eal_pci_whitelist0=u"0000:00:06.0",
+ eal_pci_whitelist1=u"0000:00:07.0",
+ eal_in_memory=True,
+ pmd_num_mbufs=32768,
+ pmd_fwd_mode=u"mac",
+ pmd_nb_ports=u"2",
+ pmd_portmask=u"0x3",
+ pmd_max_pkt_len=pmd_max_pkt_len,
+ pmd_mbuf_size=u"16384",
+ pmd_eth_peer_0=f"0,{kwargs[u'vif1_mac']}",
+ pmd_eth_peer_1=f"1,{kwargs[u'vif2_mac']}",
+ pmd_rxq=kwargs[u"queues"],
+ pmd_txq=kwargs[u"queues"],
+ pmd_tx_offloads=u"0x0",
+ pmd_nb_cores=str(self._opt.get(u"smp") - 1)
+ )
+
+ self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
+
+ def create_kernelvm_config_iperf3(self):
+ """Create QEMU iperf3 command line."""
+ self._opt[u"vnf_bin"] = f"mkdir /run/sshd; /usr/sbin/sshd -D -d"
+
+ def create_kernelvm_init(self, **kwargs):
+ """Create QEMU init script.
+
+ :param kwargs: Key-value pairs to replace content of init startup file.
+ :type kwargs: dict
+ """
+ init = self._temp.get(u"ini")
+ exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)
+
+ with open(kwargs[u"template"], u"rt") as src_file:
+ src = Template(src_file.read())
+ exec_cmd_no_error(
+ self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
+ f"sudo tee {init}"
+ )
+ exec_cmd_no_error(self._node, f"chmod +x {init}", sudo=True)
+
+ def configure_kernelvm_vnf(self, **kwargs):
+ """Create KernelVM VNF configurations.
+
+ :param kwargs: Key-value pairs for templating configs.
+ :type kwargs: dict
"""
- self._qemu_opt['huge_mnt'] = huge_mnt
+ if u"vpp" in self._opt.get(u"vnf"):
+ self.create_kernelvm_config_vpp(**kwargs)
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
+ elif u"testpmd_io" in self._opt.get(u"vnf"):
+ self.create_kernelvm_config_testpmd_io(**kwargs)
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
+ elif u"testpmd_mac" in self._opt.get(u"vnf"):
+ self.create_kernelvm_config_testpmd_mac(**kwargs)
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
+ elif u"iperf3" in self._opt.get(u"vnf"):
+ qemu_id = self._opt.get(u'qemu_id') % 2
+ self.create_kernelvm_config_iperf3()
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init_iperf3.sh",
+ vnf_bin=self._opt.get(u"vnf_bin"),
+ ip_address_l=u"2.2.2.2/30" if qemu_id else u"1.1.1.1/30",
+ ip_address_r=u"2.2.2.1" if qemu_id else u"1.1.1.2",
+ ip_route_r=u"1.1.1.0/30" if qemu_id else u"2.2.2.0/30"
+ )
+ else:
+ raise RuntimeError(u"QEMU: Unsupported VNF!")
- def qemu_set_disk_image(self, disk_image):
- """Set disk image.
+ def get_qemu_pids(self):
+ """Get QEMU CPU pids.
- :param disk_image: Path of the disk image.
- :type disk_image: str
+ :returns: List of QEMU CPU pids.
+ :rtype: list of str
"""
- self._qemu_opt['disk_image'] = disk_image
+ command = f"grep -rwl 'CPU' /proc/$(sudo cat " \
+ f"{self._temp.get(u'pidfile')})/task/*/comm "
+ command += r"| xargs dirname | sed -e 's/\/.*\///g' | uniq"
- def qemu_set_node(self, node):
- """Set node to run QEMU on.
+ stdout, _ = exec_cmd_no_error(self._node, command)
+ return stdout.splitlines()
- :param node: Node to run QEMU on.
- :type node: dict
+ def qemu_set_affinity(self, *host_cpus):
+ """Set qemu affinity by getting thread PIDs from /proc and taskset to list
+ of CPU cores. Function tries to execute 3 times to avoid race condition
+ in getting thread PIDs.
+
+ :param host_cpus: List of CPU cores.
+ :type host_cpus: list
"""
- self._node = node
- self._ssh = SSH()
- self._ssh.connect(node)
- self._vm_info['host'] = node['host']
+ for _ in range(3):
+ try:
+ qemu_cpus = self.get_qemu_pids()
+
+ if len(qemu_cpus) != len(host_cpus):
+ sleep(1)
+ continue
+ for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
+ command = f"taskset -pc {host_cpu} {qemu_cpu}"
+ message = f"QEMU: Set affinity failed " \
+ f"on {self._node[u'host']}!"
+ exec_cmd_no_error(
+ self._node, command, sudo=True, message=message
+ )
+ break
+ except (RuntimeError, ValueError):
+ self.qemu_kill_all()
+ raise
+ else:
+ self.qemu_kill_all()
+ raise RuntimeError(u"Failed to set Qemu threads affinity!")
- def qemu_add_vhost_user_if(self, socket, server=True, mac=None):
- """Add Vhost-user interface.
+ def qemu_set_scheduler_policy(self):
+ """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
+ processes.
- :param socket: Path of the unix socket.
- :param server: If True the socket shall be a listening socket.
- :param mac: Vhost-user interface MAC address (optional, otherwise is
- used autogenerated MAC 52:54:00:00:04:xx).
- :type socket: str
- :type server: bool
- :type mac: str
+ :raises RuntimeError: Set scheduler policy failed.
"""
- self._vhost_id += 1
- # Create unix socket character device.
- chardev = ' -chardev socket,id=char{0},path={1}'.format(self._vhost_id,
- socket)
- if server is True:
- chardev += ',server'
- self._qemu_opt['options'] += chardev
- # Create Vhost-user network backend.
- netdev = ' -netdev vhost-user,id=vhost{0},chardev=char{0}'.format(
- self._vhost_id)
- self._qemu_opt['options'] += netdev
- # If MAC is not specified use autogenerated 52:54:00:00:04:<vhost_id>
- # e.g. vhost1 MAC is 52:54:00:00:04:01
- if mac is None:
- mac = '52:54:00:00:04:{0:02x}'.format(self._vhost_id)
- # Create Virtio network device.
- device = ' -device virtio-net-pci,netdev=vhost{0},mac={1}'.format(
- self._vhost_id, mac)
- self._qemu_opt['options'] += device
- # Add interface MAC and socket to the node dict
- if_data = {'mac_address': mac, 'socket': socket}
- if_name = 'vhost{}'.format(self._vhost_id)
- self._vm_info['interfaces'][if_name] = if_data
- # Add socket to the socket list
- self._socks.append(socket)
+ try:
+ qemu_cpus = self.get_qemu_pids()
+
+ for qemu_cpu in qemu_cpus:
+ command = f"chrt -r -p 1 {qemu_cpu}"
+ message = f"QEMU: Set SCHED_RR failed on {self._node[u'host']}"
+ exec_cmd_no_error(
+ self._node, command, sudo=True, message=message
+ )
+ except (RuntimeError, ValueError):
+ self.qemu_kill_all()
+ raise
def _qemu_qmp_exec(self, cmd):
"""Execute QMP command.
:param cmd: QMP command to execute.
:type cmd: str
- :return: Command output in python representation of JSON format. The
+ :returns: Command output in python representation of JSON format. The
{ "return": {} } response is QMP's success response. An error
response will contain the "error" keyword instead of "return".
"""
# To enter command mode, the qmp_capabilities command must be issued.
- qmp_cmd = 'echo "{ \\"execute\\": \\"qmp_capabilities\\" }' + \
- '{ \\"execute\\": \\"' + cmd + '\\" }" | sudo -S nc -U ' + \
- self.__QMP_SOCK
- (ret_code, stdout, stderr) = self._ssh.exec_command(qmp_cmd)
- if 0 != int(ret_code):
- logger.debug('QMP execute failed {0}'.format(stderr))
- raise RuntimeError('QMP execute "{0}" failed on {1}'.format(cmd,
- self._node['host']))
- logger.trace(stdout)
+ command = f"echo \"{{{{ \\\"execute\\\": " \
+ f"\\\"qmp_capabilities\\\" }}}}" \
+ f"{{{{ \\\"execute\\\": \\\"{cmd}\\\" }}}}\" | " \
+ f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qmp')}"
+ message = f"QMP execute '{cmd}' failed on {self._node[u'host']}"
+
+ stdout, _ = exec_cmd_no_error(
+ self._node, command, sudo=False, message=message
+ )
+
# Skip capabilities negotiation messages.
out_list = stdout.splitlines()
if len(out_list) < 3:
- raise RuntimeError('Invalid QMP output on {0}'.format(
- self._node['host']))
+ raise RuntimeError(f"Invalid QMP output on {self._node[u'host']}")
return json.loads(out_list[2])
def _qemu_qga_flush(self):
- """Flush the QGA parser state
- """
- qga_cmd = 'printf "\xFF" | sudo -S nc ' \
- '-q 1 -U ' + self.__QGA_SOCK
- (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
- if 0 != int(ret_code):
- logger.debug('QGA execute failed {0}'.format(stderr))
- raise RuntimeError('QGA execute "{0}" failed on {1}'.format(cmd,
- self._node['host']))
- logger.trace(stdout)
- if not stdout:
- return {}
- return json.loads(stdout.split('\n', 1)[0])
+ """Flush the QGA parser state."""
+ command = f"(printf \"\xFF\"; sleep 1) | sudo -S socat " \
+ f"- UNIX-CONNECT:{self._temp.get(u'qga')}"
+ message = f"QGA flush failed on {self._node[u'host']}"
+ stdout, _ = exec_cmd_no_error(
+ self._node, command, sudo=False, message=message
+ )
+
+ return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()
def _qemu_qga_exec(self, cmd):
"""Execute QGA command.
:param cmd: QGA command to execute.
:type cmd: str
"""
- qga_cmd = 'echo "{ \\"execute\\": \\"' + cmd + '\\" }" | sudo -S nc ' \
- '-q 1 -U ' + self.__QGA_SOCK
- (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
- if 0 != int(ret_code):
- logger.debug('QGA execute failed {0}'.format(stderr))
- raise RuntimeError('QGA execute "{0}" failed on {1}'.format(cmd,
- self._node['host']))
- logger.trace(stdout)
- if not stdout:
- return {}
- return json.loads(stdout.split('\n', 1)[0])
-
- def _wait_until_vm_boot(self, timeout=300):
- """Wait until QEMU VM is booted.
-
- Ping QEMU guest agent each 5s until VM booted or timeout.
-
- :param timeout: Waiting timeout in seconds (optional, default 300s).
- :type timeout: int
+ command = f"(echo \"{{{{ \\\"execute\\\": " \
+ f"\\\"{cmd}\\\" }}}}\"; sleep 1) | " \
+ f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qga')}"
+ message = f"QGA execute '{cmd}' failed on {self._node[u'host']}"
+ stdout, _ = exec_cmd_no_error(
+ self._node, command, sudo=False, message=message
+ )
+
+ return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()
+
+ def _wait_until_vm_boot(self):
+ """Wait until QEMU VM is booted."""
+ try:
+ getattr(self, f'_wait_{self._opt["vnf"]}')()
+ except AttributeError:
+ self._wait_default()
+
+ def _wait_default(self, retries=60):
+ """Wait until QEMU with the default VNF (e.g. VPP) is booted.
+
+ :param retries: Number of retries.
+ :type retries: int
"""
- start = time()
- while 1:
- if time() - start > timeout:
- raise RuntimeError('timeout, VM {0} not booted on {1}'.format(
- self._qemu_opt['disk_image'], self._node['host']))
- self._qemu_qga_flush()
- out = self._qemu_qga_exec('guest-ping')
+ for _ in range(retries):
+ command = f"tail -1 {self._temp.get(u'log')}"
+ stdout = None
+ try:
+ stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
+ sleep(1)
+ except RuntimeError:
+ pass
+ if "vpp " in stdout and "built by" in stdout:
+ break
+ if u"Press enter to exit" in stdout:
+ break
+ if u"reboot: Power down" in stdout:
+ raise RuntimeError(
+ f"QEMU: NF failed to run on {self._node[u'host']}!"
+ )
+ else:
+ raise RuntimeError(
+ f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+ )
+
+ def _wait_nestedvm(self, retries=12):
+ """Wait until QEMU with NestedVM is booted.
+
+ First try to flush qga until there is output.
+ Then ping QEMU guest agent each 5s until VM booted or timeout.
+
+ :param retries: Number of retries with 5s between trials.
+ :type retries: int
+ """
+ for _ in range(retries):
+ out = None
+ try:
+ out = self._qemu_qga_flush()
+ except ValueError:
+ logger.trace(f"QGA qga flush unexpected output {out}")
# Empty output - VM not booted yet
if not out:
sleep(5)
- # Non-error return - VM booted
- elif out.get('return') is not None:
+ else:
break
- # Skip error and wait
- elif out.get('error') is not None:
+ else:
+ raise RuntimeError(
+ f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+ )
+ for _ in range(retries):
+ out = None
+ try:
+ out = self._qemu_qga_exec(u"guest-ping")
+ except ValueError:
+ logger.trace(f"QGA guest-ping unexpected output {out}")
+ # Empty output - VM not booted yet.
+ if not out:
+ sleep(5)
+ # Non-error return - VM booted.
+ elif out.get(u"return") is not None:
+ break
+ # Skip error and wait.
+ elif out.get(u"error") is not None:
sleep(5)
else:
- raise RuntimeError('QGA guest-ping unexpected output {}'.format(
- out))
- logger.trace('VM {0} booted on {1}'.format(self._qemu_opt['disk_image'],
- self._node['host']))
+ # If there is an unexpected output from QGA guest-info, try
+ # again until timeout.
+ logger.trace(f"QGA guest-ping unexpected output {out}")
+ else:
+ raise RuntimeError(
+ f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+ )
+
+ def _wait_iperf3(self, retries=60):
+ """Wait until QEMU with iPerf3 is booted.
+
+ :param retries: Number of retries.
+ :type retries: int
+ """
+ grep = u"Server listening on 0.0.0.0 port 22."
+ cmd = f"fgrep '{grep}' {self._temp.get(u'log')}"
+ message = f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+ exec_cmd_no_error(
+ self._node, cmd=cmd, sudo=True, message=message, retries=retries,
+ include_reason=True
+ )
def _update_vm_interfaces(self):
"""Update interface names in VM node dict."""
# Send guest-network-get-interfaces command via QGA, output example:
# {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"},
- # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]}
- out = self._qemu_qga_exec('guest-network-get-interfaces')
- interfaces = out.get('return')
+ # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]}.
+ out = self._qemu_qga_exec(u"guest-network-get-interfaces")
+ interfaces = out.get(u"return")
mac_name = {}
if not interfaces:
- raise RuntimeError('Get VM {0} interface list failed on {1}'.format(
- self._qemu_opt['disk_image'], self._node['host']))
- # Create MAC-name dict
+ raise RuntimeError(
+ f"Get VM interface list failed on {self._node[u'host']}"
+ )
+ # Create MAC-name dict.
for interface in interfaces:
- if 'hardware-address' not in interface:
+ if u"hardware-address" not in interface:
continue
- mac_name[interface['hardware-address']] = interface['name']
- # Match interface by MAC and save interface name
- for interface in self._vm_info['interfaces'].values():
- mac = interface.get('mac_address')
+ mac_name[interface[u"hardware-address"]] = interface[u"name"]
+ # Match interface by MAC and save interface name.
+ for interface in self._vm_info[u"interfaces"].values():
+ mac = interface.get(u"mac_address")
if_name = mac_name.get(mac)
if if_name is None:
- logger.trace('Interface name for MAC {} not found'.format(mac))
+ logger.trace(f"Interface name for MAC {mac} not found")
else:
- interface['name'] = if_name
-
- def _huge_page_check(self):
- """Huge page check."""
- huge_mnt = self._qemu_opt.get('huge_mnt')
- mem_size = self._qemu_opt.get('mem_size')
- # Check size of free huge pages
- (_, output, _) = self._ssh.exec_command('grep Huge /proc/meminfo')
- regex = re.compile(r'HugePages_Free:\s+(\d+)')
- match = regex.search(output)
- huge_free = int(match.group(1))
- regex = re.compile(r'Hugepagesize:\s+(\d+)')
- match = regex.search(output)
- huge_size = int(match.group(1))
- if (mem_size * 1024) > (huge_free * huge_size):
- raise RuntimeError('Not enough free huge pages {0} kB, required '
- '{1} MB'.format(huge_free * huge_size, mem_size))
- # Check if huge pages mount point exist
- has_huge_mnt = False
- (_, output, _) = self._ssh.exec_command('cat /proc/mounts')
- for line in output.splitlines():
- # Try to find something like:
- # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
- mount = line.split()
- if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
- has_huge_mnt = True
- break
- # If huge page mount point not exist create one
- if not has_huge_mnt:
- cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
- huge_mnt)
- (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
- if int(ret_code) != 0:
- logger.debug('Mount huge pages failed {0}'.format(stderr))
- raise RuntimeError('Mount huge pages failed on {0}'.format(
- self._node['host']))
+ interface[u"name"] = if_name
def qemu_start(self):
"""Start QEMU and wait until VM boot.
- :return: VM node info.
+ :returns: VM node info.
:rtype: dict
- .. note:: First set at least node to run QEMU on.
- .. warning:: Starts only one VM on the node.
"""
- # SSH forwarding
- ssh_fwd = '-net user,hostfwd=tcp::{0}-:22'.format(
- self._qemu_opt.get('ssh_fwd_port'))
- # Memory and huge pages
- mem = '-object memory-backend-file,id=mem,size={0}M,mem-path={1},' \
- 'share=on -m {0} -numa node,memdev=mem'.format(
- self._qemu_opt.get('mem_size'), self._qemu_opt.get('huge_mnt'))
- self._huge_page_check()
- # Setup QMP via unix socket
- qmp = '-qmp unix:{0},server,nowait'.format(self.__QMP_SOCK)
- # Setup serial console
- serial = '-chardev socket,host=127.0.0.1,port={0},id=gnc0,server,' \
- 'nowait -device isa-serial,chardev=gnc0'.format(
- self._qemu_opt.get('serial_port'))
- # Setup QGA via chardev (unix socket) and isa-serial channel
- qga = '-chardev socket,path=/tmp/qga.sock,server,nowait,id=qga0 ' \
- '-device isa-serial,chardev=qga0'
- # Graphic setup
- graphic = '-monitor none -display none -vga none'
- # Run QEMU
- cmd = '{0} {1} {2} {3} {4} -hda {5} {6} {7} {8} {9}'.format(
- self.__QEMU_BIN, self._qemu_opt.get('smp'), mem, ssh_fwd,
- self._qemu_opt.get('options'),
- self._qemu_opt.get('disk_image'), qmp, serial, qga, graphic)
- (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd, timeout=300)
- if int(ret_code) != 0:
- logger.debug('QEMU start failed {0}'.format(stderr))
- raise RuntimeError('QEMU start failed on {0}'.format(
- self._node['host']))
- logger.trace('QEMU running')
- # Wait until VM boot
- self._wait_until_vm_boot()
- # Update interface names in VM node dict
- self._update_vm_interfaces()
- # Return VM node dict
+ cmd_opts = OptionString()
+ cmd_opts.add(f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch}")
+ cmd_opts.extend(self._params)
+ message = f"QEMU: Start failed on {self._node[u'host']}!"
+ try:
+ DUTSetup.check_huge_page(
+ self._node, self._opt.get(u"mem-path"),
+ int(self._opt.get(u"mem"))
+ )
+
+ exec_cmd_no_error(
+ self._node, cmd_opts, timeout=300, sudo=True, message=message
+ )
+ self._wait_until_vm_boot()
+ except RuntimeError:
+ self.qemu_kill_all()
+ raise
return self._vm_info
- def qemu_quit(self):
- """Quit the QEMU emulator."""
- out = self._qemu_qmp_exec('quit')
- err = out.get('error')
- if err is not None:
- raise RuntimeError('QEMU quit failed on {0}, error: {1}'.format(
- self._node['host'], json.dumps(err)))
-
- def qemu_system_powerdown(self):
- """Power down the system (if supported)."""
- out = self._qemu_qmp_exec('system_powerdown')
- err = out.get('error')
- if err is not None:
- raise RuntimeError('QEMU system powerdown failed on {0}, '
- 'error: {1}'.format(self._node['host'], json.dumps(err)))
-
- def qemu_system_reset(self):
- """Reset the system."""
- out = self._qemu_qmp_exec('system_reset')
- err = out.get('error')
- if err is not None:
- raise RuntimeError('QEMU system reset failed on {0}, '
- 'error: {1}'.format(self._node['host'], json.dumps(err)))
-
def qemu_kill(self):
"""Kill qemu process."""
- # TODO: add PID storage so that we can kill specific PID
- # Note: in QEMU start phase there are 3 QEMU processes because we
- # daemonize QEMU
- self._ssh.exec_command_sudo('pkill -SIGKILL qemu')
-
- def qemu_clear_socks(self):
- """Remove all sockets created by QEMU."""
- # If serial console port still open kill process
- cmd = 'fuser -k {}/tcp'.format(self._qemu_opt.get('serial_port'))
- self._ssh.exec_command_sudo(cmd)
- # Delete all created sockets
- for sock in self._socks:
- cmd = 'rm -f {}'.format(sock)
- self._ssh.exec_command_sudo(cmd)
-
- def qemu_system_status(self):
- """Return current VM status.
-
- VM should be in following status:
-
- - debug: QEMU running on a debugger
- - finish-migrate: paused to finish the migration process
- - inmigrate: waiting for an incoming migration
- - internal-error: internal error has occurred
- - io-error: the last IOP has failed
- - paused: paused
- - postmigrate: paused following a successful migrate
- - prelaunch: QEMU was started with -S and guest has not started
- - restore-vm: paused to restore VM state
- - running: actively running
- - save-vm: paused to save the VM state
- - shutdown: shut down (and -no-shutdown is in use)
- - suspended: suspended (ACPI S3)
- - watchdog: watchdog action has been triggered
- - guest-panicked: panicked as a result of guest OS panic
-
- :return: VM status.
+ exec_cmd(
+ self._node, f"chmod +r {self._temp.get(u'pidfile')}", sudo=True
+ )
+ exec_cmd(
+ self._node, f"kill -SIGKILL $(cat {self._temp.get(u'pidfile')})",
+ sudo=True
+ )
+
+ for value in self._temp.values():
+ exec_cmd(self._node, f"cat {value}", sudo=True)
+ exec_cmd(self._node, f"rm -f {value}", sudo=True)
+
+ def qemu_kill_all(self):
+ """Kill all QEMU processes on the DUT node."""
+ exec_cmd(self._node, u"pkill -SIGKILL qemu", sudo=True)
+
+ for value in self._temp.values():
+ exec_cmd(self._node, f"cat {value}", sudo=True)
+ exec_cmd(self._node, f"rm -f {value}", sudo=True)
+
+ def qemu_version(self):
+ """Return QEMU version.
+
+ :returns: Qemu version.
:rtype: str
"""
- out = self._qemu_qmp_exec('query-status')
- ret = out.get('return')
- if ret is not None:
- return ret.get('status')
- else:
- err = out.get('error')
- raise RuntimeError('QEMU query-status failed on {0}, '
- 'error: {1}'.format(self._node['host'], json.dumps(err)))
-
- @staticmethod
- def build_qemu(node):
- """Build QEMU from sources.
-
- :param node: Node to build QEMU on.
- :type node: dict
- """
- ssh = SSH()
- ssh.connect(node)
-
- (ret_code, stdout, stderr) = \
- ssh.exec_command('sudo -Sn bash {0}/{1}/qemu_build.sh'.format(
- Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH), 1000)
- logger.trace(stdout)
- if 0 != int(ret_code):
- logger.debug('QEMU build failed {0}'.format(stderr))
- raise RuntimeError('QEMU build failed on {0}'.format(node['host']))
+ command = f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch} " \
+ f"--version"
+ try:
+ stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
+ return match(r"QEMU emulator version ([\d.]*)", stdout).group(1)
+ except RuntimeError:
+ self.qemu_kill_all()
+ raise