-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from resources.libraries.python.OptionString import OptionString
from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.VhostUser import VirtioFeaturesFlags
+from resources.libraries.python.VhostUser import VirtioFeatureMask
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
-from resources.libraries.python.VPPUtil import VPPUtil
__all__ = [u"QemuUtils"]
def __init__(
self, node, qemu_id=1, smp=1, mem=512, vnf=None,
- img=Constants.QEMU_VM_IMAGE):
+ img=Constants.QEMU_VM_IMAGE, page_size=u""):
"""Initialize QemuUtil class.
:param node: Node to run QEMU on.
:param mem: Amount of memory.
:param vnf: Network function workload.
:param img: QEMU disk image or kernel image path.
+ :param page_size: Hugepage Size.
:type node: dict
:type qemu_id: int
:type smp: int
:type mem: int
:type vnf: str
:type img: str
+ :type page_size: str
"""
- self._vhost_id = 0
+ self._nic_id = 0
self._node = node
self._arch = Topology.get_node_arch(self._node)
self._opt = dict()
# Architecture specific options
if self._arch == u"aarch64":
- dpdk_target = u"arm64-armv8a"
self._opt[u"machine_args"] = \
u"virt,accel=kvm,usb=off,mem-merge=off,gic-version=3"
self._opt[u"console"] = u"ttyAMA0"
else:
- dpdk_target = u"x86_64-native"
self._opt[u"machine_args"] = u"pc,accel=kvm,usb=off,mem-merge=off"
self._opt[u"console"] = u"ttyS0"
- self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/" \
- f"{dpdk_target}-linux-gcc/app"
+ self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/build/app"
self._vm_info = {
u"host": node[u"host"],
u"type": NodeType.VM,
u"port": 10021 + qemu_id,
u"serial": 4555 + qemu_id,
- u"username": 'cisco',
- u"password": 'cisco',
+ u"username": 'testuser',
+ u"password": 'Csit1234',
u"interfaces": {},
}
if node[u"port"] != 22:
self._opt[u"smp"] = int(smp)
self._opt[u"img"] = img
self._opt[u"vnf"] = vnf
+ self._opt[u"page_size"] = page_size
+
# Temporary files.
self._temp = dict()
+ self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log"
self._temp[u"pidfile"] = f"/run/qemu_{qemu_id}.pid"
if img == Constants.QEMU_VM_IMAGE:
- self._opt[u"vm_type"] = u"nestedvm"
self._temp[u"qmp"] = f"/run/qmp_{qemu_id}.sock"
self._temp[u"qga"] = f"/run/qga_{qemu_id}.sock"
elif img == Constants.QEMU_VM_KERNEL:
node, f"ls -1 {Constants.QEMU_VM_KERNEL}* | tail -1",
message=u"Qemu Kernel VM image not found!"
)
- self._opt[u"vm_type"] = u"kernelvm"
- self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log"
self._temp[u"ini"] = f"/etc/vm_init_{qemu_id}.conf"
self._opt[u"initrd"], _ = exec_cmd_no_error(
node, f"ls -1 {Constants.QEMU_VM_KERNEL_INITRD}* | tail -1",
raise RuntimeError(f"QEMU: Unknown VM image option: {img}")
# Computed parameters for QEMU command line.
self._params = OptionString(prefix=u"-")
- self.add_params()
-
- def add_params(self):
- """Set QEMU command line parameters."""
- self.add_default_params()
- if self._opt.get(u"vm_type", u"") == u"nestedvm":
- self.add_nestedvm_params()
- elif self._opt.get(u"vm_type", u"") == u"kernelvm":
- self.add_kernelvm_params()
- else:
- raise RuntimeError(u"QEMU: Unsupported VM type!")
def add_default_params(self):
"""Set default QEMU command line parameters."""
+ mem_path = f"/dev/hugepages1G" \
+ if self._opt[u"page_size"] == u"1G" else u"/dev/hugepages"
+
self._params.add(u"daemonize")
self._params.add(u"nodefaults")
self._params.add_with_value(
u"name", f"vnf{self._opt.get(u'qemu_id')},debug-threads=on"
)
self._params.add(u"no-user-config")
- self._params.add_with_value(u"monitor", u"none")
- self._params.add_with_value(u"display", u"none")
- self._params.add_with_value(u"vga", u"none")
+ self._params.add(u"nographic")
self._params.add(u"enable-kvm")
self._params.add_with_value(u"pidfile", self._temp.get(u"pidfile"))
self._params.add_with_value(u"cpu", u"host")
)
self._params.add_with_value(
u"object", f"memory-backend-file,id=mem,"
- f"size={self._opt.get(u'mem')}M,mem-path=/dev/hugepages,share=on"
+ f"size={self._opt.get(u'mem')}M,"
+ f"mem-path={mem_path},share=on"
)
self._params.add_with_value(u"m", f"{self._opt.get(u'mem')}M")
self._params.add_with_value(u"numa", u"node,memdev=mem")
- self._params.add_with_value(u"balloon", u"none")
- def add_nestedvm_params(self):
- """Set NestedVM QEMU parameters."""
+    def add_net_user(self, net=u"10.0.2.0/24"):
+        """Set management port forwarding.
+
+        Adds a user-mode (SLIRP) management NIC with the guest SSH port
+        forwarded to the host port reserved for this VM.
+
+        :param net: Network CIDR used by the QEMU user network stack.
+        :type net: str
+        """
+        self._params.add_with_value(
+            u"netdev", f"user,id=mgmt,net={net},"
+            f"hostfwd=tcp::{self._vm_info[u'port']}-:22"
+        )
        self._params.add_with_value(
-            u"net",
-            f"nic,macaddr=52:54:00:00:{self._opt.get(u'qemu_id'):02x}:ff"
+            u"device", f"virtio-net,netdev=mgmt"
        )
+
+    def add_qmp_qga(self):
+        """Set QMP, QGA management.
+
+        Adds the Qemu Guest Agent chardev/serial pair and the QMP control
+        socket so the host can manage and query the guest.
+        """
+        # QGA channel: unix socket chardev, registered as qga0.
        self._params.add_with_value(
-            u"net", f"user,hostfwd=tcp::{self._vm_info[u'port']}-:22"
+            u"chardev", f"socket,path={self._temp.get(u'qga')},"
+            f"server,nowait,id=qga0"
        )
-        locking = u",file.locking=off"
+        # Expose the QGA chardev in the guest as an ISA serial device.
        self._params.add_with_value(
-            u"drive", f"file={self._opt.get(u'img')},"
-            f"format=raw,cache=none,if=virtio{locking}"
+            u"device", u"isa-serial,chardev=qga0"
        )
+        # QMP unix socket used by _qemu_qmp_exec for management commands.
        self._params.add_with_value(
            u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait"
        )
+
+    def add_serial(self):
+        """Set serial to file redirect.
+
+        Adds a TCP serial console chardev and a serial-to-file redirect;
+        the log file is what the _wait_* helpers poll for boot banners.
+        """
+        # Serial console reachable over TCP on the per-VM "serial" port.
        self._params.add_with_value(
            u"chardev", f"socket,host=127.0.0.1,"
-            f"port={self._vm_info[u'serial']},id=gnc0,server,nowait")
-        self._params.add_with_value(u"device", u"isa-serial,chardev=gnc0")
+            f"port={self._vm_info[u'serial']},id=gnc0,server,nowait"
+        )
        self._params.add_with_value(
-            u"chardev", f"socket,path={self._temp.get(u'qga')},"
-            f"server,nowait,id=qga0"
+            u"device", u"isa-serial,chardev=gnc0"
+        )
+        # Mirror guest console output to the per-VM log file.
+        self._params.add_with_value(
+            u"serial", f"file:{self._temp.get(u'log')}"
+        )
+
+    def add_drive_cdrom(self, drive_file, index=None):
+        """Set CD-ROM drive.
+
+        :param drive_file: Path to drive image.
+        :param index: Drive index.
+        :type drive_file: str
+        :type index: int
+        """
+        # Compare against None explicitly so a valid index of 0 is kept
+        # (a bare truthiness test would silently drop index=0).
+        index = f"index={index}," if index is not None else u""
+        self._params.add_with_value(
+            u"drive", f"file={drive_file},{index}media=cdrom"
+        )
+
+    def add_drive(self, drive_file, drive_format):
+        """Set drive with custom format.
+
+        :param drive_file: Path to drive image.
+        :param drive_format: Drive image format.
+        :type drive_file: str
+        :type drive_format: str
+        """
+        # file.locking=off mirrors the option used by the previous
+        # nested-VM drive setup, presumably so several QEMU instances can
+        # share one base image — verify against callers.
+        self._params.add_with_value(
+            u"drive", f"file={drive_file},format={drive_format},"
+            u"cache=none,if=virtio,file.locking=off"
        )
- self._params.add_with_value(u"device", u"isa-serial,chardev=qga0")
def add_kernelvm_params(self):
"""Set KernelVM QEMU parameters."""
+ hugepages = 3 if self._opt[u"page_size"] == u"1G" else 512
+
self._params.add_with_value(
u"serial", f"file:{self._temp.get(u'log')}"
)
self._params.add_with_value(
u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot"
)
- self._params.add_with_value(u"kernel", f"{self._opt.get(u'img')}")
- self._params.add_with_value(u"initrd", f"{self._opt.get(u'initrd')}")
+ self._params.add_with_value(
+ u"kernel", f"{self._opt.get(u'img')}"
+ )
+ self._params.add_with_value(
+ u"initrd", f"{self._opt.get(u'initrd')}"
+ )
self._params.add_with_value(
u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
f"root=virtioroot console={self._opt.get(u'console')} "
- f"tsc=reliable hugepages=256 "
+ f"tsc=reliable hugepages={hugepages} "
+ f"hugepagesz={self._opt.get(u'page_size')} "
f"init={self._temp.get(u'ini')} fastboot'"
)
+    def add_vhost_user_if(
+            self, socket, server=True, jumbo_frames=False, queue_size=None,
+            queues=1, virtio_feature_mask=None):
+        """Add Vhost-user interface.
+
+        :param socket: Path of the unix socket.
+        :param server: If True the socket shall be a listening socket.
+        :param jumbo_frames: Set True if jumbo frames are used in the test.
+        :param queue_size: Vring queue size.
+        :param queues: Number of queues.
+        :param virtio_feature_mask: Mask of virtio features to be enabled.
+        :type socket: str
+        :type server: bool
+        :type jumbo_frames: bool
+        :type queue_size: int
+        :type queues: int
+        :type virtio_feature_mask: int
+        """
+        self._nic_id += 1
+        # NOTE(review): jumbo frames are intentionally not applied here.
+        if jumbo_frames:
+            logger.debug(u"Jumbo frames temporarily disabled!")
+        self._params.add_with_value(
+            u"chardev", f"socket,id=char{self._nic_id},"
+            f"path={socket}{u',server' if server is True else u''}"
+        )
+        self._params.add_with_value(
+            u"netdev", f"vhost-user,id=vhost{self._nic_id},"
+            f"chardev=char{self._nic_id},queues={queues}"
+        )
+        # MAC encodes qemu_id and NIC id, keeping addresses unique across
+        # VMs and interfaces.
+        mac = f"52:54:00:00:{self._opt.get(u'qemu_id'):02x}:" \
+            f"{self._nic_id:02x}"
+        queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
+            if queue_size else u""
+        # GSO/CSUM offloads are driven by the caller-supplied feature mask.
+        gso = VirtioFeatureMask.is_feature_enabled(
+            virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_GSO)
+        csum = VirtioFeatureMask.is_feature_enabled(
+            virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_CSUM)
+
+        # addr offsets the guest PCI slot by 5 (assumed clear of default
+        # devices — verify); vectors=2*queues+2 follows the virtio-net
+        # multiqueue sizing convention.
+        self._params.add_with_value(
+            u"device", f"virtio-net-pci,netdev=vhost{self._nic_id},mac={mac},"
+            f"addr={self._nic_id+5}.0,mq=on,vectors={2 * queues + 2},"
+            f"csum={u'on' if csum else u'off'},"
+            f"gso={u'on' if gso else u'off'},"
+            f"guest_tso4={u'on' if gso else u'off'},"
+            f"guest_tso6={u'on' if gso else u'off'},"
+            f"guest_ecn={u'on' if gso else u'off'},"
+            f"{queue_size}"
+        )
+
+        # Add interface MAC and socket to the node dict.
+        if_data = {u"mac_address": mac, u"socket": socket}
+        if_name = f"vhost{self._nic_id}"
+        self._vm_info[u"interfaces"][if_name] = if_data
+        # Add socket to temporary file list.
+        self._temp[if_name] = socket
+
+    def add_vfio_pci_if(self, pci):
+        """Add VFIO PCI interface.
+
+        Passes a host PCI device through to the guest via vfio-pci.
+
+        :param pci: PCI address of interface.
+        :type pci: str
+        """
+        self._nic_id += 1
+        # Same guest slot numbering as vhost-user NICs: nic_id + 5
+        # (assumed clear of default devices — verify).
+        self._params.add_with_value(
+            u"device", f"vfio-pci,host={pci},addr={self._nic_id+5}.0"
+        )
+
def create_kernelvm_config_vpp(self, **kwargs):
"""Create QEMU VPP config files.
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_exec(running)
vpp_config.add_socksvr()
+ vpp_config.add_main_heap_size(u"512M")
+ vpp_config.add_main_heap_page_size(self._opt[u"page_size"])
+ vpp_config.add_default_hugepage_size(self._opt[u"page_size"])
+ vpp_config.add_statseg_size(u"512M")
+ vpp_config.add_statseg_page_size(self._opt[u"page_size"])
+ vpp_config.add_statseg_per_node_counters(u"on")
+ vpp_config.add_buffers_per_numa(107520)
vpp_config.add_cpu_main_core(u"0")
if self._opt.get(u"smp") > 1:
vpp_config.add_cpu_corelist_workers(f"1-{self._opt.get(u'smp')-1}")
- vpp_config.add_dpdk_dev(u"0000:00:06.0", u"0000:00:07.0")
- vpp_config.add_dpdk_dev_default_rxq(kwargs[u"queues"])
- vpp_config.add_dpdk_log_level(u"debug")
- if not kwargs[u"jumbo_frames"]:
- vpp_config.add_dpdk_no_multi_seg()
- vpp_config.add_dpdk_no_tx_checksum_offload()
vpp_config.add_plugin(u"disable", u"default")
- vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
+ vpp_config.add_plugin(u"enable", u"ping_plugin.so")
+ if "2vfpt" in self._opt.get(u'vnf'):
+ vpp_config.add_plugin(u"enable", u"avf_plugin.so")
+ if "vhost" in self._opt.get(u'vnf'):
+ vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
+ vpp_config.add_dpdk_dev(u"0000:00:06.0", u"0000:00:07.0")
+ vpp_config.add_dpdk_dev_default_rxq(kwargs[u"queues"])
+ vpp_config.add_dpdk_log_level(u"debug")
+ if not kwargs[u"jumbo_frames"]:
+ vpp_config.add_dpdk_no_multi_seg()
+ vpp_config.add_dpdk_no_tx_checksum_offload()
+ if "ipsec" in self._opt.get(u'vnf'):
+ vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
+ vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
+ vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
+ if "nat" in self._opt.get(u'vnf'):
+ vpp_config.add_nat(value=u"endpoint-dependent")
+ vpp_config.add_plugin(u"enable", u"nat_plugin.so")
vpp_config.write_config(startup)
# Create VPP running configuration.
- template = f"{Constants.RESOURCES_TPL_VM}/{self._opt.get(u'vnf')}.exec"
+ template = f"{Constants.RESOURCES_TPL}/vm/{self._opt.get(u'vnf')}.exec"
exec_cmd_no_error(self._node, f"rm -f {running}", sudo=True)
with open(template, u"rt") as src_file:
eal_pci_whitelist0=u"0000:00:06.0",
eal_pci_whitelist1=u"0000:00:07.0",
eal_in_memory=True,
- pmd_num_mbufs=16384,
+ pmd_num_mbufs=32768,
pmd_fwd_mode=u"io",
pmd_nb_ports=u"2",
pmd_portmask=u"0x3",
eal_pci_whitelist0=u"0000:00:06.0",
eal_pci_whitelist1=u"0000:00:07.0",
eal_in_memory=True,
- pmd_num_mbufs=16384,
+ pmd_num_mbufs=32768,
pmd_fwd_mode=u"mac",
pmd_nb_ports=u"2",
pmd_portmask=u"0x3",
self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
+    def create_kernelvm_config_iperf3(self):
+        """Create QEMU iperf3 command line.
+
+        The guest "VNF binary" is sshd: the VM only needs reachable SSH,
+        presumably so the test drives iperf3 over it — confirm with callers.
+        """
+        # Plain string literal: the previous f-string had no placeholders.
+        self._opt[u"vnf_bin"] = u"mkdir /run/sshd; /usr/sbin/sshd -D -d"
+
def create_kernelvm_init(self, **kwargs):
"""Create QEMU init script.
:param kwargs: Key-value pairs to replace content of init startup file.
:type kwargs: dict
"""
- template = f"{Constants.RESOURCES_TPL_VM}/init.sh"
init = self._temp.get(u"ini")
exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)
- with open(template, u"rt") as src_file:
+ with open(kwargs[u"template"], u"rt") as src_file:
src = Template(src_file.read())
exec_cmd_no_error(
self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
"""
if u"vpp" in self._opt.get(u"vnf"):
self.create_kernelvm_config_vpp(**kwargs)
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
elif u"testpmd_io" in self._opt.get(u"vnf"):
self.create_kernelvm_config_testpmd_io(**kwargs)
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
elif u"testpmd_mac" in self._opt.get(u"vnf"):
self.create_kernelvm_config_testpmd_mac(**kwargs)
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
+ elif u"iperf3" in self._opt.get(u"vnf"):
+ qemu_id = self._opt.get(u'qemu_id') % 2
+ self.create_kernelvm_config_iperf3()
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init_iperf3.sh",
+ vnf_bin=self._opt.get(u"vnf_bin"),
+ ip_address_l=u"2.2.2.2/30" if qemu_id else u"1.1.1.1/30",
+ ip_address_r=u"2.2.2.1" if qemu_id else u"1.1.1.2",
+ ip_route_r=u"1.1.1.0/30" if qemu_id else u"2.2.2.0/30"
+ )
else:
raise RuntimeError(u"QEMU: Unsupported VNF!")
- self.create_kernelvm_init(vnf_bin=self._opt.get(u"vnf_bin"))
def get_qemu_pids(self):
"""Get QEMU CPU pids.
self.qemu_kill_all()
raise
- def qemu_add_vhost_user_if(
- self, socket, server=True, jumbo_frames=False, queue_size=None,
- queues=1, csum=False, gso=False):
- """Add Vhost-user interface.
-
- :param socket: Path of the unix socket.
- :param server: If True the socket shall be a listening socket.
- :param jumbo_frames: Set True if jumbo frames are used in the test.
- :param queue_size: Vring queue size.
- :param queues: Number of queues.
- :param csum: Checksum offloading.
- :param gso: Generic segmentation offloading.
- :type socket: str
- :type server: bool
- :type jumbo_frames: bool
- :type queue_size: int
- :type queues: int
- :type csum: bool
- :type gso: bool
- """
- self._vhost_id += 1
- self._params.add_with_value(
- u"chardev", f"socket,id=char{self._vhost_id},"
- f"path={socket}{u',server' if server is True else u''}"
- )
- self._params.add_with_value(
- u"netdev", f"vhost-user,id=vhost{self._vhost_id},"
- f"chardev=char{self._vhost_id},queues={queues}"
- )
- mac = f"52:54:00:00:{self._opt.get(u'qemu_id'):02x}:" \
- f"{self._vhost_id:02x}"
- queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
- if queue_size else u""
- self._params.add_with_value(
- u"device", f"virtio-net-pci,netdev=vhost{self._vhost_id},mac={mac},"
- f"addr={self._vhost_id+5}.0,mq=on,vectors={2 * queues + 2},"
- f"csum={u'on' if csum else u'off'},gso={u'on' if gso else u'off'},"
- f"guest_tso4=off,guest_tso6=off,guest_ecn=off,"
- f"{queue_size}"
- )
-
- # Add interface MAC and socket to the node dict.
- if_data = {u"mac_address": mac, u"socket": socket}
- if_name = f"vhost{self._vhost_id}"
- self._vm_info[u"interfaces"][if_name] = if_data
- # Add socket to temporary file list.
- self._temp[if_name] = socket
-
def _qemu_qmp_exec(self, cmd):
"""Execute QMP command.
return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()
def _wait_until_vm_boot(self):
- """Wait until QEMU with NestedVM is booted."""
- if self._opt.get(u"vm_type") == u"nestedvm":
- self._wait_until_nestedvm_boot()
- self._update_vm_interfaces()
- elif self._opt.get(u"vm_type") == u"kernelvm":
- self._wait_until_kernelvm_boot()
+ """Wait until QEMU VM is booted."""
+ try:
+ getattr(self, f'_wait_{self._opt["vnf"]}')()
+ except AttributeError:
+ self._wait_default()
+
+    def _wait_default(self, retries=60):
+        """Wait until the QEMU VM is booted (default log-banner check).
+
+        Polls the last line of the serial log for a known boot banner
+        (VPP version line or testpmd prompt) until retries are exhausted.
+
+        :param retries: Number of retries.
+        :type retries: int
+        :raises RuntimeError: If the NF powers down or the VM never boots.
+        """
+        for _ in range(retries):
+            command = f"tail -1 {self._temp.get(u'log')}"
+            stdout = None
+            try:
+                stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
+            except RuntimeError:
+                pass
+            # Pace the polling loop on failures as well as successes.
+            sleep(1)
+            if stdout is None:
+                # Log not readable yet; "in" on None would raise TypeError.
+                continue
+            if "vpp " in stdout and "built by" in stdout:
+                break
+            if u"Press enter to exit" in stdout:
+                break
+            if u"reboot: Power down" in stdout:
+                raise RuntimeError(
+                    f"QEMU: NF failed to run on {self._node[u'host']}!"
+                )
        else:
-            raise RuntimeError(u"QEMU: Unsupported VM type!")
+            raise RuntimeError(
+                f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+            )
- def _wait_until_nestedvm_boot(self, retries=12):
+ def _wait_nestedvm(self, retries=12):
"""Wait until QEMU with NestedVM is booted.
First try to flush qga until there is output.
f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
)
- def _wait_until_kernelvm_boot(self, retries=60):
- """Wait until QEMU KernelVM is booted.
+    def _wait_iperf3(self, retries=60):
+        """Wait until QEMU with iPerf3 is booted.
    :param retries: Number of retries.
    :type retries: int
    """
-        vpp_ver = VPPUtil.vpp_show_version(self._node)
-
-        for _ in range(retries):
-            command = f"tail -1 {self._temp.get(u'log')}"
-            stdout = None
-            try:
-                stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
-                sleep(1)
-            except RuntimeError:
-                pass
-            if vpp_ver in stdout or u"Press enter to exit" in stdout:
-                break
-            if u"reboot: Power down" in stdout:
-                raise RuntimeError(
-                    f"QEMU: NF failed to run on {self._node[u'host']}!"
-                )
-        else:
-            raise RuntimeError(
-                f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
-            )
+        # sshd logs this line once it accepts connections; the iPerf3 VM
+        # is considered booted as soon as SSH is reachable.
+        grep = u"Server listening on 0.0.0.0 port 22."
+        cmd = f"fgrep '{grep}' {self._temp.get(u'log')}"
+        message = f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+        exec_cmd_no_error(
+            self._node, cmd=cmd, sudo=True, message=message, retries=retries,
+            include_reason=True
+        )
def _update_vm_interfaces(self):
"""Update interface names in VM node dict."""
message = f"QEMU: Start failed on {self._node[u'host']}!"
try:
DUTSetup.check_huge_page(
- self._node, u"/dev/hugepages", int(self._opt.get(u"mem")))
+ self._node, self._opt.get(u"mem-path"),
+ int(self._opt.get(u"mem"))
+ )
exec_cmd_no_error(
self._node, cmd_opts, timeout=300, sudo=True, message=message