1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """QEMU utilities library."""
15
16 import json
17
18 from re import match
19 from string import Template
20 from time import sleep
21
22 from robot.api import logger
23
24 from resources.libraries.python.Constants import Constants
25 from resources.libraries.python.DpdkUtil import DpdkUtil
26 from resources.libraries.python.DUTSetup import DUTSetup
27 from resources.libraries.python.OptionString import OptionString
28 from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
29 from resources.libraries.python.topology import NodeType, Topology
30 from resources.libraries.python.VhostUser import VirtioFeaturesFlags
31 from resources.libraries.python.VhostUser import VirtioFeatureMask
32 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
33
34 __all__ = [u"QemuUtils"]
35
36
37 class QemuUtils:
38     """QEMU utilities."""
39
40     # Use one instance of the class per test case.
41     ROBOT_LIBRARY_SCOPE = u"TEST CASE"
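    # Illustrative usage sketch (not taken from this file; the method calls are
    # real, but the VNF name, socket paths and kwargs are placeholders that
    # depend on the selected VNF template):
    #
    #   qemu = QemuUtils(
    #       node, qemu_id=1, smp=2, mem=2048, vnf=u"vpp_chain_l2xc",
    #       img=Constants.QEMU_VM_KERNEL)
    #   qemu.add_default_params()
    #   qemu.add_kernelvm_params()
    #   qemu.add_vhost_user_if(u"/run/vpp/sock-1-1")
    #   qemu.add_vhost_user_if(u"/run/vpp/sock-1-2")
    #   qemu.configure_kernelvm_vnf(queues=1, jumbo_frames=False)
    #   vm_info = qemu.qemu_start()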
42
43     def __init__(
44             self, node, qemu_id=1, smp=1, mem=512, vnf=None,
45             img=Constants.QEMU_VM_IMAGE):
46         """Initialize QemuUtil class.
47
48         :param node: Node to run QEMU on.
49         :param qemu_id: QEMU identifier.
50         :param smp: Number of virtual SMP units (cores).
51         :param mem: Amount of memory (in MB).
52         :param vnf: Network function workload.
53         :param img: QEMU disk image or kernel image path.
54         :type node: dict
55         :type qemu_id: int
56         :type smp: int
57         :type mem: int
58         :type vnf: str
59         :type img: str
60         """
61         self._nic_id = 0
62         self._node = node
63         self._arch = Topology.get_node_arch(self._node)
64         self._opt = dict()
65
66         # Architecture specific options
67         if self._arch == u"aarch64":
68             dpdk_target = u"arm64-armv8a"
69             self._opt[u"machine_args"] = \
70                 u"virt,accel=kvm,usb=off,mem-merge=off,gic-version=3"
71             self._opt[u"console"] = u"ttyAMA0"
72         else:
73             dpdk_target = u"x86_64-native"
74             self._opt[u"machine_args"] = u"pc,accel=kvm,usb=off,mem-merge=off"
75             self._opt[u"console"] = u"ttyS0"
76         self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/" \
77             f"{dpdk_target}-linux-gcc/app"
78         self._vm_info = {
79             u"host": node[u"host"],
80             u"type": NodeType.VM,
81             u"port": 10021 + qemu_id,
82             u"serial": 4555 + qemu_id,
83             u"username": 'testuser',
84             u"password": 'Csit1234',
85             u"interfaces": {},
86         }
87         if node[u"port"] != 22:
88             self._vm_info[u"host_port"] = node[u"port"]
89             self._vm_info[u"host_username"] = node[u"username"]
90             self._vm_info[u"host_password"] = node[u"password"]
91         # Input Options.
92         self._opt[u"qemu_id"] = qemu_id
93         self._opt[u"mem"] = int(mem)
94         self._opt[u"smp"] = int(smp)
95         self._opt[u"img"] = img
96         self._opt[u"vnf"] = vnf
97         # Temporary files.
98         self._temp = dict()
99         self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log"
100         self._temp[u"pidfile"] = f"/run/qemu_{qemu_id}.pid"
101         if img == Constants.QEMU_VM_IMAGE:
102             self._temp[u"qmp"] = f"/run/qmp_{qemu_id}.sock"
103             self._temp[u"qga"] = f"/run/qga_{qemu_id}.sock"
104         elif img == Constants.QEMU_VM_KERNEL:
105             self._opt[u"img"], _ = exec_cmd_no_error(
106                 node, f"ls -1 {Constants.QEMU_VM_KERNEL}* | tail -1",
107                 message=u"Qemu Kernel VM image not found!"
108             )
109             self._temp[u"ini"] = f"/etc/vm_init_{qemu_id}.conf"
110             self._opt[u"initrd"], _ = exec_cmd_no_error(
111                 node, f"ls -1 {Constants.QEMU_VM_KERNEL_INITRD}* | tail -1",
112                 message=u"Qemu Kernel initrd image not found!"
113             )
114         else:
115             raise RuntimeError(f"QEMU: Unknown VM image option: {img}")
116         # Computed parameters for QEMU command line.
117         self._params = OptionString(prefix=u"-")
118
119     def add_default_params(self):
120         """Set default QEMU command line parameters."""
121         self._params.add(u"daemonize")
122         self._params.add(u"nodefaults")
123         self._params.add_with_value(
124             u"name", f"vnf{self._opt.get(u'qemu_id')},debug-threads=on"
125         )
126         self._params.add(u"no-user-config")
127         self._params.add(u"nographic")
128         self._params.add(u"enable-kvm")
129         self._params.add_with_value(u"pidfile", self._temp.get(u"pidfile"))
130         self._params.add_with_value(u"cpu", u"host")
131
132         self._params.add_with_value(u"machine", self._opt.get(u"machine_args"))
133         self._params.add_with_value(
134             u"smp", f"{self._opt.get(u'smp')},sockets=1,"
135             f"cores={self._opt.get(u'smp')},threads=1"
136         )
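        # Guest RAM is backed by a shared, hugepage-backed memory file;
        # share=on is required so that vhost-user backends on the host can map
        # the guest memory.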
137         self._params.add_with_value(
138             u"object", f"memory-backend-file,id=mem,"
139             f"size={self._opt.get(u'mem')}M,mem-path=/dev/hugepages,share=on"
140         )
141         self._params.add_with_value(u"m", f"{self._opt.get(u'mem')}M")
142         self._params.add_with_value(u"numa", u"node,memdev=mem")
143         self._params.add_with_value(u"balloon", u"none")
144
145     def add_net_user(self, net=u"10.0.2.0/24"):
146         """Set management port forwarding."""
147         self._params.add_with_value(
148             u"netdev", f"user,id=mgmt,net={net},"
149             f"hostfwd=tcp::{self._vm_info[u'port']}-:22"
150         )
151         self._params.add_with_value(
152             u"device", f"virtio-net,netdev=mgmt"
153         )
154
155     def add_qmp_qga(self):
156         """Set QMP, QGA management."""
157         self._params.add_with_value(
158             u"chardev", f"socket,path={self._temp.get(u'qga')},"
159             f"server,nowait,id=qga0"
160         )
161         self._params.add_with_value(
162             u"device", u"isa-serial,chardev=qga0"
163         )
164         self._params.add_with_value(
165             u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait"
166         )
167
168     def add_serial(self):
169         """Set serial to file redirect."""
170         self._params.add_with_value(
171             u"chardev", f"socket,host=127.0.0.1,"
172             f"port={self._vm_info[u'serial']},id=gnc0,server,nowait"
173         )
174         self._params.add_with_value(
175             u"device", u"isa-serial,chardev=gnc0"
176         )
177         self._params.add_with_value(
178             u"serial", f"file:{self._temp.get(u'log')}"
179         )
180
181     def add_drive_cdrom(self, drive_file, index=None):
182         """Set CD-ROM drive.
183
184         :param drive_file: Path to drive image.
185         :param index: Drive index.
186         :type drive_file: str
187         :type index: int
188         """
189         index = f"index={index}," if index is not None else u""
190         self._params.add_with_value(
191             u"drive", f"file={drive_file},{index}media=cdrom"
192         )
193
194     def add_drive(self, drive_file, drive_format):
195         """Set drive with custom format.
196
197         :param drive_file: Path to drive image.
198         :param drive_format: Drive image format.
199         :type drive_file: str
200         :type drive_format: str
201         """
202         self._params.add_with_value(
203             u"drive", f"file={drive_file},format={drive_format},"
204             u"cache=none,if=virtio,file.locking=off"
205         )
206
207     def add_kernelvm_params(self):
208         """Set KernelVM QEMU parameters."""
209         self._params.add_with_value(
210             u"serial", f"file:{self._temp.get(u'log')}"
211         )
212         self._params.add_with_value(
213             u"fsdev", u"local,id=root9p,path=/,security_model=none"
214         )
215         self._params.add_with_value(
216             u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot"
217         )
218         self._params.add_with_value(
219             u"kernel", f"{self._opt.get(u'img')}"
220         )
221         self._params.add_with_value(
222             u"initrd", f"{self._opt.get(u'initrd')}"
223         )
224         self._params.add_with_value(
225             u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
226             f"root=virtioroot console={self._opt.get(u'console')} "
227             f"tsc=reliable hugepages=512 "
228             f"init={self._temp.get(u'ini')} fastboot'"
229         )
230
231     def add_vhost_user_if(
232             self, socket, server=True, jumbo_frames=False, queue_size=None,
233             queues=1, virtio_feature_mask=None):
234         """Add Vhost-user interface.
235
236         :param socket: Path of the unix socket.
237         :param server: If True the socket shall be a listening socket.
238         :param jumbo_frames: Set True if jumbo frames are used in the test.
239         :param queue_size: Vring queue size.
240         :param queues: Number of queues.
241         :param virtio_feature_mask: Mask of virtio features to be enabled.
242         :type socket: str
243         :type server: bool
244         :type jumbo_frames: bool
245         :type queue_size: int
246         :type queues: int
247         :type virtio_feature_mask: int
248         """
249         self._nic_id += 1
250         self._params.add_with_value(
251             u"chardev", f"socket,id=char{self._nic_id},"
252             f"path={socket}{u',server' if server is True else u''}"
253         )
254         self._params.add_with_value(
255             u"netdev", f"vhost-user,id=vhost{self._nic_id},"
256             f"chardev=char{self._nic_id},queues={queues}"
257         )
258         mac = f"52:54:00:00:{self._opt.get(u'qemu_id'):02x}:" \
259             f"{self._nic_id:02x}"
260         queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
261             if queue_size else u""
262         gso = VirtioFeatureMask.is_feature_enabled(
263             virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_GSO)
264         csum = VirtioFeatureMask.is_feature_enabled(
265             virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_CSUM)
266
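        # PCI addresses are offset by 5 so data-plane NICs start at 00:06.0,
        # keeping clear of the default devices and the management NIC;
        # vectors=2*queues+2 provides one MSI-X vector per RX and TX queue
        # plus the config and control interrupts.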
267         self._params.add_with_value(
268             u"device", f"virtio-net-pci,netdev=vhost{self._nic_id},mac={mac},"
269             f"addr={self._nic_id+5}.0,mq=on,vectors={2 * queues + 2},"
270             f"csum={u'on' if csum else u'off'},"
271             f"gso={u'on' if gso else u'off'},"
272             f"guest_tso4={u'on' if gso else u'off'},"
273             f"guest_tso6={u'on' if gso else u'off'},"
274             f"guest_ecn={u'on' if gso else u'off'},"
275             f"{queue_size}"
276         )
277
278         # Add interface MAC and socket to the node dict.
279         if_data = {u"mac_address": mac, u"socket": socket}
280         if_name = f"vhost{self._nic_id}"
281         self._vm_info[u"interfaces"][if_name] = if_data
282         # Add socket to temporary file list.
283         self._temp[if_name] = socket
284
285     def add_vfio_pci_if(self, pci):
286         """Add VFIO PCI interface.
287
288         :param pci: PCI address of interface.
289         :type pci: str
290         """
291         self._nic_id += 1
292         self._params.add_with_value(
293             u"device", f"vfio-pci,host={pci},addr={self._nic_id+5}.0"
294         )
295
296     def create_kernelvm_config_vpp(self, **kwargs):
297         """Create QEMU VPP config files.
298
299         :param kwargs: Key-value pairs to replace content of VPP configuration
300             file.
301         :type kwargs: dict
302         """
303         startup = f"/etc/vpp/vm_startup_{self._opt.get(u'qemu_id')}.conf"
304         running = f"/etc/vpp/vm_running_{self._opt.get(u'qemu_id')}.exec"
305
306         self._temp[u"startup"] = startup
307         self._temp[u"running"] = running
308         self._opt[u"vnf_bin"] = f"/usr/bin/vpp -c {startup}"
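        # The VM init script launches vnf_bin, i.e. VPP with the startup config
        # above; the "running" exec script generated from the VNF template
        # below is then applied via unix exec once VPP is up.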
309
310         # Create VPP startup configuration.
311         vpp_config = VppConfigGenerator()
312         vpp_config.set_node(self._node)
313         vpp_config.add_unix_nodaemon()
314         vpp_config.add_unix_cli_listen()
315         vpp_config.add_unix_exec(running)
316         vpp_config.add_socksvr()
317         vpp_config.add_main_heap_size(u"512M")
318         vpp_config.add_main_heap_page_size(u"2M")
319         vpp_config.add_statseg_size(u"512M")
320         vpp_config.add_statseg_page_size(u"2M")
321         vpp_config.add_statseg_per_node_counters(u"on")
322         vpp_config.add_buffers_per_numa(107520)
323         vpp_config.add_cpu_main_core(u"0")
324         if self._opt.get(u"smp") > 1:
325             vpp_config.add_cpu_corelist_workers(f"1-{self._opt.get(u'smp')-1}")
326         vpp_config.add_plugin(u"disable", u"default")
327         vpp_config.add_plugin(u"enable", u"ping_plugin.so")
328         if "2vfpt" in self._opt.get(u'vnf'):
329             vpp_config.add_plugin(u"enable", u"avf_plugin.so")
330         if "vhost" in self._opt.get(u'vnf'):
331             vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
332             vpp_config.add_dpdk_dev(u"0000:00:06.0", u"0000:00:07.0")
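            # These guest PCI addresses match the first two virtio-net-pci
            # devices created by add_vhost_user_if (addr=6.0 and 7.0).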
333             vpp_config.add_dpdk_dev_default_rxq(kwargs[u"queues"])
334             vpp_config.add_dpdk_log_level(u"debug")
335             if not kwargs[u"jumbo_frames"]:
336                 vpp_config.add_dpdk_no_multi_seg()
337                 vpp_config.add_dpdk_no_tx_checksum_offload()
338         if "ipsec" in self._opt.get(u'vnf'):
339             vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
340             vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
341             vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
342         if "nat" in self._opt.get(u'vnf'):
343             vpp_config.add_nat(value=u"endpoint-dependent")
344             vpp_config.add_plugin(u"enable", u"nat_plugin.so")
345         vpp_config.write_config(startup)
346
347         # Create VPP running configuration.
348         template = f"{Constants.RESOURCES_TPL}/vm/{self._opt.get(u'vnf')}.exec"
349         exec_cmd_no_error(self._node, f"rm -f {running}", sudo=True)
350
351         with open(template, u"rt") as src_file:
352             src = Template(src_file.read())
353             exec_cmd_no_error(
354                 self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
355                 f"sudo tee {running}"
356             )
357
358     def create_kernelvm_config_testpmd_io(self, **kwargs):
359         """Create QEMU testpmd-io command line.
360
361         :param kwargs: Key-value pairs to construct command line parameters.
362         :type kwargs: dict
363         """
364         pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
365         testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
366             eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
367             eal_driver=False,
368             eal_pci_whitelist0=u"0000:00:06.0",
369             eal_pci_whitelist1=u"0000:00:07.0",
370             eal_in_memory=True,
371             pmd_num_mbufs=16384,
372             pmd_fwd_mode=u"io",
373             pmd_nb_ports=u"2",
374             pmd_portmask=u"0x3",
375             pmd_max_pkt_len=pmd_max_pkt_len,
376             pmd_mbuf_size=u"16384",
377             pmd_rxq=kwargs[u"queues"],
378             pmd_txq=kwargs[u"queues"],
379             pmd_tx_offloads=u"0x0",
380             pmd_nb_cores=str(self._opt.get(u"smp") - 1)
381         )
382
383         self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
384
385     def create_kernelvm_config_testpmd_mac(self, **kwargs):
386         """Create QEMU testpmd-mac command line.
387
388         :param kwargs: Key-value pairs to construct command line parameters.
389         :type kwargs: dict
390         """
391         pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
392         testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
393             eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
394             eal_driver=False,
395             eal_pci_whitelist0=u"0000:00:06.0",
396             eal_pci_whitelist1=u"0000:00:07.0",
397             eal_in_memory=True,
398             pmd_num_mbufs=16384,
399             pmd_fwd_mode=u"mac",
400             pmd_nb_ports=u"2",
401             pmd_portmask=u"0x3",
402             pmd_max_pkt_len=pmd_max_pkt_len,
403             pmd_mbuf_size=u"16384",
404             pmd_eth_peer_0=f"0,{kwargs[u'vif1_mac']}",
405             pmd_eth_peer_1=f"1,{kwargs[u'vif2_mac']}",
406             pmd_rxq=kwargs[u"queues"],
407             pmd_txq=kwargs[u"queues"],
408             pmd_tx_offloads=u"0x0",
409             pmd_nb_cores=str(self._opt.get(u"smp") - 1)
410         )
411
412         self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
413
414     def create_kernelvm_config_iperf3(self):
415         """Create QEMU iperf3 command line."""
416         self._opt[u"vnf_bin"] = u"mkdir /run/sshd; /usr/sbin/sshd -D -d"
417
418     def create_kernelvm_init(self, **kwargs):
419         """Create QEMU init script.
420
421         :param kwargs: Key-value pairs to replace content of init startup file.
422         :type kwargs: dict
423         """
424         init = self._temp.get(u"ini")
425         exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)
426
427         with open(kwargs[u"template"], u"rt") as src_file:
428             src = Template(src_file.read())
429             exec_cmd_no_error(
430                 self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
431                 f"sudo tee {init}"
432             )
433             exec_cmd_no_error(self._node, f"chmod +x {init}", sudo=True)
434
435     def configure_kernelvm_vnf(self, **kwargs):
436         """Create KernelVM VNF configurations.
437
438         :param kwargs: Key-value pairs for templating configs.
439         :type kwargs: dict
440         """
441         if u"vpp" in self._opt.get(u"vnf"):
442             self.create_kernelvm_config_vpp(**kwargs)
443             self.create_kernelvm_init(
444                 template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
445                 vnf_bin=self._opt.get(u"vnf_bin")
446             )
447         elif u"testpmd_io" in self._opt.get(u"vnf"):
448             self.create_kernelvm_config_testpmd_io(**kwargs)
449             self.create_kernelvm_init(
450                 template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
451                 vnf_bin=self._opt.get(u"vnf_bin")
452             )
453         elif u"testpmd_mac" in self._opt.get(u"vnf"):
454             self.create_kernelvm_config_testpmd_mac(**kwargs)
455             self.create_kernelvm_init(
456                 template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
457                 vnf_bin=self._opt.get(u"vnf_bin")
458             )
459         elif u"iperf3" in self._opt.get(u"vnf"):
460             qemu_id = self._opt.get(u'qemu_id') % 2
461             self.create_kernelvm_config_iperf3()
462             self.create_kernelvm_init(
463                 template=f"{Constants.RESOURCES_TPL}/vm/init_iperf3.sh",
464                 vnf_bin=self._opt.get(u"vnf_bin"),
465                 ip_address_l=u"2.2.2.2/30" if qemu_id else u"1.1.1.1/30",
466                 ip_address_r=u"2.2.2.1" if qemu_id else u"1.1.1.2",
467                 ip_route_r=u"1.1.1.0/30" if qemu_id else u"2.2.2.0/30"
468             )
469         else:
470             raise RuntimeError(u"QEMU: Unsupported VNF!")
471
472     def get_qemu_pids(self):
473         """Get QEMU CPU pids.
474
475         :returns: List of QEMU CPU pids.
476         :rtype: list of str
477         """
478         command = f"grep -rwl 'CPU' /proc/$(sudo cat " \
479             f"{self._temp.get(u'pidfile')})/task/*/comm "
480         command += r"| xargs dirname | sed -e 's/\/.*\///g' | uniq"
481
482         stdout, _ = exec_cmd_no_error(self._node, command)
483         return stdout.splitlines()
484
485     def qemu_set_affinity(self, *host_cpus):
486         """Set qemu affinity by getting thread PIDs via QMP and taskset to list
487         of CPU cores. Function tries to execute 3 times to avoid race condition
488         in getting thread PIDs.
489
490         :param host_cpus: List of CPU cores.
491         :type host_cpus: list
492         """
493         for _ in range(3):
494             try:
495                 qemu_cpus = self.get_qemu_pids()
496
497                 if len(qemu_cpus) != len(host_cpus):
498                     sleep(1)
499                     continue
500                 for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
501                     command = f"taskset -pc {host_cpu} {qemu_cpu}"
502                     message = f"QEMU: Set affinity failed " \
503                         f"on {self._node[u'host']}!"
504                     exec_cmd_no_error(
505                         self._node, command, sudo=True, message=message
506                     )
507                 break
508             except (RuntimeError, ValueError):
509                 self.qemu_kill_all()
510                 raise
511         else:
512             self.qemu_kill_all()
513             raise RuntimeError(u"Failed to set Qemu threads affinity!")
514
515     def qemu_set_scheduler_policy(self):
516         """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
517         processes.
518
519         :raises RuntimeError: Set scheduler policy failed.
520         """
521         try:
522             qemu_cpus = self.get_qemu_pids()
523
524             for qemu_cpu in qemu_cpus:
525                 command = f"chrt -r -p 1 {qemu_cpu}"
526                 message = f"QEMU: Set SCHED_RR failed on {self._node[u'host']}"
527                 exec_cmd_no_error(
528                     self._node, command, sudo=True, message=message
529                 )
530         except (RuntimeError, ValueError):
531             self.qemu_kill_all()
532             raise
533
534     def _qemu_qmp_exec(self, cmd):
535         """Execute QMP command.
536
537         QMP is a JSON-based protocol which allows controlling a QEMU instance.
538
539         :param cmd: QMP command to execute.
540         :type cmd: str
541         :returns: Command output in Python representation of JSON format. The
542             { "return": {} } response is QMP's success response. An error
543             response will contain the "error" keyword instead of "return".
544         """
545         # To enter command mode, the qmp_capabilities command must be issued.
546         command = f"echo \"{{{{ \\\"execute\\\": " \
547             f"\\\"qmp_capabilities\\\" }}}}" \
548             f"{{{{ \\\"execute\\\": \\\"{cmd}\\\" }}}}\" | " \
549             f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qmp')}"
550         message = f"QMP execute '{cmd}' failed on {self._node[u'host']}"
551
552         stdout, _ = exec_cmd_no_error(
553             self._node, command, sudo=False, message=message
554         )
555
556         # Skip capabilities negotiation messages.
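        # The reply stream contains the greeting banner, the qmp_capabilities
        # response and finally the response to the actual command, hence
        # index 2 below.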
557         out_list = stdout.splitlines()
558         if len(out_list) < 3:
559             raise RuntimeError(f"Invalid QMP output on {self._node[u'host']}")
560         return json.loads(out_list[2])
561
562     def _qemu_qga_flush(self):
563         """Flush the QGA parser state."""
564         command = f"(printf \"\xFF\"; sleep 1) | sudo -S socat " \
565             f"- UNIX-CONNECT:{self._temp.get(u'qga')}"
566         message = f"QGA flush failed on {self._node[u'host']}"
567         stdout, _ = exec_cmd_no_error(
568             self._node, command, sudo=False, message=message
569         )
570
571         return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()
572
573     def _qemu_qga_exec(self, cmd):
574         """Execute QGA command.
575
576         QGA provides access to a system-level agent via standard QMP commands.
577
578         :param cmd: QGA command to execute.
579         :type cmd: str
580         """
581         command = f"(echo \"{{{{ \\\"execute\\\": " \
582             f"\\\"{cmd}\\\" }}}}\"; sleep 1) | " \
583             f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qga')}"
584         message = f"QGA execute '{cmd}' failed on {self._node[u'host']}"
585         stdout, _ = exec_cmd_no_error(
586             self._node, command, sudo=False, message=message
587         )
588
589         return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()
590
591     def _wait_until_vm_boot(self):
592         """Wait until QEMU VM is booted."""
593         try:
594             getattr(self, f'_wait_{self._opt["vnf"]}')()
595         except AttributeError:
596             self._wait_default()
597
598     def _wait_default(self, retries=60):
599         """Wait until QEMU with VPP is booted.
600
601         :param retries: Number of retries.
602         :type retries: int
603         """
604         for _ in range(retries):
605             command = f"tail -1 {self._temp.get(u'log')}"
606             stdout = u""  # Avoid None in the membership checks below.
607             try:
608                 stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
609                 sleep(1)
610             except RuntimeError:
611                 pass
612             if "vpp " in stdout and "built by" in stdout:
613                 break
614             if u"Press enter to exit" in stdout:
615                 break
616             if u"reboot: Power down" in stdout:
617                 raise RuntimeError(
618                     f"QEMU: NF failed to run on {self._node[u'host']}!"
619                 )
620         else:
621             raise RuntimeError(
622                 f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
623             )
624
625     def _wait_nestedvm(self, retries=12):
626         """Wait until QEMU with NestedVM is booted.
627
628         First, flush QGA until there is output.
629         Then ping the QEMU guest agent every 5s until the VM boots or times out.
630
631         :param retries: Number of retries with 5s between trials.
632         :type retries: int
633         """
634         for _ in range(retries):
635             out = None
636             try:
637                 out = self._qemu_qga_flush()
638             except ValueError:
639                 logger.trace(f"QGA flush unexpected output {out}")
640             # Empty output - VM not booted yet
641             if not out:
642                 sleep(5)
643             else:
644                 break
645         else:
646             raise RuntimeError(
647                 f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
648             )
649         for _ in range(retries):
650             out = None
651             try:
652                 out = self._qemu_qga_exec(u"guest-ping")
653             except ValueError:
654                 logger.trace(f"QGA guest-ping unexpected output {out}")
655             # Empty output - VM not booted yet.
656             if not out:
657                 sleep(5)
658             # Non-error return - VM booted.
659             elif out.get(u"return") is not None:
660                 break
661             # Skip error and wait.
662             elif out.get(u"error") is not None:
663                 sleep(5)
664             else:
665                 # If there is an unexpected output from QGA guest-info, try
666                 # again until timeout.
667                 logger.trace(f"QGA guest-ping unexpected output {out}")
668         else:
669             raise RuntimeError(
670                 f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
671             )
672
673     def _wait_iperf3(self, retries=60):
674         """Wait until QEMU with iPerf3 is booted.
675
676         :param retries: Number of retries.
677         :type retries: int
678         """
679         grep = u"Server listening on 0.0.0.0 port 22."
680         cmd = f"fgrep '{grep}' {self._temp.get(u'log')}"
681         message = f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
682         exec_cmd_no_error(
683             self._node, cmd=cmd, sudo=True, message=message, retries=retries,
684             include_reason=True
685         )
686
687     def _update_vm_interfaces(self):
688         """Update interface names in VM node dict."""
689         # Send guest-network-get-interfaces command via QGA, output example:
690         # {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"},
691         # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]}.
692         out = self._qemu_qga_exec(u"guest-network-get-interfaces")
693         interfaces = out.get(u"return")
694         mac_name = {}
695         if not interfaces:
696             raise RuntimeError(
697                 f"Get VM interface list failed on {self._node[u'host']}"
698             )
699         # Create MAC-name dict.
700         for interface in interfaces:
701             if u"hardware-address" not in interface:
702                 continue
703             mac_name[interface[u"hardware-address"]] = interface[u"name"]
704         # Match interface by MAC and save interface name.
705         for interface in self._vm_info[u"interfaces"].values():
706             mac = interface.get(u"mac_address")
707             if_name = mac_name.get(mac)
708             if if_name is None:
709                 logger.trace(f"Interface name for MAC {mac} not found")
710             else:
711                 interface[u"name"] = if_name
712
713     def qemu_start(self):
714         """Start QEMU and wait until VM boot.
715
716         :returns: VM node info.
717         :rtype: dict
718         """
719         cmd_opts = OptionString()
720         cmd_opts.add(f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch}")
721         cmd_opts.extend(self._params)
722         message = f"QEMU: Start failed on {self._node[u'host']}!"
723         try:
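            # Guest RAM is hugepage-backed (memory-backend-file in
            # add_default_params), so verify enough free huge pages are
            # available before launching QEMU.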
724             DUTSetup.check_huge_page(
725                 self._node, u"/dev/hugepages", int(self._opt.get(u"mem")))
726
727             exec_cmd_no_error(
728                 self._node, cmd_opts, timeout=300, sudo=True, message=message
729             )
730             self._wait_until_vm_boot()
731         except RuntimeError:
732             self.qemu_kill_all()
733             raise
734         return self._vm_info
735
736     def qemu_kill(self):
737         """Kill qemu process."""
738         exec_cmd(
739             self._node, f"chmod +r {self._temp.get(u'pidfile')}", sudo=True
740         )
741         exec_cmd(
742             self._node, f"kill -SIGKILL $(cat {self._temp.get(u'pidfile')})",
743             sudo=True
744         )
745
746         for value in self._temp.values():
747             exec_cmd(self._node, f"cat {value}", sudo=True)
748             exec_cmd(self._node, f"rm -f {value}", sudo=True)
749
750     def qemu_kill_all(self):
751         """Kill all qemu processes on DUT node if specified."""
752         exec_cmd(self._node, u"pkill -SIGKILL qemu", sudo=True)
753
754         for value in self._temp.values():
755             exec_cmd(self._node, f"cat {value}", sudo=True)
756             exec_cmd(self._node, f"rm -f {value}", sudo=True)
757
758     def qemu_version(self):
759         """Return Qemu version.
760
761         :returns: Qemu version.
762         :rtype: str
763         """
764         command = f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch} " \
765             f"--version"
766         try:
767             stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
768             return match(r"QEMU emulator version ([\d.]*)", stdout).group(1)
769         except RuntimeError:
770             self.qemu_kill_all()
771             raise