1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """QEMU utilities library."""
import json

from re import match
from string import Template
from time import sleep

from robot.api import logger

from resources.libraries.python.Constants import Constants
from resources.libraries.python.DpdkUtil import DpdkUtil
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.OptionString import OptionString
from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
32 __all__ = [u"QemuUtils"]
38 # Use one instance of class per tests.
39 ROBOT_LIBRARY_SCOPE = u"TEST CASE"
42 self, node, qemu_id=1, smp=1, mem=512, vnf=None,
43 img=Constants.QEMU_VM_IMAGE):
44 """Initialize QemuUtil class.
46 :param node: Node to run QEMU on.
47 :param qemu_id: QEMU identifier.
48 :param smp: Number of virtual SMP units (cores).
49 :param mem: Amount of memory.
50 :param vnf: Network function workload.
51 :param img: QEMU disk image or kernel image path.
61 self._arch = Topology.get_node_arch(self._node)
64 # Architecture specific options
65 if self._arch == u"aarch64":
66 dpdk_target = u"arm64-armv8a"
67 self._opt[u"machine_args"] = \
68 u"virt,accel=kvm,usb=off,mem-merge=off,gic-version=3"
69 self._opt[u"console"] = u"ttyAMA0"
71 dpdk_target = u"x86_64-native"
72 self._opt[u"machine_args"] = u"pc,accel=kvm,usb=off,mem-merge=off"
73 self._opt[u"console"] = u"ttyS0"
74 self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/" \
75 f"{dpdk_target}-linux-gcc/app"
77 u"host": node[u"host"],
79 u"port": 10021 + qemu_id,
80 u"serial": 4555 + qemu_id,
85 if node[u"port"] != 22:
86 self._vm_info[u"host_port"] = node[u"port"]
87 self._vm_info[u"host_username"] = node[u"username"]
88 self._vm_info[u"host_password"] = node[u"password"]
90 self._opt[u"qemu_id"] = qemu_id
91 self._opt[u"mem"] = int(mem)
92 self._opt[u"smp"] = int(smp)
93 self._opt[u"img"] = img
94 self._opt[u"vnf"] = vnf
97 self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log"
98 self._temp[u"pidfile"] = f"/run/qemu_{qemu_id}.pid"
99 if img == Constants.QEMU_VM_IMAGE:
100 self._temp[u"qmp"] = f"/run/qmp_{qemu_id}.sock"
101 self._temp[u"qga"] = f"/run/qga_{qemu_id}.sock"
102 elif img == Constants.QEMU_VM_KERNEL:
103 self._opt[u"img"], _ = exec_cmd_no_error(
104 node, f"ls -1 {Constants.QEMU_VM_KERNEL}* | tail -1",
105 message=u"Qemu Kernel VM image not found!"
107 self._temp[u"ini"] = f"/etc/vm_init_{qemu_id}.conf"
108 self._opt[u"initrd"], _ = exec_cmd_no_error(
109 node, f"ls -1 {Constants.QEMU_VM_KERNEL_INITRD}* | tail -1",
110 message=u"Qemu Kernel initrd image not found!"
113 raise RuntimeError(f"QEMU: Unknown VM image option: {img}")
114 # Computed parameters for QEMU command line.
115 self._params = OptionString(prefix=u"-")
117 def add_default_params(self):
118 """Set default QEMU command line parameters."""
119 self._params.add(u"daemonize")
120 self._params.add(u"nodefaults")
121 self._params.add_with_value(
122 u"name", f"vnf{self._opt.get(u'qemu_id')},debug-threads=on"
124 self._params.add(u"no-user-config")
125 self._params.add(u"nographic")
126 self._params.add(u"enable-kvm")
127 self._params.add_with_value(u"pidfile", self._temp.get(u"pidfile"))
128 self._params.add_with_value(u"cpu", u"host")
130 self._params.add_with_value(u"machine", self._opt.get(u"machine_args"))
131 self._params.add_with_value(
132 u"smp", f"{self._opt.get(u'smp')},sockets=1,"
133 f"cores={self._opt.get(u'smp')},threads=1"
135 self._params.add_with_value(
136 u"object", f"memory-backend-file,id=mem,"
137 f"size={self._opt.get(u'mem')}M,mem-path=/dev/hugepages,share=on"
139 self._params.add_with_value(u"m", f"{self._opt.get(u'mem')}M")
140 self._params.add_with_value(u"numa", u"node,memdev=mem")
141 self._params.add_with_value(u"balloon", u"none")
143 def add_net_user(self):
144 """Set managment port forwarding."""
145 self._params.add_with_value(
146 u"netdev", f"user,id=mgmt,net=192.168.76.0/24,"
147 f"hostfwd=tcp::{self._vm_info[u'port']}-:22"
149 self._params.add_with_value(
150 u"device", f"virtio-net,netdev=mgmt"
153 def add_qmp_qga(self):
154 """Set QMP, QGA management."""
155 self._params.add_with_value(
156 u"chardev", f"socket,path={self._temp.get(u'qga')},"
157 f"server,nowait,id=qga0"
159 self._params.add_with_value(u"device", u"isa-serial,chardev=qga0")
160 self._params.add_with_value(
161 u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait"
164 def add_serial(self):
165 """Set serial to file redirect."""
166 self._params.add_with_value(
167 u"chardev", f"socket,host=127.0.0.1,"
168 f"port={self._vm_info[u'serial']},id=gnc0,server,nowait")
169 self._params.add_with_value(u"device", u"isa-serial,chardev=gnc0")
170 self._params.add_with_value(
171 u"serial", f"file:{self._temp.get(u'log')}"
174 def add_drive_cdrom(self, drive_file):
177 :param drive_file: Path to drive image.
178 :type drive_file: str
180 self._params.add_with_value(
181 u"drive", f"file={drive_file},media=cdrom"
184 def add_drive(self, drive_file, drive_format):
185 """Set drive with custom format.
187 :param drive_file: Path to drive image.
188 :param drive_format: Drive image format.
189 :type drive_file: str
190 :type drive_format: str
192 self._params.add_with_value(
193 u"drive", f"file={drive_file},format={drive_format},"
194 u"cache=none,if=virtio,file.locking=off"
197 def add_kernelvm_params(self):
198 """Set KernelVM QEMU parameters."""
199 self._params.add_with_value(
200 u"serial", f"file:{self._temp.get(u'log')}"
202 self._params.add_with_value(
203 u"fsdev", u"local,id=root9p,path=/,security_model=none"
205 self._params.add_with_value(
206 u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot"
208 self._params.add_with_value(u"kernel", f"{self._opt.get(u'img')}")
209 self._params.add_with_value(u"initrd", f"{self._opt.get(u'initrd')}")
210 self._params.add_with_value(
211 u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
212 f"root=virtioroot console={self._opt.get(u'console')} "
213 f"tsc=reliable hugepages=256 "
214 f"init={self._temp.get(u'ini')} fastboot'"
217 def add_vhost_user_if(
218 self, socket, server=True, jumbo_frames=False, queue_size=None,
219 queues=1, csum=False, gso=False):
220 """Add Vhost-user interface.
222 :param socket: Path of the unix socket.
223 :param server: If True the socket shall be a listening socket.
224 :param jumbo_frames: Set True if jumbo frames are used in the test.
225 :param queue_size: Vring queue size.
226 :param queues: Number of queues.
227 :param csum: Checksum offloading.
228 :param gso: Generic segmentation offloading.
231 :type jumbo_frames: bool
232 :type queue_size: int
238 self._params.add_with_value(
239 u"chardev", f"socket,id=char{self._vhost_id},"
240 f"path={socket}{u',server' if server is True else u''}"
242 self._params.add_with_value(
243 u"netdev", f"vhost-user,id=vhost{self._vhost_id},"
244 f"chardev=char{self._vhost_id},queues={queues}"
246 mac = f"52:54:00:00:{self._opt.get(u'qemu_id'):02x}:" \
247 f"{self._vhost_id:02x}"
248 queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
249 if queue_size else u""
250 self._params.add_with_value(
251 u"device", f"virtio-net-pci,netdev=vhost{self._vhost_id},mac={mac},"
252 f"addr={self._vhost_id+5}.0,mq=on,vectors={2 * queues + 2},"
253 f"csum={u'on' if csum else u'off'},gso={u'on' if gso else u'off'},"
254 f"guest_tso4=off,guest_tso6=off,guest_ecn=off,"
258 # Add interface MAC and socket to the node dict.
259 if_data = {u"mac_address": mac, u"socket": socket}
260 if_name = f"vhost{self._vhost_id}"
261 self._vm_info[u"interfaces"][if_name] = if_data
262 # Add socket to temporary file list.
263 self._temp[if_name] = socket
265 def add_vfio_pci_if(self, pci):
266 """Add VFIO PCI interface.
268 :param pci: PCI address of interface.
271 self._params.add_with_value(u"device", f"vfio-pci,host={pci}")
273 def create_kernelvm_config_vpp(self, **kwargs):
274 """Create QEMU VPP config files.
276 :param kwargs: Key-value pairs to replace content of VPP configuration
280 startup = f"/etc/vpp/vm_startup_{self._opt.get(u'qemu_id')}.conf"
281 running = f"/etc/vpp/vm_running_{self._opt.get(u'qemu_id')}.exec"
283 self._temp[u"startup"] = startup
284 self._temp[u"running"] = running
285 self._opt[u"vnf_bin"] = f"/usr/bin/vpp -c {startup}"
287 # Create VPP startup configuration.
288 vpp_config = VppConfigGenerator()
289 vpp_config.set_node(self._node)
290 vpp_config.add_unix_nodaemon()
291 vpp_config.add_unix_cli_listen()
292 vpp_config.add_unix_exec(running)
293 vpp_config.add_socksvr()
294 vpp_config.add_buffers_per_numa(107520)
295 vpp_config.add_cpu_main_core(u"0")
296 if self._opt.get(u"smp") > 1:
297 vpp_config.add_cpu_corelist_workers(f"1-{self._opt.get(u'smp')-1}")
298 vpp_config.add_plugin(u"disable", u"default")
299 vpp_config.add_plugin(u"enable", u"ping_plugin.so")
300 if "ipsec" in self._opt.get(u'vnf'):
301 vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
302 vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
303 vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
304 if "2vfpt" in self._opt.get(u'vnf'):
305 vpp_config.add_plugin(u"enable", u"avf_plugin.so")
306 if "vhost" in self._opt.get(u'vnf'):
307 vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
308 vpp_config.add_dpdk_dev(u"0000:00:06.0", u"0000:00:07.0")
309 vpp_config.add_dpdk_dev_default_rxq(kwargs[u"queues"])
310 vpp_config.add_dpdk_log_level(u"debug")
311 if not kwargs[u"jumbo_frames"]:
312 vpp_config.add_dpdk_no_multi_seg()
313 vpp_config.add_dpdk_no_tx_checksum_offload()
314 vpp_config.write_config(startup)
316 # Create VPP running configuration.
317 template = f"{Constants.RESOURCES_TPL_VM}/{self._opt.get(u'vnf')}.exec"
318 exec_cmd_no_error(self._node, f"rm -f {running}", sudo=True)
320 with open(template, u"rt") as src_file:
321 src = Template(src_file.read())
323 self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
324 f"sudo tee {running}"
327 def create_kernelvm_config_testpmd_io(self, **kwargs):
328 """Create QEMU testpmd-io command line.
330 :param kwargs: Key-value pairs to construct command line parameters.
333 pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
334 testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
335 eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
337 eal_pci_whitelist0=u"0000:00:06.0",
338 eal_pci_whitelist1=u"0000:00:07.0",
344 pmd_max_pkt_len=pmd_max_pkt_len,
345 pmd_mbuf_size=u"16384",
346 pmd_rxq=kwargs[u"queues"],
347 pmd_txq=kwargs[u"queues"],
348 pmd_tx_offloads='0x0',
349 pmd_nb_cores=str(self._opt.get(u"smp") - 1)
352 self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
354 def create_kernelvm_config_testpmd_mac(self, **kwargs):
355 """Create QEMU testpmd-mac command line.
357 :param kwargs: Key-value pairs to construct command line parameters.
360 pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
361 testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
362 eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
364 eal_pci_whitelist0=u"0000:00:06.0",
365 eal_pci_whitelist1=u"0000:00:07.0",
371 pmd_max_pkt_len=pmd_max_pkt_len,
372 pmd_mbuf_size=u"16384",
373 pmd_eth_peer_0=f"0,{kwargs[u'vif1_mac']}",
374 pmd_eth_peer_1=f"1,{kwargs[u'vif2_mac']}",
375 pmd_rxq=kwargs[u"queues"],
376 pmd_txq=kwargs[u"queues"],
377 pmd_tx_offloads=u"0x0",
378 pmd_nb_cores=str(self._opt.get(u"smp") - 1)
381 self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
383 def create_kernelvm_init(self, **kwargs):
384 """Create QEMU init script.
386 :param kwargs: Key-value pairs to replace content of init startup file.
389 template = f"{Constants.RESOURCES_TPL_VM}/init.sh"
390 init = self._temp.get(u"ini")
391 exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)
393 with open(template, u"rt") as src_file:
394 src = Template(src_file.read())
396 self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
399 exec_cmd_no_error(self._node, f"chmod +x {init}", sudo=True)
401 def configure_kernelvm_vnf(self, **kwargs):
402 """Create KernelVM VNF configurations.
404 :param kwargs: Key-value pairs for templating configs.
407 if u"vpp" in self._opt.get(u"vnf"):
408 self.create_kernelvm_config_vpp(**kwargs)
409 elif u"testpmd_io" in self._opt.get(u"vnf"):
410 self.create_kernelvm_config_testpmd_io(**kwargs)
411 elif u"testpmd_mac" in self._opt.get(u"vnf"):
412 self.create_kernelvm_config_testpmd_mac(**kwargs)
414 raise RuntimeError(u"QEMU: Unsupported VNF!")
415 self.create_kernelvm_init(vnf_bin=self._opt.get(u"vnf_bin"))
417 def get_qemu_pids(self):
418 """Get QEMU CPU pids.
420 :returns: List of QEMU CPU pids.
423 command = f"grep -rwl 'CPU' /proc/$(sudo cat " \
424 f"{self._temp.get(u'pidfile')})/task/*/comm "
425 command += r"| xargs dirname | sed -e 's/\/.*\///g' | uniq"
427 stdout, _ = exec_cmd_no_error(self._node, command)
428 return stdout.splitlines()
    def qemu_set_affinity(self, *host_cpus):
        """Set qemu affinity by getting thread PIDs via QMP and taskset to list
        of CPU cores. Function tries to execute 3 times to avoid race condition
        in getting thread PIDs.

        :param host_cpus: List of CPU cores.
        :type host_cpus: list
        """
        # NOTE(review): this method is truncated in the file as given — the
        # 3-attempt retry loop and the try/except scaffolding that the
        # docstring and the dangling `except` below imply are missing, so the
        # method does not parse.  Restore from upstream before use; the code
        # lines themselves are kept unchanged here.
        qemu_cpus = self.get_qemu_pids()

        # Mismatch between vCPU pids and requested host cores — presumably a
        # retry/sleep followed here; the body of this `if` is missing.
        if len(qemu_cpus) != len(host_cpus):
        # Pin each QEMU vCPU thread to its corresponding host core.
        for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
            command = f"taskset -pc {host_cpu} {qemu_cpu}"
            message = f"QEMU: Set affinity failed " \
                f"on {self._node[u'host']}!"
                self._node, command, sudo=True, message=message
        except (RuntimeError, ValueError):
        raise RuntimeError(u"Failed to set Qemu threads affinity!")
460 def qemu_set_scheduler_policy(self):
461 """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
464 :raises RuntimeError: Set scheduler policy failed.
467 qemu_cpus = self.get_qemu_pids()
469 for qemu_cpu in qemu_cpus:
470 command = f"chrt -r -p 1 {qemu_cpu}"
471 message = f"QEMU: Set SCHED_RR failed on {self._node[u'host']}"
473 self._node, command, sudo=True, message=message
475 except (RuntimeError, ValueError):
479 def _qemu_qmp_exec(self, cmd):
480 """Execute QMP command.
482 QMP is JSON based protocol which allows to control QEMU instance.
484 :param cmd: QMP command to execute.
486 :returns: Command output in python representation of JSON format. The
487 { "return": {} } response is QMP's success response. An error
488 response will contain the "error" keyword instead of "return".
490 # To enter command mode, the qmp_capabilities command must be issued.
491 command = f"echo \"{{{{ \\\"execute\\\": " \
492 f"\\\"qmp_capabilities\\\" }}}}" \
493 f"{{{{ \\\"execute\\\": \\\"{cmd}\\\" }}}}\" | " \
494 f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qmp')}"
495 message = f"QMP execute '{cmd}' failed on {self._node[u'host']}"
497 stdout, _ = exec_cmd_no_error(
498 self._node, command, sudo=False, message=message
501 # Skip capabilities negotiation messages.
502 out_list = stdout.splitlines()
503 if len(out_list) < 3:
504 raise RuntimeError(f"Invalid QMP output on {self._node[u'host']}")
505 return json.loads(out_list[2])
507 def _qemu_qga_flush(self):
508 """Flush the QGA parser state."""
509 command = f"(printf \"\xFF\"; sleep 1) | sudo -S socat " \
510 f"- UNIX-CONNECT:{self._temp.get(u'qga')}"
511 message = f"QGA flush failed on {self._node[u'host']}"
512 stdout, _ = exec_cmd_no_error(
513 self._node, command, sudo=False, message=message
516 return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()
518 def _qemu_qga_exec(self, cmd):
519 """Execute QGA command.
521 QGA provide access to a system-level agent via standard QMP commands.
523 :param cmd: QGA command to execute.
526 command = f"(echo \"{{{{ \\\"execute\\\": " \
527 f"\\\"{cmd}\\\" }}}}\"; sleep 1) | " \
528 f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qga')}"
529 message = f"QGA execute '{cmd}' failed on {self._node[u'host']}"
530 stdout, _ = exec_cmd_no_error(
531 self._node, command, sudo=False, message=message
534 return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()
536 def _wait_until_vm_boot(self):
537 """Wait until QEMU VM is booted."""
539 getattr(self, f'_wait_{self._opt["vnf"]}')()
540 except AttributeError:
    def _wait_default(self, retries=60):
        """Wait until QEMU with VPP is booted.

        :param retries: Number of retries.
        :type retries: int
        """
        # NOTE(review): this method is truncated in the file as given — the
        # try/except around the tail command, the sleep/break statements under
        # each condition, and the RuntimeError raises wrapping the two
        # f-string messages below are missing, so the method does not parse.
        # Restore from upstream before use; code lines are kept unchanged.
        for _ in range(retries):
            # Poll the last line of the VM serial log.
            command = f"tail -1 {self._temp.get(u'log')}"
            stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
            # VPP banner observed - VM booted (body of this `if` is missing).
            if "vpp " in stdout and "built by" in stdout:
            if u"Press enter to exit" in stdout:
            # Guest powered down instead of booting - fatal.
            if u"reboot: Power down" in stdout:
                f"QEMU: NF failed to run on {self._node[u'host']}!"
            f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
    def _wait_nestedvm(self, retries=12):
        """Wait until QEMU with NestedVM is booted.

        First try to flush qga until there is output.
        Then ping QEMU guest agent each 5s until VM booted or timeout.

        :param retries: Number of retries with 5s between trials.
        :type retries: int
        """
        # NOTE(review): this method is truncated in the file as given — the
        # try/except around each QGA call, the sleep/break/continue statements
        # under each branch, and the RuntimeError raises wrapping the timeout
        # messages are missing, so the method does not parse.  Restore from
        # upstream before use; code lines are kept unchanged.
        for _ in range(retries):
            out = self._qemu_qga_flush()
            logger.trace(f"QGA qga flush unexpected output {out}")
            # Empty output - VM not booted yet
            f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
        for _ in range(retries):
            out = self._qemu_qga_exec(u"guest-ping")
            logger.trace(f"QGA guest-ping unexpected output {out}")
            # Empty output - VM not booted yet.
            # Non-error return - VM booted.
            elif out.get(u"return") is not None:
            # Skip error and wait.
            elif out.get(u"error") is not None:
            # If there is an unexpected output from QGA guest-info, try
            # again until timeout.
            logger.trace(f"QGA guest-ping unexpected output {out}")
            f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
618 def _update_vm_interfaces(self):
619 """Update interface names in VM node dict."""
620 # Send guest-network-get-interfaces command via QGA, output example:
621 # {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"},
622 # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]}.
623 out = self._qemu_qga_exec(u"guest-network-get-interfaces")
624 interfaces = out.get(u"return")
628 f"Get VM interface list failed on {self._node[u'host']}"
630 # Create MAC-name dict.
631 for interface in interfaces:
632 if u"hardware-address" not in interface:
634 mac_name[interface[u"hardware-address"]] = interface[u"name"]
635 # Match interface by MAC and save interface name.
636 for interface in self._vm_info[u"interfaces"].values():
637 mac = interface.get(u"mac_address")
638 if_name = mac_name.get(mac)
640 logger.trace(f"Interface name for MAC {mac} not found")
642 interface[u"name"] = if_name
644 def qemu_start(self):
645 """Start QEMU and wait until VM boot.
647 :returns: VM node info.
650 cmd_opts = OptionString()
651 cmd_opts.add(f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch}")
652 cmd_opts.extend(self._params)
653 message = f"QEMU: Start failed on {self._node[u'host']}!"
655 DUTSetup.check_huge_page(
656 self._node, u"/dev/hugepages", int(self._opt.get(u"mem")))
659 self._node, cmd_opts, timeout=300, sudo=True, message=message
661 self._wait_until_vm_boot()
668 """Kill qemu process."""
670 self._node, f"chmod +r {self._temp.get(u'pidfile')}", sudo=True
673 self._node, f"kill -SIGKILL $(cat {self._temp.get(u'pidfile')})",
677 for value in self._temp.values():
678 exec_cmd(self._node, f"cat {value}", sudo=True)
679 exec_cmd(self._node, f"rm -f {value}", sudo=True)
681 def qemu_kill_all(self):
682 """Kill all qemu processes on DUT node if specified."""
683 exec_cmd(self._node, u"pkill -SIGKILL qemu", sudo=True)
685 for value in self._temp.values():
686 exec_cmd(self._node, f"cat {value}", sudo=True)
687 exec_cmd(self._node, f"rm -f {value}", sudo=True)
689 def qemu_version(self):
690 """Return Qemu version.
692 :returns: Qemu version.
695 command = f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch} " \
698 stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
699 return match(r"QEMU emulator version ([\d.]*)", stdout).group(1)