X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FQemuUtils.py;h=bc854350dd7cfac5563d9789a1b4e2b10904c8c2;hp=282b157eb812f09a05fd04a008ba5f6f1c94b522;hb=bdc400b3feacd10d8130dedaa7df8df2d2d57feb;hpb=56873a866fc17056e467fe7cafb2c987181f209a

diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index 282b157eb8..bc854350dd 100644
--- a/resources/libraries/python/QemuUtils.py
+++ b/resources/libraries/python/QemuUtils.py
@@ -39,9 +39,9 @@ class QemuUtils(object):
         self._qemu_opt['smp'] = '-smp 1,sockets=1,cores=1,threads=1'
         # Daemonize the QEMU process after initialization. Default one
         # management interface.
-        self._qemu_opt['options'] = '-daemonize -enable-kvm ' \
+        self._qemu_opt['options'] = '-cpu host -daemonize -enable-kvm ' \
             '-machine pc-1.0,accel=kvm,usb=off,mem-merge=off ' \
-            '-net nic,macaddr=52:54:00:00:02:01'
+            '-net nic,macaddr=52:54:00:00:02:01 -balloon none'
         self._qemu_opt['ssh_fwd_port'] = 10022
         # Default serial console port
         self._qemu_opt['serial_port'] = 4556
@@ -49,6 +49,8 @@
         self._qemu_opt['mem_size'] = 512
         # Default huge page mount point, required for Vhost-user interfaces.
         self._qemu_opt['huge_mnt'] = '/mnt/huge'
+        # Default do not allocate huge pages.
+        self._qemu_opt['huge_allocate'] = False
         # Default image for CSIT virl setup
         self._qemu_opt['disk_image'] = '/var/lib/vm/vhost-nested.img'
         # VM node info dict
@@ -102,7 +104,7 @@
         :param mem_size: RAM size in Mega Bytes.
         :type mem_size: int
         """
-        self._qemu_opt['mem_size'] = mem_size
+        self._qemu_opt['mem_size'] = int(mem_size)
 
     def qemu_set_huge_mnt(self, huge_mnt):
         """Set hugefile mount point.
@@ -112,6 +114,10 @@
         """
         self._qemu_opt['huge_mnt'] = huge_mnt
 
+    def qemu_set_huge_allocate(self):
+        """Set flag to allocate more huge pages if needed."""
+        self._qemu_opt['huge_allocate'] = True
+
     def qemu_set_disk_image(self, disk_image):
         """Set disk image.
 
@@ -120,6 +126,29 @@
         """
         self._qemu_opt['disk_image'] = disk_image
 
+    def qemu_set_affinity(self, *host_cpus):
+        """Set qemu affinity by getting thread PIDs via QMP and taskset to list
+        of CPU cores.
+
+        :param host_cpus: List of CPU cores.
+        :type host_cpus: list
+        """
+        qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']
+
+        if len(qemu_cpus) != len(host_cpus):
+            logger.debug('Host CPU count {0}, Qemu Thread count {1}'.format(
+                len(host_cpus), len(qemu_cpus)))
+            raise ValueError('Host CPU count must match Qemu Thread count')
+
+        for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
+            cmd = 'taskset -p {0} {1}'.format(hex(1 << int(host_cpu)),
+                                              qemu_cpu['thread_id'])
+            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+            if int(ret_code) != 0:
+                logger.debug('Set affinity failed {0}'.format(stderr))
+                raise RuntimeError('Set affinity failed on {0}'.format(
+                    self._node['host']))
+
     def qemu_set_node(self, node):
         """Set node to run QEMU on.
 
@@ -157,9 +186,11 @@
         # e.g. vhost1 MAC is 52:54:00:00:04:01
         if mac is None:
             mac = '52:54:00:00:04:{0:02x}'.format(self._vhost_id)
+        extend_options = 'csum=off,gso=off,guest_tso4=off,guest_tso6=off,'\
+            'guest_ecn=off,mrg_rxbuf=off'
         # Create Virtio network device.
-        device = ' -device virtio-net-pci,netdev=vhost{0},mac={1}'.format(
-            self._vhost_id, mac)
+        device = ' -device virtio-net-pci,netdev=vhost{0},mac={1},{2}'.format(
+            self._vhost_id, mac, extend_options)
         self._qemu_opt['options'] += device
         # Add interface MAC and socket to the node dict
         if_data = {'mac_address': mac, 'socket': socket}
@@ -196,6 +227,21 @@
                 self._node['host']))
         return json.loads(out_list[2])
 
+    def _qemu_qga_flush(self):
+        """Flush the QGA parser state.
+        """
+        qga_cmd = 'printf "\xFF" | sudo -S nc ' \
+            '-q 1 -U ' + self.__QGA_SOCK
+        (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
+        if 0 != int(ret_code):
+            logger.debug('QGA execute failed {0}'.format(stderr))
+            raise RuntimeError('QGA execute "{0}" failed on {1}'.format(
+                qga_cmd, self._node['host']))
+        logger.trace(stdout)
+        if not stdout:
+            return {}
+        return json.loads(stdout.split('\n', 1)[0])
+
     def _qemu_qga_exec(self, cmd):
         """Execute QGA command.
 
@@ -229,6 +275,7 @@
             if time() - start > timeout:
                 raise RuntimeError('timeout, VM {0} not booted on {1}'.format(
                     self._qemu_opt['disk_image'], self._node['host']))
+            self._qemu_qga_flush()
             out = self._qemu_qga_exec('guest-ping')
             # Empty output - VM not booted yet
             if not out:
@@ -270,7 +317,7 @@
             else:
                 interface['name'] = if_name
 
-    def _huge_page_check(self):
+    def _huge_page_check(self, allocate=False):
         """Huge page check."""
         huge_mnt = self._qemu_opt.get('huge_mnt')
         mem_size = self._qemu_opt.get('mem_size')
@@ -279,12 +326,35 @@
         regex = re.compile(r'HugePages_Free:\s+(\d+)')
         match = regex.search(output)
         huge_free = int(match.group(1))
+        regex = re.compile(r'HugePages_Total:\s+(\d+)')
+        match = regex.search(output)
+        huge_total = int(match.group(1))
         regex = re.compile(r'Hugepagesize:\s+(\d+)')
         match = regex.search(output)
         huge_size = int(match.group(1))
+        # Check if memory requested by qemu is available on host
         if (mem_size * 1024) > (huge_free * huge_size):
-            raise RuntimeError('Not enough free huge pages {0} kB, required '
-                               '{1} MB'.format(huge_free * huge_size, mem_size))
+            # If we want to allocate hugepages dynamically
+            if allocate:
+                mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
+                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
+                max_map_count = huge_to_allocate*4
+                # Increase maximum number of memory map areas a process may have
+                cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
+                    max_map_count)
+                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+                # Increase hugepage count
+                cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
+                    huge_to_allocate)
+                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+                if int(ret_code) != 0:
+                    logger.debug('Allocate huge pages failed {0}'.format(stderr))
+                    raise RuntimeError('Allocate huge pages failed on {0}'.format(
+                        self._node['host']))
+            # If we do not want to allocate dynamically, end with error
+            else:
+                raise RuntimeError('Not enough free huge pages: {0} pages, '
+                                   '{1} kB'.format(huge_free, huge_free * huge_size))
         # Check if huge pages mount point exist
         has_huge_mnt = False
         (_, output, _) = self._ssh.exec_command('cat /proc/mounts')
@@ -297,6 +367,12 @@
                 break
         # If huge page mount point not exist create one
         if not has_huge_mnt:
+            cmd = 'mkdir -p {0}'.format(huge_mnt)
+            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+            if int(ret_code) != 0:
+                logger.debug('Create mount dir failed: {0}'.format(stderr))
+                raise RuntimeError('Create mount dir failed on {0}'.format(
+                    self._node['host']))
             cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
                 huge_mnt)
             (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
@@ -320,7 +396,11 @@
         mem = '-object memory-backend-file,id=mem,size={0}M,mem-path={1},' \
             'share=on -m {0} -numa node,memdev=mem'.format(
                 self._qemu_opt.get('mem_size'), self._qemu_opt.get('huge_mnt'))
-        self._huge_page_check()
+
+        # By default check only if hugepages are available.
+        # If 'huge_allocate' is set to True, try to allocate as well.
+        self._huge_page_check(allocate=self._qemu_opt.get('huge_allocate'))
+
         # Setup QMP via unix socket
         qmp = '-qmp unix:{0},server,nowait'.format(self.__QMP_SOCK)
         # Setup serial console
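
A minimal standalone sketch (not part of QemuUtils.py) of the hugepage sizing
arithmetic that _huge_page_check(allocate=True) performs above; the helper name
and the worked numbers are illustrative, only the formula comes from the patch.

def hugepages_to_allocate(mem_size_mb, huge_free, huge_total, huge_size_kb):
    """Return (nr_hugepages, max_map_count) as computed by the patch.

    :param mem_size_mb: VM RAM requested by QEMU, in MB.
    :param huge_free: HugePages_Free from /proc/meminfo.
    :param huge_total: HugePages_Total from /proc/meminfo.
    :param huge_size_kb: Hugepagesize from /proc/meminfo, in kB.
    """
    # Shortfall between what QEMU needs and what is currently free, in kB.
    mem_needed_kb = abs((huge_free * huge_size_kb) - (mem_size_mb * 1024))
    # Grow the pool by twice the shortfall (in pages) on top of the current
    # total, then allow four memory map areas per huge page.
    nr_hugepages = ((mem_needed_kb // huge_size_kb) * 2) + huge_total
    max_map_count = nr_hugepages * 4
    return nr_hugepages, max_map_count

# Worked example: a 512 MB VM with 2048 kB hugepages, 100 free of 512 total:
# shortfall = 512*1024 - 100*2048 = 319488 kB = 156 pages,
# nr_hugepages = 156*2 + 512 = 824, max_map_count = 824*4 = 3296.
print(hugepages_to_allocate(512, 100, 512, 2048))    # -> (824, 3296)

These two values are what the patch writes to /proc/sys/vm/nr_hugepages and
/proc/sys/vm/max_map_count via "echo ... | sudo tee" on the host.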
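
And a usage sketch of the knobs this patch adds, assuming the rest of the
QemuUtils API (qemu_set_node, qemu_add_vhost_user_if, qemu_start) behaves as in
the existing CSIT library; the node dict, socket path and core numbers below
are made-up placeholders, not values from this change.

from resources.libraries.python.QemuUtils import QemuUtils

# Placeholder DUT entry; the real dict comes from the parsed topology file.
dut_node = {'host': '10.0.0.1', 'port': 22,
            'username': 'testuser', 'password': 'test'}

qemu = QemuUtils()
qemu.qemu_set_node(dut_node)
qemu.qemu_set_mem_size(2048)                 # coerced to int by this patch
qemu.qemu_set_huge_mnt('/mnt/huge')
qemu.qemu_set_huge_allocate()                # allow nr_hugepages to grow on demand
qemu.qemu_add_vhost_user_if('/tmp/sock1')    # virtio-net-pci now has offloads disabled
vm_node = qemu.qemu_start()
# Pin the vCPU threads reported by QMP 'query-cpus' to host cores 2 and 3;
# qemu_set_affinity() expects exactly one host core per QEMU vCPU thread.
qemu.qemu_set_affinity(2, 3)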