CSIT-366 IPv4dp - baseline vhost-user
[csit.git] / resources / libraries / python / QemuUtils.py
index 3751cd6..bc85435 100644 (file)
@@ -53,8 +53,6 @@ class QemuUtils(object):
         self._qemu_opt['huge_allocate'] = False
         # Default image for CSIT virl setup
         self._qemu_opt['disk_image'] = '/var/lib/vm/vhost-nested.img'
-        # Affinity of qemu processes
-        self._qemu_opt['affinity'] = False
         # VM node info dict
         self._vm_info = {
             'type': NodeType.VM,
@@ -128,13 +126,28 @@ class QemuUtils(object):
         """
         self._qemu_opt['disk_image'] = disk_image
 
-    def qemu_set_affinity(self, mask):
-        """Set qemu affinity by taskset with cpu mask.
+    def qemu_set_affinity(self, *host_cpus):
+        """Set qemu affinity by getting thread PIDs via QMP and taskset to list
+        of CPU cores.
 
-       :param mask: Hex CPU mask.
-       :type mask: str
+        :param host_cpus: List of CPU cores.
+        :type host_cpus: list
         """
-        self._qemu_opt['affinity'] = mask
+        qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']
+
+        if len(qemu_cpus) != len(host_cpus):
+            logger.debug('Host CPU count {0}, Qemu Thread count {1}'.format(
+                len(host_cpus), len(qemu_cpus)))
+            raise ValueError('Host CPU count must match Qemu Thread count')
+
+        for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
+            cmd = 'taskset -p {0} {1}'.format(hex(1 << int(host_cpu)),
+                                              qemu_cpu['thread_id'])
+            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+            if int(ret_code) != 0:
+                logger.debug('Set affinity failed {0}'.format(stderr))
+                raise RuntimeError('Set affinity failed on {0}'.format(
+                    self._node['host']))
 
     def qemu_set_node(self, node):
         """Set node to run QEMU on.
@@ -304,7 +317,7 @@ class QemuUtils(object):
             else:
                 interface['name'] = if_name
 
-    def _huge_page_check(self):
+    def _huge_page_check(self, allocate=False):
         """Huge page check."""
         huge_mnt = self._qemu_opt.get('huge_mnt')
         mem_size = self._qemu_opt.get('mem_size')
@@ -313,12 +326,35 @@ class QemuUtils(object):
         regex = re.compile(r'HugePages_Free:\s+(\d+)')
         match = regex.search(output)
         huge_free = int(match.group(1))
+        regex = re.compile(r'HugePages_Total:\s+(\d+)')
+        match = regex.search(output)
+        huge_total = int(match.group(1))
         regex = re.compile(r'Hugepagesize:\s+(\d+)')
         match = regex.search(output)
         huge_size = int(match.group(1))
+        # Check if memory requested by qemu is available on host
         if (mem_size * 1024) > (huge_free * huge_size):
-            raise RuntimeError('Not enough free huge pages {0} kB, required '
-                '{1} MB'.format(huge_free * huge_size, mem_size))
+            # If we want to allocate hugepage dynamically
+            if allocate:
+                mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
+                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
+                max_map_count = huge_to_allocate*4
+                # Increase maximum number of memory map areas a process may have
+                cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
+                    max_map_count)
+                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+                # Increase hugepage count
+                cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
+                    huge_to_allocate)
+                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+                if int(ret_code) != 0:
+                    logger.debug('Mount huge pages failed {0}'.format(stderr))
+                    raise RuntimeError('Mount huge pages failed on {0}'.format(
+                        self._node['host']))
+            # If we do not want to allocate dynamically end with error
+            else:
+                raise RuntimeError('Not enough free huge pages: {0}, '
+                    '{1} MB'.format(huge_free, huge_free * huge_size))
         # Check if huge pages mount point exist
         has_huge_mnt = False
         (_, output, _) = self._ssh.exec_command('cat /proc/mounts')
@@ -331,41 +367,14 @@ class QemuUtils(object):
                 break
         # If huge page mount point not exist create one
         if not has_huge_mnt:
-            cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
-                huge_mnt)
+            cmd = 'mkdir -p {0}'.format(huge_mnt)
             (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
             if int(ret_code) != 0:
-                logger.debug('Mount huge pages failed {0}'.format(stderr))
-                raise RuntimeError('Mount huge pages failed on {0}'.format(
+                logger.debug('Create mount dir failed: {0}'.format(stderr))
+                raise RuntimeError('Create mount dir failed on {0}'.format(
                     self._node['host']))
-
-    def _huge_page_allocate(self):
-        """Huge page allocate."""
-        huge_mnt = self._qemu_opt.get('huge_mnt')
-        mem_size = self._qemu_opt.get('mem_size')
-        # Check size of free huge pages
-        (_, output, _) = self._ssh.exec_command('grep Huge /proc/meminfo')
-        regex = re.compile(r'HugePages_Free:\s+(\d+)')
-        match = regex.search(output)
-        huge_free = int(match.group(1))
-        regex = re.compile(r'HugePages_Total:\s+(\d+)')
-        match = regex.search(output)
-        huge_total = int(match.group(1))
-        regex = re.compile(r'Hugepagesize:\s+(\d+)')
-        match = regex.search(output)
-        huge_size = int(match.group(1))
-
-        mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
-
-        if mem_needed:
-            huge_to_allocate = (mem_needed / huge_size) + huge_total
-            # Increase limit of allowed max hugepage count
-            cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
-                huge_to_allocate*3)
-            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
-            # Increase hugepage count
-            cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
-                huge_to_allocate)
+            cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
+                huge_mnt)
             (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
             if int(ret_code) != 0:
                 logger.debug('Mount huge pages failed {0}'.format(stderr))
@@ -390,13 +399,7 @@ class QemuUtils(object):
 
         # By default check only if hugepages are availbale.
         # If 'huge_allocate' is set to true try to allocate as well.
-        try:
-            self._huge_page_check()
-        except RuntimeError as runtime_error:
-            if self._qemu_opt.get('huge_allocate'):
-                self._huge_page_allocate()
-            else:
-                raise runtime_error
+        self._huge_page_check(allocate=self._qemu_opt.get('huge_allocate'))
 
         # Setup QMP via unix socket
         qmp = '-qmp unix:{0},server,nowait'.format(self.__QMP_SOCK)
@@ -409,12 +412,9 @@ class QemuUtils(object):
             '-device isa-serial,chardev=qga0'
         # Graphic setup
         graphic = '-monitor none -display none -vga none'
-        qbin = 'taskset {0} {1}'.format(self._qemu_opt.get('affinity'),
-            self.__QEMU_BIN) if self._qemu_opt.get(
-            'affinity') else self.__QEMU_BIN
         # Run QEMU
         cmd = '{0} {1} {2} {3} {4} -hda {5} {6} {7} {8} {9}'.format(
-            qbin, self._qemu_opt.get('smp'), mem, ssh_fwd,
+            self.__QEMU_BIN, self._qemu_opt.get('smp'), mem, ssh_fwd,
             self._qemu_opt.get('options'),
             self._qemu_opt.get('disk_image'), qmp, serial, qga, graphic)
         (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd, timeout=300)