CSIT-312 VPP vhost-user - VPP vhost-user driver, virtio in VM
author pmikus <pmikus@cisco.com>
Tue, 2 Aug 2016 10:34:29 +0000 (11:34 +0100)
committer Peter Mikus <pmikus@cisco.com>
Wed, 17 Aug 2016 15:07:54 +0000 (15:07 +0000)
- Add libraries and keywords to control QEMU VM and the VM app.

Change-Id: Iaaedfe240afe394f507ccaf0cf7a77c8b863acd3
Signed-off-by: pmikus <pmikus@cisco.com>
resources/libraries/python/DpdkUtil.py [new file with mode: 0644]
resources/libraries/python/QemuUtils.py
resources/libraries/robot/performance.robot
resources/libraries/robot/qemu.robot

diff --git a/resources/libraries/python/DpdkUtil.py b/resources/libraries/python/DpdkUtil.py
new file mode 100644
index 0000000..5eea0d6
--- /dev/null
@@ -0,0 +1,109 @@
+# Copyright (c) 2016 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Dpdk Utilities Library."""
+
+from resources.libraries.python.ssh import SSH, exec_cmd_no_error
+
+
+class DpdkUtil(object):
+    """Utilities for DPDK."""
+
+    @staticmethod
+    def dpdk_testpmd_start(node, **args):
+        """Start DPDK testpmd app on VM node.
+
+        :param node: VM node to start testpmd on.
+        :param args: Key-value testpmd parameters.
+        :type node: dict
+        :type args: dict
+        :return: nothing
+        """
+        # Set the hexadecimal bitmask of the cores to run on.
+        eal_coremask = '-c {} '.format(args['eal_coremask'])\
+            if args.get('eal_coremask', '') else ''
+        # Set the number of memory channels to use.
+        eal_mem_channels = '-n {} '.format(args['eal_mem_channels'])\
+            if args.get('eal_mem_channels', '') else ''
+        # Set the memory to allocate on specific sockets (comma separated).
+        eal_socket_mem = '--socket-mem {} '.format(args['eal_socket_mem'])\
+            if args.get('eal_socket_mem', '') else ''
+        # Load an external driver. Multiple -d options are allowed.
+        eal_driver = '-d /usr/lib/librte_pmd_virtio.so '
+        # Set the forwarding mode: io, mac, mac_retry, mac_swap, flowgen,
+        # rxonly, txonly, csum, icmpecho, ieee1588
+        pmd_fwd_mode = '--forward-mode={} '.format(args['pmd_fwd_mode'])\
+            if args.get('pmd_fwd_mode', '') else ''
+        # Set the number of packets per burst to N.
+        pmd_burst = '--burst=64 '
+        # Set the number of descriptors in the TX rings to N.
+        pmd_txd = '--txd=2048 '
+        # Set the number of descriptors in the RX rings to N.
+        pmd_rxd = '--rxd=2048 '
+        # Set the hexadecimal bitmask of TX queue flags.
+        pmd_txqflags = '--txqflags=0xf00 '
+        # Set the number of mbufs to be allocated in the mbuf pools.
+        pmd_total_num_mbufs = '--total-num-mbufs=65536 '
+        # Set the hexadecimal bitmask of the ports for forwarding.
+        pmd_portmask = '--portmask=0x3 '
+        # Disable hardware VLAN.
+        pmd_disable_hw_vlan = '--disable-hw-vlan '\
+            if args.get('pmd_disable_hw_vlan', '') else ''
+        # Disable RSS (Receive Side Scaling).
+        pmd_disable_rss = '--disable-rss '\
+            if args.get('pmd_disable_rss', '') else ''
+        # Set the hexadecimal bitmask of the cores running forwarding. Master
+        # lcore=0 is reserved, so the lowest bit is cleared.
+        pmd_coremask = '--coremask={} '.format(\
+            hex(int(args['eal_coremask'], 0) & 0xFFFE))\
+            if args.get('eal_coremask', '') else ''
+        # Set the number of forwarding cores based on coremask.
+        pmd_nb_cores = '--nb-cores={} '.format(\
+            bin(int(args['eal_coremask'], 0) & 0xFFFE).count('1'))\
+            if args.get('eal_coremask', '') else ''
+        eal_options = '-v '\
+            + eal_coremask\
+            + eal_mem_channels\
+            + eal_socket_mem\
+            + eal_driver
+        pmd_options = '-- '\
+            + pmd_fwd_mode\
+            + pmd_burst\
+            + pmd_txd\
+            + pmd_rxd\
+            + pmd_txqflags\
+            + pmd_total_num_mbufs\
+            + pmd_portmask\
+            + pmd_disable_hw_vlan\
+            + pmd_disable_rss\
+            + pmd_coremask\
+            + pmd_nb_cores
+        ssh = SSH()
+        ssh.connect(node)
+        cmd = "/start-testpmd.sh {0} {1}".format(eal_options, pmd_options)
+        exec_cmd_no_error(node, cmd, sudo=True)
+        ssh.disconnect(node)
+
+    @staticmethod
+    def dpdk_testpmd_stop(node):
+        """Stop DPDK testpmd app on node.
+
+        :param node: Node to stop testpmd on.
+        :type node: dict
+        :return: nothing
+        """
+        ssh = SSH()
+        ssh.connect(node)
+        cmd = "/stop-testpmd.sh"
+        exec_cmd_no_error(node, cmd, sudo=True)
+        ssh.disconnect(node)
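
A minimal sketch of driving the new library directly from Python, mirroring
the arguments the performance.robot keyword below passes (vm_node is a
placeholder for the guest node dict returned by Qemu Start):

    # Usage sketch only; assumes a CSIT topology is loaded and vm_node is
    # the guest node dict returned by QemuUtils' Qemu Start.
    from resources.libraries.python.DpdkUtil import DpdkUtil

    DpdkUtil.dpdk_testpmd_start(
        vm_node,
        eal_coremask='0x7',      # guest lcores 0-2
        eal_mem_channels='4',
        eal_socket_mem='1024',
        pmd_fwd_mode='io',
        pmd_disable_hw_vlan=True,
    )
    # Derived internally from eal_coremask: --coremask=0x6 (master lcore 0
    # bit cleared) and --nb-cores=2 (number of set bits in 0x6).
    DpdkUtil.dpdk_testpmd_stop(vm_node)
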
diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index 16ade29..bc85435 100644
@@ -317,7 +317,7 @@ class QemuUtils(object):
             else:
                 interface['name'] = if_name
 
-    def _huge_page_check(self):
+    def _huge_page_check(self, allocate=False):
         """Huge page check."""
         huge_mnt = self._qemu_opt.get('huge_mnt')
         mem_size = self._qemu_opt.get('mem_size')
         """Huge page check."""
         huge_mnt = self._qemu_opt.get('huge_mnt')
         mem_size = self._qemu_opt.get('mem_size')
@@ -326,12 +326,35 @@ class QemuUtils(object):
         regex = re.compile(r'HugePages_Free:\s+(\d+)')
         match = regex.search(output)
         huge_free = int(match.group(1))
+        regex = re.compile(r'HugePages_Total:\s+(\d+)')
+        match = regex.search(output)
+        huge_total = int(match.group(1))
         regex = re.compile(r'Hugepagesize:\s+(\d+)')
         match = regex.search(output)
         huge_size = int(match.group(1))
+        # Check if memory requested by QEMU is available on host
         if (mem_size * 1024) > (huge_free * huge_size):
-            raise RuntimeError('Not enough free huge pages {0} kB, required '
-                '{1} MB'.format(huge_free * huge_size, mem_size))
+            # If we want to allocate hugepages dynamically
+            if allocate:
+                mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
+                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
+                max_map_count = huge_to_allocate * 4
+                # Increase maximum number of memory map areas a process may have
+                cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
+                    max_map_count)
+                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+                # Increase hugepage count
+                cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
+                    huge_to_allocate)
+                (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+                if int(ret_code) != 0:
+                    logger.debug('Allocate huge pages failed {0}'.format(stderr))
+                    raise RuntimeError('Allocate huge pages failed on {0}'.format(
+                        self._node['host']))
+            # If dynamic allocation is not enabled, fail with an error
+            else:
+                raise RuntimeError('Not enough free huge pages: {0} pages, '
+                    '{1} kB free'.format(huge_free, huge_free * huge_size))
         # Check if huge pages mount point exists
         has_huge_mnt = False
         (_, output, _) = self._ssh.exec_command('cat /proc/mounts')
@@ -344,41 +367,14 @@ class QemuUtils(object):
                 break
         # If huge page mount point does not exist, create one
         if not has_huge_mnt:
-            cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
-                huge_mnt)
+            cmd = 'mkdir -p {0}'.format(huge_mnt)
             (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
             if int(ret_code) != 0:
-                logger.debug('Mount huge pages failed {0}'.format(stderr))
-                raise RuntimeError('Mount huge pages failed on {0}'.format(
+                logger.debug('Create mount dir failed: {0}'.format(stderr))
+                raise RuntimeError('Create mount dir failed on {0}'.format(
                     self._node['host']))
-
-    def _huge_page_allocate(self):
-        """Huge page allocate."""
-        huge_mnt = self._qemu_opt.get('huge_mnt')
-        mem_size = self._qemu_opt.get('mem_size')
-        # Check size of free huge pages
-        (_, output, _) = self._ssh.exec_command('grep Huge /proc/meminfo')
-        regex = re.compile(r'HugePages_Free:\s+(\d+)')
-        match = regex.search(output)
-        huge_free = int(match.group(1))
-        regex = re.compile(r'HugePages_Total:\s+(\d+)')
-        match = regex.search(output)
-        huge_total = int(match.group(1))
-        regex = re.compile(r'Hugepagesize:\s+(\d+)')
-        match = regex.search(output)
-        huge_size = int(match.group(1))
-
-        mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
-
-        if mem_needed:
-            huge_to_allocate = (mem_needed / huge_size) + huge_total
-            # Increase limit of allowed max hugepage count
-            cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
-                huge_to_allocate*3)
-            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
-            # Increase hugepage count
-            cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
-                huge_to_allocate)
+            cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
+                huge_mnt)
             (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
             if int(ret_code) != 0:
                 logger.debug('Mount huge pages failed {0}'.format(stderr))
@@ -403,13 +399,7 @@ class QemuUtils(object):
 
         # By default check only if hugepages are available.
         # If 'huge_allocate' is set to true, try to allocate as well.
-        try:
-            self._huge_page_check()
-        except RuntimeError as runtime_error:
-            if self._qemu_opt.get('huge_allocate'):
-                self._huge_page_allocate()
-            else:
-                raise runtime_error
+        self._huge_page_check(allocate=self._qemu_opt.get('huge_allocate'))
 
         # Setup QMP via unix socket
         qmp = '-qmp unix:{0},server,nowait'.format(self.__QMP_SOCK)
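
A standalone sketch of the sizing arithmetic in _huge_page_check(allocate=True)
above, with illustrative host numbers (a 2048 MB guest on a host with 512 free
and 1024 total 2 MB hugepages); the sample values are assumptions, not read
from a real host:

    # Hugepage sizing math mirroring _huge_page_check(allocate=True).
    mem_size = 2048       # MB requested by QEMU
    huge_free = 512       # HugePages_Free
    huge_total = 1024     # HugePages_Total
    huge_size = 2048      # Hugepagesize in kB

    mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))    # 1048576 kB
    huge_to_allocate = ((mem_needed // huge_size) * 2) + huge_total  # 2048 pages
    max_map_count = huge_to_allocate * 4                             # 8192

    # The diff then writes huge_to_allocate to /proc/sys/vm/nr_hugepages
    # and max_map_count to /proc/sys/vm/max_map_count.
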
diff --git a/resources/libraries/robot/performance.robot b/resources/libraries/robot/performance.robot
index 62b1756..4e12ded 100644
@@ -14,7 +14,9 @@
 *** Settings ***
 | Library | resources.libraries.python.topology.Topology
 | Library | resources.libraries.python.NodePath
+| Library | resources.libraries.python.DpdkUtil
 | Library | resources.libraries.python.InterfaceUtil
+| Library | resources.libraries.python.VhostUser
 | Library | resources.libraries.python.TrafficGenerator
 | Library | resources.libraries.python.TrafficGenerator.TGDropRateSearchImpl
 | Resource | resources/libraries/robot/default.robot
@@ -24,6 +26,7 @@
 | Resource | resources/libraries/robot/l2_xconnect.robot
 | Resource | resources/libraries/robot/ipv4.robot
 | Resource | resources/libraries/robot/ipv6.robot
+| Resource | resources/libraries/robot/qemu.robot
 | Documentation | Performance suite keywords
 
 *** Keywords ***
 | | ... | _NOTE:_ This KW sets following suite variables:
 | | ... | - glob_loss_acceptance - Loss acceptance threshold
 | | ... | - glob_loss_acceptance_type - Loss acceptance threshold type
+| | ... | - glob_vm_image - Guest VM disk image
 | | ...
 | | Set Suite Variable | ${glob_loss_acceptance} | 0.5
 | | Set Suite Variable | ${glob_loss_acceptance_type} | percentage
+| | Set Suite Variable | ${glob_vm_image} | /var/lib/vm/csit-nested-1.3.img
 
 | 2-node circular Topology Variables Setup
 | | [Documentation]
 | | ${dut2_if2_pci}= | Get Interface PCI Addr | ${dut2} | ${dut2_if2}
 | | Add PCI device | ${dut1} | ${dut1_if1_pci} | ${dut1_if2_pci}
 | | Add PCI device | ${dut2} | ${dut2_if1_pci} | ${dut2_if2_pci}
+
+| Guest VM with dpdk-testpmd connected via vhost-user is setup
+| | [Documentation]
+| | ... | Start QEMU guest with two vhost-user interfaces interconnected by
+| | ... | DPDK testpmd. The QEMU guest uses 3 cores pinned to physical cores
+| | ... | 5, 6, 7 and 2048 MB of memory. Testpmd uses 3 cores (1 main core
+| | ... | and 2 cores dedicated to io), socket-mem=1024, mem-channels=4,
+| | ... | txd/rxd=2048, burst=64, disable-hw-vlan, total-num-mbufs=65536 and
+| | ... | driver /usr/lib/librte_pmd_virtio.so.
+| | ...
+| | ... | *Arguments:*
+| | ... | - dut_node - DUT node to start guest VM on. Type: dictionary
+| | ... | - sock1 - Socket path for first Vhost-User interface. Type: string
+| | ... | - sock2 - Socket path for second Vhost-User interface. Type: string
+| | ... | - vm_name - QemuUtils instance name. Type: string
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Guest VM with dpdk-testpmd connected via vhost-user is setup \
+| | ... | \| ${nodes['DUT1']} \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM \|
+| | [Arguments] | ${dut_node} | ${sock1} | ${sock2} | ${vm_name}
+| | Import Library | resources.libraries.python.QemuUtils
+| | ...            | WITH NAME | ${vm_name}
+| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
+| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock2}
+| | Run keyword | ${vm_name}.Qemu Set Node | ${dut_node}
+| | Run keyword | ${vm_name}.Qemu Set Smp | 3 | 3 | 1 | 1
+| | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
+| | Run keyword | ${vm_name}.Qemu Set Huge Allocate
+| | Run keyword | ${vm_name}.Qemu Set Disk Image | ${glob_vm_image}
+| | ${vm}= | Run keyword | ${vm_name}.Qemu Start
+| | Run keyword | ${vm_name}.Qemu Set Affinity | 5 | 6 | 7
+| | Dpdk Testpmd Start | ${vm} | eal_coremask=0x7
+| | ...                | eal_mem_channels=4
+| | ...                | eal_socket_mem=1024
+| | ...                | pmd_fwd_mode=io
+| | ...                | pmd_disable_hw_vlan=${True}
+| | Return From Keyword | ${vm}
+
+| Guest VM with Linux Bridge connected via vhost-user is setup
+| | [Documentation]
+| | ... | Start QEMU guest with two vhost-user interfaces interconnected by a
+| | ... | Linux bridge. The QEMU guest uses 3 cores pinned to physical cores
+| | ... | 5, 6, 7 and 2048 MB of memory.
+| | ...
+| | ... | *Arguments:*
+| | ... | - dut_node - DUT node to start guest VM on. Type: dictionary
+| | ... | - sock1 - Socket path for first Vhost-User interface. Type: string
+| | ... | - sock2 - Socket path for second Vhost-User interface. Type: string
+| | ... | - vm_name - QemuUtils instance name. Type: string
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Guest VM with Linux Bridge connected via vhost-user is setup \
+| | ... | \| ${nodes['DUT1']} \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM \|
+| | [Arguments] | ${dut_node} | ${sock1} | ${sock2} | ${vm_name}
+| | Import Library | resources.libraries.python.QemuUtils
+| | ...            | WITH NAME | ${vm_name}
+| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
+| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock2}
+| | Run keyword | ${vm_name}.Qemu Set Node | ${dut_node}
+| | Run keyword | ${vm_name}.Qemu Set Smp | 3 | 3 | 1 | 1
+| | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
+| | Run keyword | ${vm_name}.Qemu Set Huge Allocate
+| | Run keyword | ${vm_name}.Qemu Set Disk Image | ${glob_vm_image}
+| | ${vm}= | Run keyword | ${vm_name}.Qemu Start
+| | Run keyword | ${vm_name}.Qemu Set Affinity | 5 | 6 | 7
+| | ${br}= | Set Variable | br0
+| | ${vhost1}= | Get Vhost User If Name By Sock | ${vm} | ${sock1}
+| | ${vhost2}= | Get Vhost User If Name By Sock | ${vm} | ${sock2}
+| | Linux Add Bridge | ${vm} | ${br} | ${vhost1} | ${vhost2}
+| | Set Interface State | ${vm} | ${vhost1} | up | if_type=name
+| | Set Interface State | ${vm} | ${vhost2} | up | if_type=name
+| | Set Interface State | ${vm} | ${br} | up | if_type=name
+| | Return From Keyword | ${vm}
+
+| Guest VM with dpdk-testpmd Teardown
+| | [Documentation]
+| | ... | Stop all QEMU processes with dpdk-testpmd running on ${dut_node}.
+| | ... | Argument is a dictionary of all QEMU VMs running on the node, keyed
+| | ... | by name. Dpdk-testpmd is stopped gracefully and prints its stats.
+| | ...
+| | ... | *Arguments:*
+| | ... | - dut_node - Node where to clean qemu. Type: dictionary
+| | ... | - dut_vm_refs - VM references on node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Guest VM with dpdk-testpmd Teardown \| ${node['DUT1']} \
+| | ... | \| ${dut_vm_refs} \|
+| | ...
+| | [Arguments] | ${dut_node} | ${dut_vm_refs}
+| | :FOR | ${vm_name} | IN | @{dut_vm_refs}
+| | | ${vm}= | Get From Dictionary | ${dut_vm_refs} | ${vm_name}
+| | | Dpdk Testpmd Stop | ${vm}
+| | | Run Keyword | ${vm_name}.Qemu Set Node | ${dut_node}
+| | | Run Keyword | ${vm_name}.Qemu Kill
+| | | Run Keyword | ${vm_name}.Qemu Clear Socks
+
+| Guest VM Teardown
+| | [Documentation]
+| | ... | Stop all QEMU processes running on ${dut_node}.
+| | ... | Argument is a dictionary of all QEMU VMs running on the node, keyed
+| | ... | by name.
+| | ...
+| | ... | *Arguments:*
+| | ... | - dut_node - Node where to clean qemu. Type: dictionary
+| | ... | - dut_vm_refs - VM references on node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Guest VM Teardown \| ${node['DUT1']} \
+| | ... | \| ${dut_vm_refs} \|
+| | ...
+| | [Arguments] | ${dut_node} | ${dut_vm_refs}
+| | :FOR | ${vm_name} | IN | @{dut_vm_refs}
+| | | ${vm}= | Get From Dictionary | ${dut_vm_refs} | ${vm_name}
+| | | Run Keyword | ${vm_name}.Qemu Set Node | ${dut_node}
+| | | Run Keyword | ${vm_name}.Qemu Kill
+| | | Run Keyword | ${vm_name}.Qemu Clear Socks
+
diff --git a/resources/libraries/robot/qemu.robot b/resources/libraries/robot/qemu.robot
index a5dd0e6..a9cf010 100644
 | | ... | ELSE | Qemu Quit
 | | Qemu Clear Socks
 | | Run Keyword If | ${vm} is not None | Disconnect | ${vm}
+
+| Kill Qemu on all DUTs
+| | [Documentation] | Kill QEMU processes on all DUTs.
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Qemu Set Node | ${nodes['${dut}']}
+| | | Qemu Kill