FIX: CPU util for NF
[csit.git] / resources / libraries / python / ContainerUtils.py
index 786a401..4feeb8b 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -141,6 +141,7 @@ class ContainerManager(object):
             self.engine.container = self.containers[container]
             # We need to install supervisor client/server system to control VPP
             # as a service
+            self.engine.execute('apt-get update')
             self.engine.install_supervisor()
             self.engine.install_vpp()
             self.engine.restart_vpp()
@@ -151,19 +152,16 @@ class ContainerManager(object):
             self.engine.container = self.containers[container]
             self.engine.restart_vpp()
 
-    def configure_vpp_in_all_containers(self, chain_topology,
-                                        dut1_if=None, dut2_if=None):
+    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
         """Configure VPP in all containers.
 
         :param chain_topology: Topology used for chaining containers can be
             chain or cross_horiz. Chain topology is using 1 memif pair per
             container. Cross_horiz topology is using 1 memif and 1 physical
             interface in container (only single container can be configured).
-        :param dut1_if: Interface on DUT1 directly connected to DUT2.
-        :param dut2_if: Interface on DUT2 directly connected to DUT1.
-        :type container_topology: str
-        :type dut1_if: str
-        :type dut2_if: str
+        :param kwargs: Named parameters.
+        :type chain_topology: str
+        :type kwargs: dict
         """
         # Count number of DUTs based on node's host information
         dut_cnt = len(Counter([self.containers[container].node['host']
@@ -172,47 +170,78 @@ class ContainerManager(object):
         container_vat_template = 'memif_create_{topology}.vat'.format(
             topology=chain_topology)
 
-        if chain_topology == 'chain':
-            for i, container in enumerate(self.containers):
-                mid1 = i % mod + 1
-                mid2 = i % mod + 1
-                sid1 = i % mod * 2 + 1
-                sid2 = i % mod * 2 + 2
-                self.engine.container = self.containers[container]
+        for i, container in enumerate(self.containers):
+            mid1 = i % mod + 1
+            mid2 = i % mod + 1
+            sid1 = i % mod * 2 + 1
+            sid2 = i % mod * 2 + 2
+            self.engine.container = self.containers[container]
+            guest_dir = self.engine.container.mnt[0].split(':')[1]
+
+            if chain_topology == 'chain':
                 self.engine.create_vpp_startup_config()
-                self.engine.create_vpp_exec_config(container_vat_template, \
-                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, \
-                    socket1='memif-{c.name}-{sid}'. \
-                    format(c=self.engine.container, sid=sid1), \
-                    socket2='memif-{c.name}-{sid}'. \
-                    format(c=self.engine.container, sid=sid2))
-        elif chain_topology == 'cross_horiz':
-            if mod > 1:
-                raise RuntimeError('Container chain topology {topology} '
-                                   'supports only single container.'.
-                                   format(topology=chain_topology))
-            for i, container in enumerate(self.containers):
-                mid1 = i % mod + 1
-                sid1 = i % mod * 2 + 1
-                self.engine.container = self.containers[container]
+                self.engine.create_vpp_exec_config(
+                    container_vat_template,
+                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+                    socket1='{dir}/memif-{c.name}-{sid}'.
+                    format(c=self.engine.container, sid=sid1, dir=guest_dir),
+                    socket2='{dir}/memif-{c.name}-{sid}'.
+                    format(c=self.engine.container, sid=sid2, dir=guest_dir))
+            elif chain_topology == 'cross_horiz':
+                try:
+                    dut1_if = kwargs['dut1_if']
+                    dut2_if = kwargs['dut2_if']
+                except KeyError:
+                    raise AttributeError('DUT interfaces not specified!')
                 if 'DUT1' in self.engine.container.name:
-                    if_pci = Topology.get_interface_pci_addr( \
+                    if_pci = Topology.get_interface_pci_addr(
                         self.engine.container.node, dut1_if)
-                    if_name = Topology.get_interface_name( \
+                    if_name = Topology.get_interface_name(
                         self.engine.container.node, dut1_if)
                 if 'DUT2' in self.engine.container.name:
-                    if_pci = Topology.get_interface_pci_addr( \
+                    if_pci = Topology.get_interface_pci_addr(
                         self.engine.container.node, dut2_if)
-                    if_name = Topology.get_interface_name( \
+                    if_name = Topology.get_interface_name(
                         self.engine.container.node, dut2_if)
                 self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
-                self.engine.create_vpp_exec_config(container_vat_template, \
-                    mid1=mid1, sid1=sid1, if_name=if_name, \
-                    socket1='memif-{c.name}-{sid}'. \
-                    format(c=self.engine.container, sid=sid1))
-        else:
-            raise RuntimeError('Container topology {topology} not implemented'.
-                               format(topology=chain_topology))
+                self.engine.create_vpp_exec_config(
+                    container_vat_template,
+                    mid1=mid1, sid1=sid1, if_name=if_name,
+                    socket1='{dir}/memif-{c.name}-{sid}'.
+                    format(c=self.engine.container, sid=sid1, dir=guest_dir))
+            elif chain_topology == 'chain_functional':
+                memif_rx_mode = 'interrupt'
+                self.engine.create_vpp_startup_config_func_dev()
+                self.engine.create_vpp_exec_config(
+                    container_vat_template,
+                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+                    socket1='{dir}/memif-{c.name}-{sid}'.
+                    format(c=self.engine.container, sid=sid1, dir=guest_dir),
+                    socket2='{dir}/memif-{c.name}-{sid}'.
+                    format(c=self.engine.container, sid=sid2, dir=guest_dir),
+                    rx_mode=memif_rx_mode)
+            elif chain_topology == 'chain_ip4':
+                self.engine.create_vpp_startup_config()
+                vif1_mac = kwargs['tg_if1_mac'] \
+                    if (mid1 - 1) % kwargs['nodes'] + 1 == 1 \
+                    else '52:54:00:00:{0:02X}:02'.format(mid1-1)
+                vif2_mac = kwargs['tg_if2_mac'] \
+                    if (mid2 - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
+                    else '52:54:00:00:{0:02X}:01'.format(mid2+1)
+                self.engine.create_vpp_exec_config(
+                    container_vat_template,
+                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+                    socket1='{dir}/memif-{c.name}-{sid}'.
+                    format(c=self.engine.container, sid=sid1, dir=guest_dir),
+                    socket2='{dir}/memif-{c.name}-{sid}'.
+                    format(c=self.engine.container, sid=sid2, dir=guest_dir),
+                    mac1='52:54:00:00:{0:02X}:01'.format(mid1),
+                    mac2='52:54:00:00:{0:02X}:02'.format(mid2),
+                    vif1_mac=vif1_mac, vif2_mac=vif2_mac)
+            else:
+                raise RuntimeError('Container topology {topology} not '
+                                   'implemented'.
+                                   format(topology=chain_topology))
 
     def stop_all_containers(self):
         """Stop all containers."""
@@ -280,10 +309,9 @@ class ContainerEngine(object):
 
     def install_supervisor(self):
         """Install supervisord inside a container."""
-        self.execute('sleep 3')
-        self.execute('apt-get update')
         self.execute('apt-get install -y supervisor')
-        self.execute('echo "{config}" > {config_file}'.
+        self.execute('echo "{config}" > {config_file} && '
+                     'supervisord -c {config_file}'.
                      format(
                          config='[unix_http_server]\n'
                          'file  = /tmp/supervisor.sock\n\n'
@@ -300,13 +328,10 @@ class ContainerEngine(object):
                          'loglevel=debug\n'
                          'nodaemon=false\n\n',
                          config_file=SUPERVISOR_CONF))
-        self.execute('supervisord -c {config_file}'.
-                     format(config_file=SUPERVISOR_CONF))
 
     def install_vpp(self):
         """Install VPP inside a container."""
         self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
-        self.execute('apt-get update')
         # Workaround for install xenial vpp build on bionic ubuntu.
         self.execute('apt-get install -y wget')
         self.execute('deb=$(mktemp) && wget -O "${deb}" '
@@ -334,7 +359,6 @@ class ContainerEngine(object):
                          'priority=1',
                          config_file=SUPERVISOR_CONF))
         self.execute('supervisorctl reload')
-        self.execute('supervisorctl restart vpp')
 
     def restart_vpp(self):
         """Restart VPP service inside a container."""
@@ -355,9 +379,9 @@ class ContainerEngine(object):
         vpp_config.add_unix_cli_listen()
         vpp_config.add_unix_nodaemon()
         vpp_config.add_unix_exec('/tmp/running.exec')
-        # We will pop first core from list to be main core
+        # We will pop the first core from the list to be the main core
         vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
-        # if this is not only core in list, the rest will be used as workers.
+        # If more cores are in the list, the rest will be used as workers.
         if cpuset_cpus:
             corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
             vpp_config.add_cpu_corelist_workers(corelist_workers)
@@ -394,6 +418,23 @@ class ContainerEngine(object):
         self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                      .format(config=vpp_config.get_config_str()))
 
+    def create_vpp_startup_config_func_dev(self):
+        """Create startup configuration of VPP on container for functional
+        vpp_device tests.
+        """
+        # Create config instance
+        vpp_config = VppConfigGenerator()
+        vpp_config.set_node(self.container.node)
+        vpp_config.add_unix_cli_listen()
+        vpp_config.add_unix_nodaemon()
+        vpp_config.add_unix_exec('/tmp/running.exec')
+        vpp_config.add_plugin('disable', 'dpdk_plugin.so')
+
+        # Apply configuration
+        self.execute('mkdir -p /etc/vpp/')
+        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
+                     .format(config=vpp_config.get_config_str()))
+
     def create_vpp_exec_config(self, vat_template_file, **kwargs):
         """Create VPP exec configuration on container.
 
@@ -678,7 +719,8 @@ class Docker(ContainerEngine):
         if int(ret) != 0:
             raise RuntimeError('Failed to create container {c.name}.'
                                .format(c=self.container))
-        self._configure_cgroup('docker')
+        if self.container.cpuset_cpus:
+            self._configure_cgroup('docker')
 
     def create(self):
         """Create/deploy container.