feat(core): Add dpdk log level
[csit.git] / resources / libraries / python / ContainerUtils.py
index 74add98..3148acf 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 from collections import OrderedDict, Counter
 from io import open
+from re import search
 from string import Template
+from time import sleep
 
 from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
 from resources.libraries.python.ssh import SSH
 from resources.libraries.python.topology import Topology, SocketType
 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
+from resources.libraries.python.VPPUtil import VPPUtil
 
 
 __all__ = [
@@ -138,20 +143,52 @@ class ContainerManager:
             self.engine.container = self.containers[container]
             self.engine.execute(command)
 
-    def start_vpp_in_all_containers(self):
+    def start_vpp_in_all_containers(self, verify=True):
         """Start VPP in all containers."""
         for container in self.containers:
             self.engine.container = self.containers[container]
-            # We need to install supervisor client/server system to control VPP
-            # as a service
-            self.engine.install_supervisor()
-            self.engine.start_vpp()
+            # For multiple containers, delayed verify is faster.
+            self.engine.start_vpp(verify=False)
+        if verify:
+            self.verify_vpp_in_all_containers()
 
-    def restart_vpp_in_all_containers(self):
+    def _disconnect_papi_to_all_containers(self):
+        """Disconnect any open PAPI connections to VPPs in containers.
+
+        The current PAPI implementation caches open connections,
+        so explicit disconnect is needed before VPP becomes inaccessible.
+
+        Currently this is a protected method, as restart, stop and destroy
+        are the only dangerous methods, and all are handled by ContainerManager.
+        """
+        for container_object in self.containers.values():
+            PapiSocketExecutor.disconnect_by_node_and_socket(
+                container_object.node,
+                container_object.api_socket,
+            )
+
+    def restart_vpp_in_all_containers(self, verify=True):
         """Restart VPP in all containers."""
+        self._disconnect_papi_to_all_containers()
+        for container in self.containers:
+            self.engine.container = self.containers[container]
+            # For multiple containers, delayed verify is faster.
+            self.engine.restart_vpp(verify=False)
+        if verify:
+            self.verify_vpp_in_all_containers()
+
+    def verify_vpp_in_all_containers(self):
+        """Verify that VPP is installed and running in all containers."""
+        # For multiple containers, separate loops per step are faster.
         for container in self.containers:
             self.engine.container = self.containers[container]
-            self.engine.restart_vpp()
+            self.engine.verify_vppctl()
+        for container in self.containers:
+            self.engine.container = self.containers[container]
+            self.engine.adjust_privileges()
+        for container in self.containers:
+            self.engine.container = self.containers[container]
+            self.engine.verify_vpp_papi()
 
     def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
         """Configure VPP in all containers.
@@ -162,7 +199,7 @@ class ContainerManager:
             interface in container (only single container can be configured).
         :param kwargs: Named parameters.
         :type chain_topology: str
-        :param kwargs: dict
+        :type kwargs: dict
         """
         # Count number of DUTs based on node's host information
         dut_cnt = len(
@@ -208,6 +245,17 @@ class ContainerManager:
                     mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                     guest_dir=guest_dir, **kwargs
                 )
+            elif chain_topology == u"chain_vswitch":
+                self._configure_vpp_chain_vswitch(
+                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+                    guest_dir=guest_dir, **kwargs)
+            elif chain_topology == u"chain_ipsec":
+                idx_match = search(r"\d+$", self.engine.container.name)
+                if idx_match:
+                    idx = int(idx_match.group())
+                self._configure_vpp_chain_ipsec(
+                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
             else:
                 raise RuntimeError(
                     f"Container topology {chain_topology} not implemented"
@@ -217,7 +265,7 @@ class ContainerManager:
         """Configure VPP in chain topology with l2xc.
 
         :param kwargs: Named parameters.
-        :param kwargs: dict
+        :type kwargs: dict
         """
         self.engine.create_vpp_startup_config()
         self.engine.create_vpp_exec_config(
@@ -234,7 +282,7 @@ class ContainerManager:
         """Configure VPP in cross horizontal topology (single memif).
 
         :param kwargs: Named parameters.
-        :param kwargs: dict
+        :type kwargs: dict
         """
         if u"DUT1" in self.engine.container.name:
             if_pci = Topology.get_interface_pci_addr(
@@ -258,9 +306,9 @@ class ContainerManager:
         """Configure VPP in chain topology with l2xc (functional).
 
         :param kwargs: Named parameters.
-        :param kwargs: dict
+        :type kwargs: dict
         """
-        self.engine.create_vpp_startup_config_func_dev()
+        self.engine.create_vpp_startup_config()
         self.engine.create_vpp_exec_config(
             u"memif_create_chain_functional.exec",
             mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
@@ -276,14 +324,14 @@ class ContainerManager:
         """Configure VPP in chain topology with ip4.
 
         :param kwargs: Named parameters.
-        :param kwargs: dict
+        :type kwargs: dict
         """
         self.engine.create_vpp_startup_config()
 
-        vif1_mac = kwargs[u"tg_if1_mac"] \
+        vif1_mac = kwargs[u"tg_pf1_mac"] \
             if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
             else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
-        vif2_mac = kwargs[u"tg_if2_mac"] \
+        vif2_mac = kwargs[u"tg_pf2_mac"] \
             if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
             else f"52:54:00:00:{(kwargs['mid2'] + 1):02X}:01"
         self.engine.create_vpp_exec_config(
@@ -299,24 +347,149 @@ class ContainerManager:
             vif1_mac=vif1_mac, vif2_mac=vif2_mac
         )
 
+    def _configure_vpp_chain_vswitch(self, **kwargs):
+        """Configure VPP as vswitch in container.
+
+        :param kwargs: Named parameters.
+        :type kwargs: dict
+        """
+        dut = self.engine.container.name.split(u"_")[0]
+        if dut == u"DUT1":
+            if1_pci = Topology.get_interface_pci_addr(
+                self.engine.container.node, kwargs[u"dut1_if2"])
+            if2_pci = Topology.get_interface_pci_addr(
+                self.engine.container.node, kwargs[u"dut1_if1"])
+            if_red_name = Topology.get_interface_name(
+                self.engine.container.node, kwargs[u"dut1_if2"])
+            if_black_name = Topology.get_interface_name(
+                self.engine.container.node, kwargs[u"dut1_if1"])
+            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
+            tg_pf_mac = kwargs[u"tg_pf2_mac"]
+        else:
+            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
+            tg_pf_mac = kwargs[u"tg_pf1_mac"]
+            if1_pci = Topology.get_interface_pci_addr(
+                self.engine.container.node, kwargs[u"dut2_if1"])
+            if2_pci = Topology.get_interface_pci_addr(
+                self.engine.container.node, kwargs[u"dut2_if2"])
+            if_red_name = Topology.get_interface_name(
+                self.engine.container.node, kwargs[u"dut2_if1"])
+            if_black_name = Topology.get_interface_name(
+                self.engine.container.node, kwargs[u"dut2_if2"])
+
+        n_instances = int(kwargs[u"n_instances"])
+        rxq = 1
+        if u"rxq" in kwargs:
+            rxq = int(kwargs[u"rxq"])
+        nodes = kwargs[u"nodes"]
+        cpuset_cpus = CpuUtils.get_affinity_nf(
+            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
+            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
+        )
+        self.engine.create_vpp_startup_config_vswitch(
+            cpuset_cpus, rxq, if1_pci, if2_pci
+        )
+
+        instances = []
+        for i in range(1, n_instances + 1):
+            instances.append(
+                f"create interface memif id {i} socket-id 1 master\n"
+                f"set interface state memif1/{i} up\n"
+                f"set interface l2 bridge memif1/{i} 1\n"
+                f"create interface memif id {i} socket-id 2 master\n"
+                f"set interface state memif2/{i} up\n"
+                f"set interface l2 bridge memif2/{i} 2\n"
+                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
+                f"static\n\n"
+            )
+
+        self.engine.create_vpp_exec_config(
+            u"memif_create_chain_vswitch_ipsec.exec",
+            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
+            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
+            if_red_name=if_red_name,
+            if_black_name=if_black_name,
+            instances=u"\n\n".join(instances))
+
+
+    def _configure_vpp_chain_ipsec(self, **kwargs):
+        """Configure VPP in container with memifs.
+
+        :param kwargs: Named parameters.
+        :type kwargs: dict
+        """
+        nf_nodes = int(kwargs[u"nf_nodes"])
+        nf_instance = int(kwargs[u"nf_instance"])
+        nodes = kwargs[u"nodes"]
+        dut = self.engine.container.name.split(u"_")[0]
+        cpuset_cpus = CpuUtils.get_affinity_nf(
+            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
+            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
+        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
+        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]
+
+        if dut == u"DUT1":
+            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
+            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
+            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
+            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
+            tg_pf_mac = kwargs[u"tg_pf1_mac"]
+            raddr_ip4 = kwargs[u"laddr_ip4"]
+            l_mac1 = 17
+            l_mac2 = 18
+            r_mac = 1
+        else:
+            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
+            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
+            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
+            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
+            tg_pf_mac = kwargs[u"tg_pf2_mac"]
+            raddr_ip4 = kwargs[u"raddr_ip4"]
+            l_mac1 = 1
+            l_mac2 = 2
+            r_mac = 17
+
+        self.engine.create_vpp_exec_config(
+            u"memif_create_chain_ipsec.exec",
+            socket1=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-1",
+            socket2=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-2",
+            mid1=nf_instance,
+            mid2=nf_instance,
+            sid1=u"1",
+            sid2=u"2",
+            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
+            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
+            tg_pf2_ip4=tg_pf_ip4,
+            tg_pf2_mac=tg_pf_mac,
+            raddr_ip4=raddr_ip4,
+            tnl_local_ip=tnl_local_ip,
+            tnl_remote_ip=tnl_remote_ip,
+            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
+            remote_ip=f"{remote_ip_base}.{nf_instance}"
+        )
+        self.engine.execute(
+            f"cat {kwargs['guest_dir']}/ipsec_create_tunnel_cnf_"
+            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
+        )
+
     def _configure_vpp_pipeline_ip4(self, **kwargs):
         """Configure VPP in pipeline topology with ip4.
 
         :param kwargs: Named parameters.
-        :param kwargs: dict
+        :type kwargs: dict
         """
         self.engine.create_vpp_startup_config()
         node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
         mid1 = kwargs[u"mid1"]
         mid2 = kwargs[u"mid2"]
         role1 = u"master"
-        role2 = u"master" if node in (kwargs[u"nodes"], 1) else u"slave"
-        kwargs[u"mid2"] = kwargs[u"mid2"] if node in (kwargs[u"nodes"], 1) \
-            else kwargs[u"mid2"] + 1
-        vif1_mac = kwargs[u"tg_if1_mac"] \
+        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
+        kwargs[u"mid2"] = kwargs[u"mid2"] \
+            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
+        vif1_mac = kwargs[u"tg_pf1_mac"] \
             if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
             else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
-        vif2_mac = kwargs[u"tg_if2_mac"] \
+        vif2_mac = kwargs[u"tg_pf2_mac"] \
             if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
             else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
         socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
@@ -339,12 +512,16 @@ class ContainerManager:
 
     def stop_all_containers(self):
         """Stop all containers."""
+        # TODO: Rework if containers can be affected outside ContainerManager.
+        self._disconnect_papi_to_all_containers()
         for container in self.containers:
             self.engine.container = self.containers[container]
             self.engine.stop()
 
     def destroy_all_containers(self):
         """Destroy all containers."""
+        # TODO: Rework if containers can be affected outside ContainerManager.
+        self._disconnect_papi_to_all_containers()
         for container in self.containers:
             self.engine.container = self.containers[container]
             self.engine.destroy()
@@ -401,45 +578,11 @@ class ContainerEngine:
         """System info."""
         raise NotImplementedError
 
-    def install_supervisor(self):
-        """Install supervisord inside a container."""
-        if isinstance(self, LXC):
-            self.execute(u"sleep 3; apt-get update")
-            self.execute(u"apt-get install -y supervisor")
-            config = \
-                u"[unix_http_server]\n" \
-                u"file  = /tmp/supervisor.sock\n\n" \
-                u"[rpcinterface:supervisor]\n" \
-                u"supervisor.rpcinterface_factory = " \
-                u"supervisor.rpcinterface:make_main_rpcinterface\n\n" \
-                u"[supervisorctl]\n" \
-                u"serverurl = unix:///tmp/supervisor.sock\n\n" \
-                u"[supervisord]\n" \
-                u"pidfile = /tmp/supervisord.pid\n" \
-                u"identifier = supervisor\n" \
-                u"directory = /tmp\n" \
-                u"logfile = /tmp/supervisord.log\n" \
-                u"loglevel = debug\n" \
-                u"nodaemon = false\n\n"
-            self.execute(
-                f'echo "{config}" > {SUPERVISOR_CONF} && '
-                f'supervisord -c {SUPERVISOR_CONF}'
-            )
-
-    def start_vpp(self):
+    def start_vpp(self, verify=True):
         """Start VPP inside a container."""
-
-        config = \
-            u"[program:vpp]\n" \
-            u"command = /usr/bin/vpp -c /etc/vpp/startup.conf\n" \
-            u"autostart = false\n" \
-            u"autorestart = false\n" \
-            u"redirect_stderr = true\n" \
-            u"priority = 1"
         self.execute(
-            f'echo "{config}" >> {SUPERVISOR_CONF} && supervisorctl reload'
-        )
-        self.execute(u"supervisorctl start vpp")
+            u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
+            u">/tmp/vppd.log 2>&1 < /dev/null &")
 
         topo_instance = BuiltIn().get_library_instance(
             u"resources.libraries.python.topology.Topology"
@@ -448,29 +591,98 @@ class ContainerEngine:
             self.container.node,
             SocketType.PAPI,
             self.container.name,
-            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
-            f"api.sock"
+            self.container.api_socket,
         )
         topo_instance.add_new_socket(
             self.container.node,
             SocketType.STATS,
             self.container.name,
-            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
-            f"stats.sock"
+            self.container.stats_socket,
         )
+        if verify:
+            self.verify_vpp()
 
-    def restart_vpp(self):
+    def restart_vpp(self, verify=True):
         """Restart VPP service inside a container."""
-        self.execute(u"supervisorctl restart vpp")
-        self.execute(u"cat /tmp/supervisord.log")
+        self.execute(u"pkill vpp")
+        self.start_vpp(verify=verify)
+
+    def verify_vpp(self):
+        """Verify VPP is running and ready."""
+        self.verify_vppctl()
+        self.adjust_privileges()
+        self.verify_vpp_papi()
+
+    # TODO Rewrite to use the VPPUtil.py functionality and remove this.
+    def verify_vppctl(self, retries=120, retry_wait=1):
+        """Verify that VPP is installed and running inside container.
+
+        This function waits a while so VPP can start.
+        PCI interfaces are listed for debug purposes.
+        When the check passes, VPP API socket is created on remote side,
+        but perhaps its directory does not have the correct access rights yet.
+
+        :param retries: Check for VPP up to this number of times. Default: 120
+        :param retry_wait: Wait for this number of seconds between retries.
+        """
+        for _ in range(retries + 1):
+            try:
+                # Execute puts the command into single quotes,
+                # so inner arguments are enclosed in double quotes here.
+                self.execute(
+                    u'vppctl show pci 2>&1 | '
+                    u'fgrep -v "Connection refused" | '
+                    u'fgrep -v "No such file or directory"'
+                )
+                break
+            except (RuntimeError, AssertionError):
+                sleep(retry_wait)
+        else:
+            self.execute(u"cat /tmp/vppd.log")
+            raise RuntimeError(
+                f"VPP did not come up in container: {self.container.name}"
+            )
+
+    def adjust_privileges(self):
+        """Adjust privileges to control VPP without sudo."""
+        self.execute("chmod -R o+rwx /run/vpp")
+
+    def verify_vpp_papi(self, retries=120, retry_wait=1):
+        """Verify that VPP is available for PAPI.
 
-    def create_base_vpp_startup_config(self):
+        This also opens and caches PAPI connection for quick reuse.
+        The connection is disconnected when ContainerManager decides to do so.
+
+        :param retries: Check for VPP up to this number of times. Default: 120
+        :param retry_wait: Wait for this number of seconds between retries.
+        """
+        # Wait for success.
+        for _ in range(retries + 1):
+            try:
+                VPPUtil.vpp_show_version(
+                    node=self.container.node,
+                    remote_vpp_socket=self.container.api_socket,
+                    log=False,
+                )
+                break
+            except (RuntimeError, AssertionError):
+                sleep(retry_wait)
+        else:
+            self.execute(u"cat /tmp/vppd.log")
+            raise RuntimeError(
+                f"VPP PAPI fails in container: {self.container.name}"
+            )
+
+    def create_base_vpp_startup_config(self, cpuset_cpus=None):
         """Create base startup configuration of VPP on container.
 
+        :param cpuset_cpus: List of CPU cores to allocate.
+        :type cpuset_cpus: list.
         :returns: Base VPP startup configuration.
         :rtype: VppConfigGenerator
         """
-        cpuset_cpus = self.container.cpuset_cpus
+        if cpuset_cpus is None:
+            cpuset_cpus = self.container.cpuset_cpus
 
         # Create config instance
         vpp_config = VppConfigGenerator()
@@ -479,13 +691,22 @@ class ContainerEngine:
         vpp_config.add_unix_nodaemon()
         vpp_config.add_unix_exec(u"/tmp/running.exec")
         vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
-        vpp_config.add_statseg_per_node_counters(value=u"on")
-        # We will pop the first core from the list to be a main core
-        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
-        # If more cores in the list, the rest will be used as workers.
         if cpuset_cpus:
+            # We will pop the first core from the list to be a main core
+            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
+            # If more cores in the list, the rest will be used as workers.
             corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
             vpp_config.add_cpu_corelist_workers(corelist_workers)
+        vpp_config.add_buffers_per_numa(215040)
+        vpp_config.add_plugin(u"disable", u"default")
+        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
+        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
+        vpp_config.add_main_heap_size(u"2G")
+        vpp_config.add_main_heap_page_size(self.container.page_size)
+        vpp_config.add_default_hugepage_size(self.container.page_size)
+        vpp_config.add_statseg_size(u"2G")
+        vpp_config.add_statseg_page_size(self.container.page_size)
+        vpp_config.add_statseg_per_node_counters(u"on")
 
         return vpp_config
 
@@ -493,7 +714,6 @@ class ContainerEngine:
         """Create startup configuration of VPP without DPDK on container.
         """
         vpp_config = self.create_base_vpp_startup_config()
-        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")
 
         # Apply configuration
         self.execute(u"mkdir -p /etc/vpp/")
@@ -502,46 +722,46 @@ class ContainerEngine:
             f'tee /etc/vpp/startup.conf'
         )
 
-    def create_vpp_startup_config_dpdk_dev(self, *devices):
-        """Create startup configuration of VPP with DPDK on container.
+    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
+        """Create startup configuration of VPP vswitch.
 
-        :param devices: List of PCI devices to add.
+        :param cpuset_cpus: CPU list to run on.
+        :param rxq: Number of interface RX queues.
+        :param devices: PCI devices.
+        :type cpuset_cpus: list
+        :type rxq: int
         :type devices: list
         """
-        vpp_config = self.create_base_vpp_startup_config()
+        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
         vpp_config.add_dpdk_dev(*devices)
+        vpp_config.add_dpdk_log_level(u".*,debug")
         vpp_config.add_dpdk_no_tx_checksum_offload()
-        vpp_config.add_dpdk_log_level(u"debug")
-        vpp_config.add_plugin(u"disable", u"default")
+        vpp_config.add_dpdk_dev_default_rxq(rxq)
         vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
-        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
+        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
 
         # Apply configuration
         self.execute(u"mkdir -p /etc/vpp/")
         self.execute(
-            f'echo "{vpp_config.get_config_str()}" | '
-            f'tee /etc/vpp/startup.conf'
+            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
         )
 
-    def create_vpp_startup_config_func_dev(self):
-        """Create startup configuration of VPP on container for functional
-        vpp_device tests.
+    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
+        """Create startup configuration of VPP with IPsec on container.
+
+        :param cpuset_cpus: CPU list to run on.
+        :type cpuset_cpus: list
         """
-        # Create config instance
-        vpp_config = VppConfigGenerator()
-        vpp_config.set_node(self.container.node)
-        vpp_config.add_unix_cli_listen()
-        vpp_config.add_unix_nodaemon()
-        vpp_config.add_unix_exec(u"/tmp/running.exec")
-        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
-        vpp_config.add_statseg_per_node_counters(value=u"on")
-        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")
+        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
+        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
+        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
+        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
+        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
 
         # Apply configuration
         self.execute(u"mkdir -p /etc/vpp/")
         self.execute(
-            f'echo "{vpp_config.get_config_str()}" | '
-            f'tee /etc/vpp/startup.conf'
+            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
         )
 
     def create_vpp_exec_config(self, template_file, **kwargs):
@@ -553,10 +773,9 @@ class ContainerEngine:
         :type kwargs: dict
         """
         running = u"/tmp/running.exec"
-
         template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"
 
-        with open(template, "r") as src_file:
+        with open(template, u"rt") as src_file:
             src = Template(src_file.read())
             self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')
 
@@ -638,7 +857,7 @@ class LXC(ContainerEngine):
             else u"amd64"
 
         image = self.container.image if self.container.image \
-            else f"-d ubuntu -r bionic -a {target_arch}"
+            else f"-d ubuntu -r focal -a {target_arch}"
 
         cmd = f"lxc-create -t download --name {self.container.name} " \
             f"-- {image} --no-validate"
@@ -650,10 +869,7 @@ class LXC(ContainerEngine):
         self._configure_cgroup(u"lxc")
 
     def build(self):
-        """Build container (compile).
-
-        TODO: Remove from parent class if no sibling implements this.
-        """
+        """Build container (compile)."""
         raise NotImplementedError
 
     def create(self):
@@ -735,7 +951,7 @@ class LXC(ContainerEngine):
             if self.container.env else u""
 
         cmd = f"lxc-attach {env} --name {self.container.name} " \
-            f"-- /bin/sh -c '{command}; exit $?'"
+            f"-- /bin/sh -c '{command}'"
 
         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
         if int(ret) != 0:
@@ -863,22 +1079,19 @@ class Docker(ContainerEngine):
                 else Constants.DOCKER_SUT_IMAGE_UBUNTU
             setattr(self.container, u"image", img)
 
-        cmd = f"docker pull {self.container.image}"
-
-        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
-        if int(ret) != 0:
-            raise RuntimeError(
-                f"Failed to create container {self.container.name}."
-            )
+        if "/" in self.container.image:
+            cmd = f"docker pull {self.container.image}"
+            ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
+            if int(ret) != 0:
+                raise RuntimeError(
+                    f"Failed to create container {self.container.name}."
+                )
 
         if self.container.cpuset_cpus:
             self._configure_cgroup(u"docker")
 
     def build(self):
-        """Build container (compile).
-
-        TODO: Remove from parent class if no sibling implements this.
-        """
+        """Build container (compile)."""
         raise NotImplementedError
 
     def create(self):
@@ -932,7 +1145,7 @@ class Docker(ContainerEngine):
         :raises RuntimeError: If running the command in a container failed.
         """
         cmd = f"docker exec --interactive {self.container.name} " \
-            f"/bin/sh -c '{command}; exit $?'"
+            f"/bin/sh -c '{command}'"
 
         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
         if int(ret) != 0:
@@ -1053,8 +1266,18 @@ class Container:
         except KeyError:
             # Creating new attribute
             if attr == u"node":
+                # Create and cache a connected SSH instance.
                 self.__dict__[u"ssh"] = SSH()
                 self.__dict__[u"ssh"].connect(value)
+            elif attr == u"name":
+                # Socket paths do not hold mutable state;
+                # caching them here just saves horizontal space in callers.
+                # TODO: Rename the dir so other apps can add sockets easily.
+                # E.g. f"/tmp/app_sockets/{value}/vpp_api.sock"
+                path = f"/tmp/vpp_sockets/{value}"
+                self.__dict__[u"socket_dir"] = path
+                self.__dict__[u"api_socket"] = f"{path}/api.sock"
+                self.__dict__[u"stats_socket"] = f"{path}/stats.sock"
             self.__dict__[attr] = value
         else:
             # Updating attribute base of type