feat(multi): Multiple VPP instances 42/40742/12
author pmikus <[email protected]>
Wed, 17 Apr 2024 12:27:25 +0000 (12:27 +0000)
committer Peter Mikus <[email protected]>
Wed, 12 Jun 2024 06:45:45 +0000 (06:45 +0000)
Signed-off-by: Peter Mikus <[email protected]>
Change-Id: I28f2752f725c9816a557667033bf7d656fb0d414

resources/libraries/python/ContainerUtils.py
resources/libraries/robot/shared/container.robot

index fc32248..ef08317 100644
@@ -26,7 +26,8 @@ from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
 from resources.libraries.python.ssh import SSH
 from resources.libraries.python.topology import Topology, SocketType
-from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
+from resources.libraries.python.VppConfigGenerator import (VppConfigGenerator,
+    VppInitConfig)
 from resources.libraries.python.VPPUtil import VPPUtil
 
 
@@ -193,10 +194,7 @@ class ContainerManager:
     def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
         """Configure VPP in all containers.
 
-        :param chain_topology: Topology used for chaining containers can be
-            chain or cross_horiz. Chain topology is using 1 memif pair per
-            container. Cross_horiz topology is using 1 memif and 1 physical
-            interface in container (only single container can be configured).
+        :param chain_topology: Topology used for chaining containers.
         :param kwargs: Named parameters.
         :type chain_topology: str
         :type kwargs: dict
@@ -220,47 +218,52 @@ class ContainerManager:
             self.engine.container = self.containers[container]
             guest_dir = self.engine.container.mnt[0].split(u":")[1]
 
-            if chain_topology == u"chain":
+            if chain_topology == "chain":
                 self._configure_vpp_chain_l2xc(
                     mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                     guest_dir=guest_dir, **kwargs
                 )
-            elif chain_topology == u"cross_horiz":
+            elif chain_topology == "cross_horiz":
                 self._configure_vpp_cross_horiz(
                     mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                     guest_dir=guest_dir, **kwargs
                 )
-            elif chain_topology == u"chain_functional":
+            elif chain_topology == "chain_functional":
                 self._configure_vpp_chain_functional(
                     mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                     guest_dir=guest_dir, **kwargs
                 )
-            elif chain_topology == u"chain_ip4":
+            elif chain_topology == "chain_ip4":
                 self._configure_vpp_chain_ip4(
                     mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                     guest_dir=guest_dir, **kwargs
                 )
-            elif chain_topology == u"pipeline_ip4":
+            elif chain_topology == "pipeline_ip4":
                 self._configure_vpp_pipeline_ip4(
                     mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                     guest_dir=guest_dir, **kwargs
                 )
-            elif chain_topology == u"chain_vswitch":
+            elif chain_topology == "chain_vswitch":
                 self._configure_vpp_chain_vswitch(
                     mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                     guest_dir=guest_dir, **kwargs)
-            elif chain_topology == u"chain_ipsec":
+            elif chain_topology == "chain_ipsec":
                 idx_match = search(r"\d+$", self.engine.container.name)
                 if idx_match:
                     idx = int(idx_match.group())
                 self._configure_vpp_chain_ipsec(
                     mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                     guest_dir=guest_dir, nf_instance=idx, **kwargs)
-            elif chain_topology == u"chain_dma":
+            elif chain_topology == "chain_dma":
                 self._configure_vpp_chain_dma(
                     mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                     guest_dir=guest_dir, **kwargs
                 )
+            elif chain_topology == "vswitch_ip4scale":
+                self._configure_vpp_vswitch_ip4scale(
+                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+                    guest_dir=guest_dir, **kwargs
+                )
             else:
                 raise RuntimeError(
                     f"Container topology {chain_topology} not implemented"
@@ -534,6 +537,47 @@ class ContainerManager:
             vif1_mac=vif1_mac, vif2_mac=vif2_mac
         )
 
+    def _configure_vpp_vswitch_ip4scale(self, **kwargs):
+        """Configure VPP in container with.
+
+        :param kwargs: Named parameters.
+        :type kwargs: dict
+        """
+        dut = self.engine.container.name.split("_")[0]
+        sid1 = kwargs["sid1"]
+        sid2 = kwargs["sid2"]
+        vid = kwargs["mid1"]
+
+        phy_cores = BuiltIn().get_variable_value("${cpu_count_int}")
+        rx_queues = BuiltIn().get_variable_value("${rxq_count_int}")
+        ifl = BuiltIn().get_variable_value("${int}")
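+        # ${int} is assumed to hold the interface-variable prefix used by
+        # the suite (e.g. "pf"), so the lookups below resolve topology
+        # variables such as ${DUT1_pf1}[0].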
+
+        if1_pci = Topology.get_interface_pci_addr(
+            self.engine.container.node,
+            BuiltIn().get_variable_value(f"${{{dut}_{ifl}{sid1}}}[0]")
+        )
+        if2_pci = Topology.get_interface_pci_addr(
+            self.engine.container.node,
+            BuiltIn().get_variable_value(f"${{{dut}_{ifl}{sid2}}}[0]")
+        )
+
+        compute_resource_info = CpuUtils.get_affinity_vswitch(
+            kwargs["nodes"], phy_cores, rx_queues=rx_queues
+        )
+        offset = (phy_cores + 1) * (vid - 1)
+        offset_cpu = compute_resource_info[f"{dut}_cpu_alloc_str"].split(",")
+        offset_cpu = [int(c) + int(offset) for c in offset_cpu]
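+        # Each instance is shifted by (phy_cores + 1) CPUs (presumably
+        # phy_cores workers plus one main core); e.g. with phy_cores=2,
+        # instance vid=2 turns an allocation of "1,2,3" into [4, 5, 6].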
+
+        self.engine.create_vpp_startup_config_vswitch(
+            offset_cpu, compute_resource_info["rxq_count_int"],
+            if1_pci, if2_pci
+        )
+
+        # Exec config application is left disabled here:
+        # self.engine.create_vpp_exec_config(
+        #     "create_vswitch_ip4scale.exec"
+        # )
+
     def stop_all_containers(self):
         """Stop all containers."""
         # TODO: Rework if containers can be affected outside ContainerManager.
@@ -760,13 +804,17 @@ class ContainerEngine:
         :type rxq: int
         :type devices: list
         """
-        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
+        if cpuset_cpus is None:
+            cpuset_cpus = self.container.cpuset_cpus
+
+        vpp_config = VppInitConfig.create_vpp_startup_configuration_container(
+            self.container.node, cpuset_cpus
+        )
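+        # VppInitConfig supplies the container-flavored base startup config,
+        # replacing the former create_base_vpp_startup_config helper.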
         vpp_config.add_dpdk_dev(*devices)
-        vpp_config.add_dpdk_log_level(u"debug")
+        vpp_config.add_dpdk_log_level("debug")
         vpp_config.add_dpdk_no_tx_checksum_offload()
         vpp_config.add_dpdk_dev_default_rxq(rxq)
-        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
-        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
+        vpp_config.add_plugin("enable", "dpdk_plugin.so")
 
         # Apply configuration
         self.execute(u"mkdir -p /etc/vpp/")
resources/libraries/robot/shared/container.robot
index 9f08e6b..dbcf690 100644
 | | END
 | | Append To List | ${container_groups} | ${container_group}
 | | Save VPP PIDs
+
+| Start vswitch containers
+| | [Documentation]
+| | ... | Configure and start multiple vswitch instances in containers on all DUTs.
+| |
+| | ... | *Arguments:*
+| | ... | - phy_cores - Number of physical cores to use. Type: integer
+| | ... | - rx_queues - Number of RX queues. Type: integer
+| |
+| | [Arguments] | ${phy_cores} | ${rx_queues}=${None}
+| |
+| | Set Test Variable | @{container_groups} | @{EMPTY}
+| | Set Test Variable | ${container_group} | VSWITCH
+| | Import Library | resources.libraries.python.ContainerUtils.ContainerManager
+| | ... | engine=${container_engine} | WITH NAME | VSWITCH
+| | Stop VPP service on all DUTs | ${nodes}
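+| | # One vswitch container per physical port pair: ${nic_pfs} // 2 per DUT.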
+| | FOR | ${dut} | IN | @{duts}
+| | | FOR | ${i} | IN RANGE | 1 | ${${nic_pfs}//${2}+1}
+| | | | Construct container on DUT | ${dut}
+| | | | ... | nf_chains=${1} | nf_nodes=${${nic_pfs}//${2}}
+| | | | ... | nf_chain=${1} | nf_node=${i}
+| | | | ... | auto_scale=${False} | pinning=${False}
+| | | END
+| | END
+| | Run Keyword | VSWITCH.Acquire all containers
+| | Run Keyword | VSWITCH.Create all containers
+| | Run Keyword | VSWITCH.Configure vpp in all containers
+| | ... | vswitch_ip4scale | nodes=${nodes} | rts_per_flow=${rts_per_flow}
+| | Run Keyword | VSWITCH.Start VPP In All Containers
+| | Append To List | ${container_groups} | VSWITCH
+| | Save VPP PIDs
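
For illustration, a minimal sketch of driving the new keyword from a test case; values are hypothetical, and ${rts_per_flow} (read directly by the keyword), ${nic_pfs} and ${container_engine} are assumed to be provided by the suite:

| | Set Test Variable | ${rts_per_flow} | ${100}
| | Start vswitch containers | phy_cores=${2} | rx_queues=${2}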