hs-test: support for multiple multi-core containers 56/43156/9
author Adrian Villin <[email protected]>
Tue, 10 Jun 2025 13:33:35 +0000 (15:33 +0200)
committer Dave Wallace <[email protected]>
Thu, 12 Jun 2025 16:17:30 +0000 (16:17 +0000)
- framework will now allocate multiple cores for every vpp container
  when running an MTTest
- added missing test timeout in H2SpecSuite

Type: improvement

Change-Id: I31317560b54b494ab14c8b5f4d7caed9fd3315b0
Signed-off-by: Adrian Villin <[email protected]>
15 files changed:
extras/hs-test/Makefile
extras/hs-test/README.rst
extras/hs-test/hs_test.sh
extras/hs-test/http2_test.go
extras/hs-test/http_test.go
extras/hs-test/infra/cpu.go
extras/hs-test/infra/hst_suite.go
extras/hs-test/infra/suite_cpu_pinning.go
extras/hs-test/infra/suite_h2.go
extras/hs-test/infra/suite_ldp.go
extras/hs-test/infra/suite_no_topo.go
extras/hs-test/infra/suite_vpp_proxy.go
extras/hs-test/infra/suite_vpp_udp_proxy.go
extras/hs-test/ldp_test.go
extras/hs-test/proxy_test.go

index 24026fc..b02c499 100644 extras/hs-test/Makefile
@@ -33,6 +33,10 @@ ifeq ($(CPUS),)
 CPUS=1
 endif
 
+ifeq ($(VPP_CPUS),)
+VPP_CPUS=1
+endif
+
 ifeq ($(PARALLEL),)
 PARALLEL=1
 endif
@@ -96,9 +100,10 @@ help:
        @echo " DEBUG=[true|false]       - attach VPP to GDB"
        @echo " TEST=[name1,name2...]    - specific test(s) to run"
        @echo " SKIP=[name1,name2...]    - specific test(s) to skip"
-       @echo " CPUS=[n-cpus]            - number of cpus to allocate to VPP and containers"
+       @echo " CPUS=[n]                 - number of cpus to allocate to each non-VPP container (default = 1)"
+       @echo " VPP_CPUS=[n]             - number of cpus to allocate to each VPP container (default = 1)"
        @echo " VPPSRC=[path-to-vpp-src] - path to vpp source files (for gdb)"
-       @echo " PARALLEL=[n-cpus]        - number of test processes to spawn to run in parallel"
+       @echo " PARALLEL=[n]             - number of test processes to spawn to run in parallel"
        @echo " REPEAT=[n]               - repeat tests up to N times or until a failure occurs"
        @echo " CPU0=[true|false]        - use cpu0"
        @echo " DRYRUN=[true|false]      - set up containers but don't run tests"
@@ -137,7 +142,8 @@ test: .deps.ok .build.ok
        @bash ./hs_test.sh --persist=$(PERSIST) --verbose=$(VERBOSE) \
                --unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST) --cpus=$(CPUS) \
                --vppsrc=$(VPPSRC) --parallel=$(PARALLEL) --repeat=$(REPEAT) --cpu0=$(CPU0) \
-               --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) --timeout=$(TIMEOUT); \
+               --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) --timeout=$(TIMEOUT) \
+               --vpp_cpus=$(VPP_CPUS); \
                ./script/compress.sh $$?
 
 .PHONY: test-debug
@@ -146,7 +152,8 @@ test-debug: .deps.ok .build_debug.ok
        @bash ./hs_test.sh --persist=$(PERSIST) --verbose=$(VERBOSE) \
                --unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST) --cpus=$(CPUS) \
                --vppsrc=$(VPPSRC) --parallel=$(PARALLEL) --repeat=$(REPEAT) --debug_build=true \
-               --cpu0=$(CPU0) --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) --timeout=$(TIMEOUT); \
+               --cpu0=$(CPU0) --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) --timeout=$(TIMEOUT) \
+               --vpp_cpus=$(VPP_CPUS); \
                ./script/compress.sh $$?
 
 .PHONY: wipe-lcov
@@ -159,13 +166,14 @@ test-cov: .deps.ok .build.cov.ok wipe-lcov
        -@bash ./hs_test.sh --coverage=true --persist=$(PERSIST) --verbose=$(VERBOSE) \
                --unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST-HS) --cpus=$(CPUS) \
                --vppsrc=$(VPPSRC) --cpu0=$(CPU0) --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) \
-               --timeout=$(TIMEOUT); \
+               --timeout=$(TIMEOUT) --vpp_cpus=$(VPP_CPUS); \
                ./script/compress.sh $$?
        $(MAKE) -C ../.. test-cov-post-standalone HS_TEST=1
 
 .PHONY: test-leak
 test-leak: .deps.ok .build_debug.ok
-       @bash ./hs_test.sh --test=$(TEST) --debug_build=true --leak_check=true --vppsrc=$(VPPSRC) --timeout=$(TIMEOUT)
+       @bash ./hs_test.sh --test=$(TEST) --debug_build=true --leak_check=true --vppsrc=$(VPPSRC) --timeout=$(TIMEOUT) \
+       --vpp_cpus=$(VPP_CPUS);
 
 .PHONY: test-perf
 test-perf: FORCE_BUILD=false
index ca9f1f9..815f8aa 100644 extras/hs-test/README.rst
@@ -67,9 +67,8 @@ For adding a new suite, please see `Modifying the framework`_ below.
 Assumed are two docker containers, each with its own VPP instance running. One VPP then pings the other.
 This can be put in file ``extras/hs-test/my_test.go`` and run with command ``make test TEST=MyTest``.
 
-To add a multi-worker test, name it ``[name]MTTest``. Doing this, the framework will allocate 3 CPUs to a VPP container, no matter what ``CPUS`` is set to.
-Only a single multi-worker VPP container is supported for now. Please register multi-worker tests as Solo tests to avoid reusing the same cores
-when running in parallel.
+To add a multi-worker test, register it with a multi-worker suite. The suite *cannot* call ``s.SetupTest()`` in its ``BeforeEach`` block;
+instead, set the desired core counts using ``s.CpusPerContainer`` and/or ``s.CpusPerVppContainer`` and call ``s.SetupTest()`` at the beginning of your test.
 
 ::
 
@@ -81,7 +80,6 @@ when running in parallel.
 
         func init(){
                 RegisterMySuiteTests(MyTest)
-                RegisterSoloMySuiteTests(MyMTTest)
         }
 
         func MyMTTest(s *MySuite){
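
Based on the instructions above and on the multi-worker tests added in this change (e.g. Http2MultiplexingMWTest), the README example would continue roughly as sketched below. RegisterMySuiteMWTests is a hypothetical stand-in for the suite's multi-worker registration helper (compare RegisterH2MWTests and RegisterNoTopoMWTests further down):

        func init(){
                RegisterMySuiteTests(MyTest)
                RegisterMySuiteMWTests(MyMTTest) // hypothetical MW registration helper
        }

        func MyMTTest(s *MySuite){
                // the MW suite must not call s.SetupTest() in BeforeEach,
                // so the test sets its core counts and runs the setup itself
                s.CpusPerVppContainer = 3
                s.SetupTest()
                // ... exercise the multi-worker VPP instance here ...
        }
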
index 8be73a2..eb3607c 100644 extras/hs-test/hs_test.sh
@@ -64,6 +64,9 @@ case "${i}" in
     --cpus=*)
         args="$args -cpus ${i#*=}"
         ;;
+    --vpp_cpus=*)
+        args="$args -vpp_cpus ${i#*=}"
+        ;;
     --vppsrc=*)
         args="$args -vppsrc ${i#*=}"
         ;;
index 0d170eb..a813f41 100644 extras/hs-test/http2_test.go
@@ -11,7 +11,7 @@ import (
 
 func init() {
        RegisterH2Tests(Http2TcpGetTest, Http2TcpPostTest, Http2MultiplexingTest, Http2TlsTest, Http2ContinuationTxTest)
-       RegisterH2SoloTests(Http2MultiplexingMTTest)
+       RegisterH2MWTests(Http2MultiplexingMWTest)
 }
 
 func Http2TcpGetTest(s *H2Suite) {
@@ -79,7 +79,9 @@ func Http2MultiplexingTest(s *H2Suite) {
        s.AssertContains(o, " 0 timeout")
 }
 
-func Http2MultiplexingMTTest(s *H2Suite) {
+func Http2MultiplexingMWTest(s *H2Suite) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        vpp := s.Containers.Vpp.VppInstance
        serverAddress := s.VppAddr() + ":" + s.Ports.Port1
        vpp.Vppctl("http tps uri tcp://" + serverAddress + " no-zc")
index 12811cb..8b69072 100644 extras/hs-test/http_test.go
@@ -41,7 +41,8 @@ func init() {
                HttpStaticRedirectTest)
        RegisterNoTopoSoloTests(HttpStaticPromTest, HttpGetTpsTest, HttpGetTpsInterruptModeTest, PromConcurrentConnectionsTest,
                PromMemLeakTest, HttpClientPostMemLeakTest, HttpInvalidClientRequestMemLeakTest, HttpPostTpsTest, HttpPostTpsInterruptModeTest,
-               PromConsecutiveConnectionsTest, HttpGetTpsTlsTest, HttpPostTpsTlsTest, HttpClientGetRepeatMTTest, HttpClientPtrGetRepeatMTTest)
+               PromConsecutiveConnectionsTest, HttpGetTpsTlsTest, HttpPostTpsTlsTest)
+       RegisterNoTopoMWTests(HttpClientGetRepeatMWTest, HttpClientPtrGetRepeatMWTest)
        RegisterNoTopo6SoloTests(HttpClientGetResponseBody6Test, HttpClientGetTlsResponseBody6Test)
 }
 
@@ -591,11 +592,15 @@ func httpClientGet6(s *NoTopo6Suite, response string, size int, proto string) {
        s.AssertContains(file_contents, response)
 }
 
-func HttpClientGetRepeatMTTest(s *NoTopoSuite) {
+func HttpClientGetRepeatMWTest(s *NoTopoSuite) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        httpClientRepeat(s, "", "sessions 2")
 }
 
-func HttpClientPtrGetRepeatMTTest(s *NoTopoSuite) {
+func HttpClientPtrGetRepeatMWTest(s *NoTopoSuite) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        httpClientRepeat(s, "", "use-ptr sessions 2")
 }
 
index d6ae14e..589c51d 100644 extras/hs-test/infra/cpu.go
@@ -9,7 +9,6 @@ import (
        "strings"
 
        . "fd.io/hs-test/infra/common"
-       . "github.com/onsi/ginkgo/v2"
 )
 
 var CgroupPath = "/sys/fs/cgroup/"
@@ -20,8 +19,11 @@ type CpuContext struct {
 }
 
 type CpuAllocatorT struct {
-       cpus              []int
-       maxContainerCount int
+       cpus    []int
+       numa0   []int
+       numa1   []int
+       lastCpu int
+       suite   *HstSuite
 }
 
 func iterateAndAppend(start int, end int, slice []int) []int {
@@ -33,27 +35,40 @@ func iterateAndAppend(start int, end int, slice []int) []int {
 
 var cpuAllocator *CpuAllocatorT = nil
 
-func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*CpuContext, error) {
+func (c *CpuAllocatorT) Allocate(nCpus int, offset int) (*CpuContext, error) {
        var cpuCtx CpuContext
        // indexes, not actual cores
        var minCpu, maxCpu int
 
-       minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
-       maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
+       minCpu = offset
+       maxCpu = nCpus - 1 + offset
 
        if len(c.cpus)-1 < maxCpu {
-               err := fmt.Errorf("could not allocate %d CPUs; available count: %d; attempted to allocate cores with index %d-%d; max index: %d;\n"+
-                       "available cores: %v", nCpus*containerCount, len(c.cpus), minCpu, maxCpu, len(c.cpus)-1, c.cpus)
+               msg := fmt.Sprintf("could not allocate %d CPUs; available count: %d; attempted to allocate cores with index %d-%d; max index: %d;\n"+
+                       "available cores: %v", nCpus, len(c.cpus), minCpu, maxCpu, len(c.cpus)-1, c.cpus)
+               if c.suite.SkipIfNotEnoughCpus {
+                       c.suite.Skip("skipping: " + msg)
+               }
+               err := fmt.Errorf(msg)
                return nil, err
        }
 
-       if containerCount == 1 {
-               cpuCtx.cpus = c.cpus[minCpu : minCpu+nCpus]
-       } else if containerCount > 1 && containerCount <= c.maxContainerCount {
-               cpuCtx.cpus = c.cpus[minCpu+(nCpus*(containerCount-1)) : minCpu+(nCpus*containerCount)]
+       if NumaAwareCpuAlloc {
+               if len(c.numa0) > maxCpu {
+                       c.suite.Log("Allocating CPUs from numa #0")
+                       cpuCtx.cpus = c.numa0[minCpu : minCpu+nCpus]
+               } else if len(c.numa1) > maxCpu {
+                       c.suite.Log("Allocating CPUs from numa #1")
+                       cpuCtx.cpus = c.numa1[minCpu : minCpu+nCpus]
+               } else {
+                       err := fmt.Errorf("could not allocate %d CPUs; not enough CPUs in either numa node", nCpus)
+                       return nil, err
+               }
        } else {
-               return nil, fmt.Errorf("too many containers; CPU allocation for >%d containers is not implemented", c.maxContainerCount)
+               cpuCtx.cpus = c.cpus[minCpu : minCpu+nCpus]
        }
+
+       c.lastCpu = minCpu + nCpus
        cpuCtx.cpuAllocator = c
        return &cpuCtx, nil
 }
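
The removed maxContainerCount arithmetic is replaced by a running lastCpu offset: each Allocate() call slices the next nCpus entries out of the usable core list and advances the offset. A minimal standalone sketch of that behaviour (hypothetical core numbers; NUMA handling and test skipping omitted):

        package main

        import "fmt"

        func main() {
                cpus := []int{2, 3, 4, 5, 6, 7, 8, 9} // usable host cores (assumed)
                lastCpu := 0

                vppCpus := cpus[lastCpu : lastCpu+3] // CpusPerVppContainer = 3 -> cores 2, 3, 4
                lastCpu += 3
                clientCpus := cpus[lastCpu : lastCpu+1] // CpusPerContainer = 1 -> core 5
                lastCpu += 1

                fmt.Println("vpp:", vppCpus, "client:", clientCpus, "next offset:", lastCpu)
        }
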
@@ -113,14 +128,12 @@ func (c *CpuAllocatorT) readCpus() error {
                                tmpCpus = tmpCpus[1:]
                        }
 
-                       // make c.cpus divisible by maxContainerCount * nCpus, so we don't have to check which numa will be used
-                       // and we can use offsets
-                       countToRemove := len(tmpCpus) % (c.maxContainerCount * *NConfiguredCpus)
-                       if countToRemove >= len(tmpCpus) {
-                               return fmt.Errorf("requested too many CPUs per container (%d), should be no more "+
-                                       "than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
+                       c.cpus = append(c.cpus, tmpCpus...)
+                       if i == 0 {
+                               c.numa0 = append(c.numa0, tmpCpus...)
+                       } else {
+                               c.numa1 = append(c.numa1, tmpCpus...)
                        }
-                       c.cpus = append(c.cpus, tmpCpus[:len(tmpCpus)-countToRemove]...)
                        tmpCpus = tmpCpus[:0]
                }
        } else {
@@ -169,7 +182,6 @@ func CpuAllocator() (*CpuAllocatorT, error) {
        if cpuAllocator == nil {
                var err error
                cpuAllocator = new(CpuAllocatorT)
-               cpuAllocator.maxContainerCount = 4
                err = cpuAllocator.readCpus()
                if err != nil {
                        return nil, err
index bfc6b7a..039d004 100644 extras/hs-test/infra/hst_suite.go
@@ -29,7 +29,8 @@ const (
 )
 
 var IsUnconfiguring = flag.Bool("unconfigure", false, "remove topology")
-var NConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to vpp")
+var NConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to non-vpp containers")
+var NConfiguredVppCpus = flag.Int("vpp_cpus", 1, "number of CPUs assigned to vpp containers")
 var VppSourceFileDir = flag.String("vppsrc", "", "vpp source file directory")
 var IsDebugBuild = flag.Bool("debug_build", false, "some paths are different with debug build")
 var UseCpu0 = flag.Bool("cpu0", false, "use cpu0")
@@ -37,19 +38,21 @@ var IsLeakCheck = flag.Bool("leak_check", false, "run leak-check tests")
 
 type HstSuite struct {
        HstCommon
-       AllContainers     map[string]*Container
-       StartedContainers []*Container
-       NetConfigs        []NetConfig
-       NetInterfaces     map[string]*NetInterface
-       Ip4AddrAllocator  *Ip4AddressAllocator
-       Ip6AddrAllocator  *Ip6AddressAllocator
-       TestIds           map[string]string
-       CpuAllocator      *CpuAllocatorT
-       CpuContexts       []*CpuContext
-       CpuCount          int
-       Docker            *client.Client
-       CoverageRun       bool
-       numOfNewPorts     int
+       AllContainers       map[string]*Container
+       StartedContainers   []*Container
+       NetConfigs          []NetConfig
+       NetInterfaces       map[string]*NetInterface
+       Ip4AddrAllocator    *Ip4AddressAllocator
+       Ip6AddrAllocator    *Ip6AddressAllocator
+       TestIds             map[string]string
+       CpuAllocator        *CpuAllocatorT
+       CpuContexts         []*CpuContext
+       CpusPerContainer    int
+       CpusPerVppContainer int
+       Docker              *client.Client
+       CoverageRun         bool
+       numOfNewPorts       int
+       SkipIfNotEnoughCpus bool
 }
 
 type colors struct {
@@ -136,42 +139,29 @@ func (s *HstSuite) SetupSuite() {
 
        var err error
        s.CpuAllocator, err = CpuAllocator()
+       s.CpuAllocator.suite = s
        if err != nil {
                Fail("failed to init cpu allocator: " + fmt.Sprint(err))
        }
-       s.CpuCount = *NConfiguredCpus
+       s.CpusPerContainer = *NConfiguredCpus
+       s.CpusPerVppContainer = *NConfiguredVppCpus
        s.CoverageRun = *IsCoverage
 }
 
 func (s *HstSuite) AllocateCpus(containerName string) []int {
        var cpuCtx *CpuContext
        var err error
-       currentTestName := CurrentSpecReport().LeafNodeText
-
-       if strings.Contains(currentTestName, "MTTest") {
-               prevContainerCount := s.CpuAllocator.maxContainerCount
-               if strings.Contains(containerName, "vpp") {
-                       // CPU range is assigned based on the Ginkgo process index (or build number if
-                       // running in the CI), *NConfiguredCpus and a maxContainerCount.
-                       // maxContainerCount is set to 4 when CpuAllocator is initialized.
-                       // 4 is not a random number - all of our suites use a maximum of 4 containers simultaneously,
-                       // and it's also the maximum number of containers we can run with *NConfiguredCpus=2 (with CPU0=true)
-                       // on processors with 8 threads. Currently, the CpuAllocator puts all cores into a slice,
-                       // makes the length of the slice divisible by 4x*NConfiguredCpus, and then the minCpu and
-                       // maxCpu (range) for each container is calculated. Then we just offset based on minCpu,
-                       // the number of started containers and *NConfiguredCpus. This way, every container
-                       // uses the correct CPUs, even if multiple NUMA nodes are available.
-                       // However, because of this, if we want to assign different number of cores to different containers,
-                       // we have to change maxContainerCount to manipulate the CPU range. Hopefully a temporary workaround.
-                       s.CpuAllocator.maxContainerCount = 1
-                       cpuCtx, err = s.CpuAllocator.Allocate(1, 3, 0)
-               } else {
-                       s.CpuAllocator.maxContainerCount = 3
-                       cpuCtx, err = s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount, 2)
-               }
-               s.CpuAllocator.maxContainerCount = prevContainerCount
+
+       if strings.Contains(containerName, "vpp") {
+               // CPUs are allocated based on s.CpusPerVppContainer/s.CpusPerContainer (defaults can be overridden globally
+               // or per test) and 'lastCpu' which serves as an offset. 'lastCpu' is incremented by 4 for each
+               // GinkgoParallelProcess() in SetupTest() in hst_suite, because all suites use 4 containers
+               // at most with 1 CPU each. GinkgoParallelProcess() offset doesn't impact MW or solo tests.
+               // Numa aware cpu allocation will use the second numa
+               // node if a container doesn't "fit" into the first node.
+               cpuCtx, err = s.CpuAllocator.Allocate(s.CpusPerVppContainer, s.CpuAllocator.lastCpu)
        } else {
-               cpuCtx, err = s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount, 0)
+               cpuCtx, err = s.CpuAllocator.Allocate(s.CpusPerContainer, s.CpuAllocator.lastCpu)
        }
 
        s.AssertNil(err)
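
A rough illustration of the per-process offset that SetupTest() applies (see the hunk below): with the default CPUS=1 / VPP_CPUS=1 and at most four containers per test, each parallel Ginkgo process works in its own window of four CPU indices, which is where the stride of 4 comes from; multi-worker and solo tests run serially, so the offset does not matter for them:

        package main

        import "fmt"

        func main() {
                containersPerTest := []int{1, 1, 1, 1} // up to 4 containers, 1 CPU each (assumed defaults)
                for process := 1; process <= 2; process++ {
                        lastCpu := (process - 1) * 4 // as set in SetupTest()
                        for i, n := range containersPerTest {
                                fmt.Printf("process %d, container %d -> CPU index %d..%d\n",
                                        process, i, lastCpu, lastCpu+n-1)
                                lastCpu += n
                        }
                }
        }
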
@@ -201,6 +191,10 @@ func (s *HstSuite) TeardownSuite() {
 
 func (s *HstSuite) TeardownTest() {
        s.HstCommon.TeardownTest()
+       s.SkipIfNotEnoughCpus = false
+       // reset to defaults
+       s.CpusPerContainer = *NConfiguredCpus
+       s.CpusPerVppContainer = *NConfiguredVppCpus
        coreDump := s.WaitForCoreDump()
        s.ResetContainers()
 
@@ -223,6 +217,8 @@ func (s *HstSuite) SkipIfNotCoverage() {
 
 func (s *HstSuite) SetupTest() {
        s.HstCommon.SetupTest()
+       // doesn't impact MW/solo tests
+       s.CpuAllocator.lastCpu = (GinkgoParallelProcess() - 1) * 4
        s.StartedContainers = s.StartedContainers[:0]
        s.SkipIfUnconfiguring()
        s.SetupContainers()
@@ -291,23 +287,6 @@ func (s *HstSuite) SkipIfMultiWorker(args ...any) {
        }
 }
 
-func (s *HstSuite) SkipIfNotEnoughAvailableCpus() {
-       var maxRequestedCpu int
-       availableCpus := len(s.CpuAllocator.cpus) - 1
-
-       if *UseCpu0 {
-               availableCpus++
-       }
-
-       maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
-
-       if availableCpus < maxRequestedCpu {
-               s.Skip(fmt.Sprintf("Test case cannot allocate requested cpus "+
-                       "(%d containers * %d cpus, %d available). Try using 'CPU0=true'",
-                       s.CpuAllocator.maxContainerCount, s.CpuCount, availableCpus))
-       }
-}
-
 func (s *HstSuite) SkipUnlessLeakCheck() {
        if !*IsLeakCheck {
                s.Skip("leak-check tests excluded")
@@ -384,6 +363,7 @@ func (s *HstSuite) WaitForCoreDump() bool {
 }
 
 func (s *HstSuite) ResetContainers() {
+       s.CpuAllocator.lastCpu = 0
        for _, container := range s.StartedContainers {
                container.stop()
                s.Log("Removing container " + container.Name)
index e80682d..a9f3d02 100644 extras/hs-test/infra/suite_cpu_pinning.go
@@ -15,8 +15,7 @@ var cpuPinningSoloTests = map[string][]func(s *CpuPinningSuite){}
 
 type CpuPinningSuite struct {
        HstSuite
-       previousMaxContainerCount int
-       Interfaces                struct {
+       Interfaces struct {
                Tap *NetInterface
        }
        Containers struct {
@@ -42,10 +41,8 @@ func (s *CpuPinningSuite) SetupSuite() {
 
 func (s *CpuPinningSuite) SetupTest() {
        // Skip if we cannot allocate 3 CPUs for test container
-       s.previousMaxContainerCount = s.CpuAllocator.maxContainerCount
-       s.CpuCount = 3
-       s.CpuAllocator.maxContainerCount = 1
-       s.SkipIfNotEnoughAvailableCpus()
+       s.CpusPerVppContainer = 3
+       s.SkipIfNotEnoughCpus = true
 
        s.HstSuite.SetupTest()
        vpp, err := s.Containers.Vpp.newVppInstance(s.Containers.Vpp.AllocatedCpus)
@@ -60,8 +57,7 @@ func (s *CpuPinningSuite) SetupTest() {
 func (s *CpuPinningSuite) TeardownTest() {
        defer s.HstSuite.TeardownTest()
        // reset vars
-       s.CpuCount = *NConfiguredCpus
-       s.CpuAllocator.maxContainerCount = s.previousMaxContainerCount
+       s.CpusPerContainer = *NConfiguredCpus
 }
 
 var _ = Describe("CpuPinningSuite", Ordered, ContinueOnFailure, func() {
index b007086..96395d3 100644 extras/hs-test/infra/suite_h2.go
@@ -22,6 +22,7 @@ import (
 
 var h2Tests = map[string][]func(s *H2Suite){}
 var h2SoloTests = map[string][]func(s *H2Suite){}
+var h2MWTests = map[string][]func(s *H2Suite){}
 
 type H2Suite struct {
        HstSuite
@@ -45,6 +46,9 @@ func RegisterH2Tests(tests ...func(s *H2Suite)) {
 func RegisterH2SoloTests(tests ...func(s *H2Suite)) {
        h2SoloTests[GetTestFilename()] = tests
 }
+func RegisterH2MWTests(tests ...func(s *H2Suite)) {
+       h2MWTests[GetTestFilename()] = tests
+}
 
 func (s *H2Suite) SetupSuite() {
        s.HstSuite.SetupSuite()
@@ -146,6 +150,35 @@ var _ = Describe("Http2SoloSuite", Ordered, ContinueOnFailure, Serial, func() {
        }
 })
 
+var _ = Describe("Http2MWSuite", Ordered, ContinueOnFailure, Serial, func() {
+       var s H2Suite
+       BeforeAll(func() {
+               s.SetupSuite()
+       })
+       BeforeEach(func() {
+               s.SkipIfNotEnoughCpus = true
+       })
+       AfterAll(func() {
+               s.TeardownSuite()
+       })
+       AfterEach(func() {
+               s.TeardownTest()
+       })
+
+       for filename, tests := range h2MWTests {
+               for _, test := range tests {
+                       test := test
+                       pc := reflect.ValueOf(test).Pointer()
+                       funcValue := runtime.FuncForPC(pc)
+                       testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+                       It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+                               s.Log(testName + ": BEGIN")
+                               test(&s)
+                       }, SpecTimeout(TestTimeout))
+               }
+       }
+})
+
 type h2specTest struct {
        desc string
 }
@@ -403,7 +436,7 @@ var _ = Describe("H2SpecSuite", Ordered, ContinueOnFailure, func() {
                                o := <-oChan
                                s.Log(o)
                                s.AssertEqual(0, tg.FailedCount)
-                       })
+                       }, SpecTimeout(TestTimeout))
                }
        }
 })
index 76f4289..ed81690 100644 extras/hs-test/infra/suite_ldp.go
@@ -13,6 +13,7 @@ import (
 
 var ldpTests = map[string][]func(s *LdpSuite){}
 var ldpSoloTests = map[string][]func(s *LdpSuite){}
+var ldpMWTests = map[string][]func(s *LdpSuite){}
 
 type LdpSuite struct {
        HstSuite
@@ -37,6 +38,9 @@ func RegisterLdpTests(tests ...func(s *LdpSuite)) {
 func RegisterSoloLdpTests(tests ...func(s *LdpSuite)) {
        ldpSoloTests[GetTestFilename()] = tests
 }
+func RegisterLdpMWTests(tests ...func(s *LdpSuite)) {
+       ldpMWTests[GetTestFilename()] = tests
+}
 
 func (s *LdpSuite) SetupSuite() {
        time.Sleep(1 * time.Second)
@@ -214,3 +218,33 @@ var _ = Describe("LdpSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
                }
        }
 })
+
+var _ = Describe("LdpMWSuite", Ordered, ContinueOnFailure, Serial, func() {
+       var s LdpSuite
+       BeforeAll(func() {
+               s.SetupSuite()
+       })
+       BeforeEach(func() {
+               s.SkipIfNotEnoughCpus = true
+       })
+       AfterAll(func() {
+               s.TeardownSuite()
+       })
+       AfterEach(func() {
+               s.TeardownTest()
+       })
+
+       // https://onsi.github.io/ginkgo/#dynamically-generating-specs
+       for filename, tests := range ldpMWTests {
+               for _, test := range tests {
+                       test := test
+                       pc := reflect.ValueOf(test).Pointer()
+                       funcValue := runtime.FuncForPC(pc)
+                       testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+                       It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+                               s.Log(testName + ": BEGIN")
+                               test(&s)
+                       }, SpecTimeout(TestTimeout))
+               }
+       }
+})
index e464ef7..f7fc403 100644 extras/hs-test/infra/suite_no_topo.go
@@ -12,6 +12,7 @@ import (
 
 var noTopoTests = map[string][]func(s *NoTopoSuite){}
 var noTopoSoloTests = map[string][]func(s *NoTopoSuite){}
+var noTopoMWTests = map[string][]func(s *NoTopoSuite){}
 
 type NoTopoSuite struct {
        HstSuite
@@ -40,6 +41,9 @@ func RegisterNoTopoTests(tests ...func(s *NoTopoSuite)) {
 func RegisterNoTopoSoloTests(tests ...func(s *NoTopoSuite)) {
        noTopoSoloTests[GetTestFilename()] = tests
 }
+func RegisterNoTopoMWTests(tests ...func(s *NoTopoSuite)) {
+       noTopoMWTests[GetTestFilename()] = tests
+}
 
 func (s *NoTopoSuite) SetupSuite() {
        s.HstSuite.SetupSuite()
@@ -245,3 +249,32 @@ var _ = Describe("NoTopoSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
                }
        }
 })
+
+var _ = Describe("NoTopoMWSuite", Ordered, ContinueOnFailure, Serial, func() {
+       var s NoTopoSuite
+       BeforeAll(func() {
+               s.SetupSuite()
+       })
+       BeforeEach(func() {
+               s.SkipIfNotEnoughCpus = true
+       })
+       AfterAll(func() {
+               s.TeardownSuite()
+       })
+       AfterEach(func() {
+               s.TeardownTest()
+       })
+
+       for filename, tests := range noTopoMWTests {
+               for _, test := range tests {
+                       test := test
+                       pc := reflect.ValueOf(test).Pointer()
+                       funcValue := runtime.FuncForPC(pc)
+                       testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+                       It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+                               s.Log(testName + ": BEGIN")
+                               test(&s)
+                       }, SpecTimeout(TestTimeout))
+               }
+       }
+})
index 44cc6bb..ae3f203 100644 extras/hs-test/infra/suite_vpp_proxy.go
@@ -49,14 +49,17 @@ type VppProxySuite struct {
 
 var vppProxyTests = map[string][]func(s *VppProxySuite){}
 var vppProxySoloTests = map[string][]func(s *VppProxySuite){}
+var vppProxyMWTests = map[string][]func(s *VppProxySuite){}
 
 func RegisterVppProxyTests(tests ...func(s *VppProxySuite)) {
        vppProxyTests[GetTestFilename()] = tests
 }
-
 func RegisterVppProxySoloTests(tests ...func(s *VppProxySuite)) {
        vppProxySoloTests[GetTestFilename()] = tests
 }
+func RegisterVppProxyMWTests(tests ...func(s *VppProxySuite)) {
+       vppProxyMWTests[GetTestFilename()] = tests
+}
 
 func (s *VppProxySuite) SetupSuite() {
        s.HstSuite.SetupSuite()
@@ -313,6 +316,35 @@ var _ = Describe("VppProxySuiteSolo", Ordered, ContinueOnFailure, Serial, func()
        }
 })
 
+var _ = Describe("VppProxyMWSuite", Ordered, ContinueOnFailure, Serial, func() {
+       var s VppProxySuite
+       BeforeAll(func() {
+               s.SetupSuite()
+       })
+       BeforeEach(func() {
+               s.SkipIfNotEnoughCpus = true
+       })
+       AfterAll(func() {
+               s.TeardownSuite()
+       })
+       AfterEach(func() {
+               s.TeardownTest()
+       })
+
+       for filename, tests := range vppProxyMWTests {
+               for _, test := range tests {
+                       test := test
+                       pc := reflect.ValueOf(test).Pointer()
+                       funcValue := runtime.FuncForPC(pc)
+                       testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+                       It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+                               s.Log(testName + ": BEGIN")
+                               test(&s)
+                       }, SpecTimeout(TestTimeout))
+               }
+       }
+})
+
 var _ = Describe("H2SpecProxySuite", Ordered, ContinueOnFailure, func() {
        var s VppProxySuite
        BeforeAll(func() {
@@ -379,5 +411,4 @@ var _ = Describe("H2SpecProxySuite", Ordered, ContinueOnFailure, func() {
                        s.AssertEqual(0, tg.FailedCount)
                }, SpecTimeout(TestTimeout))
        }
-
 })
index aad821f..7043864 100644 extras/hs-test/infra/suite_vpp_udp_proxy.go
@@ -31,6 +31,7 @@ type VppUdpProxySuite struct {
 
 var vppUdpProxyTests = map[string][]func(s *VppUdpProxySuite){}
 var vppUdpProxySoloTests = map[string][]func(s *VppUdpProxySuite){}
+var vppUdpProxyMWTests = map[string][]func(s *VppUdpProxySuite){}
 
 func RegisterVppUdpProxyTests(tests ...func(s *VppUdpProxySuite)) {
        vppUdpProxyTests[GetTestFilename()] = tests
@@ -39,6 +40,9 @@ func RegisterVppUdpProxyTests(tests ...func(s *VppUdpProxySuite)) {
 func RegisterVppUdpProxySoloTests(tests ...func(s *VppUdpProxySuite)) {
        vppUdpProxySoloTests[GetTestFilename()] = tests
 }
+func RegisterVppUdpProxyMWTests(tests ...func(s *VppUdpProxySuite)) {
+       vppUdpProxyMWTests[GetTestFilename()] = tests
+}
 
 func (s *VppUdpProxySuite) SetupSuite() {
        s.HstSuite.SetupSuite()
@@ -210,3 +214,32 @@ var _ = Describe("VppUdpProxySuiteSolo", Ordered, ContinueOnFailure, Serial, fun
                }
        }
 })
+
+var _ = Describe("VppUdpProxyMWSuite", Ordered, ContinueOnFailure, Serial, func() {
+       var s VppUdpProxySuite
+       BeforeAll(func() {
+               s.SetupSuite()
+       })
+       BeforeEach(func() {
+               s.SkipIfNotEnoughCpus = true
+       })
+       AfterAll(func() {
+               s.TeardownSuite()
+       })
+       AfterEach(func() {
+               s.TeardownTest()
+       })
+
+       for filename, tests := range vppUdpProxyMWTests {
+               for _, test := range tests {
+                       test := test
+                       pc := reflect.ValueOf(test).Pointer()
+                       funcValue := runtime.FuncForPC(pc)
+                       testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+                       It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+                               s.Log(testName + ": BEGIN")
+                               test(&s)
+                       }, SpecTimeout(TestTimeout))
+               }
+       }
+})
index 43b44a7..050a1fc 100644 extras/hs-test/ldp_test.go
@@ -169,10 +169,9 @@ func RedisBenchmarkTest(s *LdpSuite) {
                if *NConfiguredCpus == 1 {
                        cmd = "redis-benchmark -q --threads 1 -h " + serverVethAddress
                } else {
-                       cmd = "redis-benchmark -q --threads " + fmt.Sprint(*NConfiguredCpus) + "-h " + serverVethAddress
+                       cmd = "redis-benchmark -q --threads " + fmt.Sprint(s.CpusPerContainer) + "-h " + serverVethAddress
                }
                s.StartClientApp(s.Containers.ClientApp, cmd, clnCh, clnRes)
-
        }()
 
        // 4.5 minutes
index 0eed1d8..e239ccd 100644 extras/hs-test/proxy_test.go
@@ -24,28 +24,34 @@ func init() {
        RegisterVppProxyTests(VppProxyHttpGetTcpTest, VppProxyHttpGetTlsTest, VppProxyHttpPutTcpTest, VppProxyHttpPutTlsTest,
                VppConnectProxyGetTest, VppConnectProxyPutTest, VppHttpsConnectProxyGetTest, VppH2ConnectProxyGetTest,
                VppH2ConnectProxyPutTest)
-       RegisterVppProxySoloTests(VppProxyHttpGetTcpMTTest, VppProxyHttpPutTcpMTTest, VppProxyTcpIperfMTTest,
-               VppProxyUdpIperfMTTest, VppConnectProxyStressTest, VppConnectProxyStressMTTest, VppConnectProxyConnectionFailedMTTest)
+       RegisterVppProxyMWTests(VppProxyHttpGetTcpMWTest, VppProxyHttpPutTcpMWTest, VppProxyTcpIperfMWTest,
+               VppProxyUdpIperfMWTest, VppConnectProxyStressMWTest, VppConnectProxyConnectionFailedMWTest)
+       RegisterVppProxySoloTests(VppConnectProxyStressTest)
        RegisterVppUdpProxyTests(VppProxyUdpTest, VppConnectUdpProxyTest, VppConnectUdpInvalidCapsuleTest,
                VppConnectUdpUnknownCapsuleTest, VppConnectUdpClientCloseTest, VppConnectUdpInvalidTargetTest)
-       RegisterVppUdpProxySoloTests(VppProxyUdpMigrationMTTest, VppConnectUdpStressMTTest, VppConnectUdpStressTest)
+       RegisterVppUdpProxySoloTests(VppConnectUdpStressTest)
+       RegisterVppUdpProxyMWTests(VppProxyUdpMigrationMWTest, VppConnectUdpStressMWTest)
        RegisterEnvoyProxyTests(EnvoyHttpGetTcpTest, EnvoyHttpPutTcpTest)
        RegisterNginxProxySoloTests(NginxMirroringTest, MirrorMultiThreadTest)
 }
 
-func VppProxyHttpGetTcpMTTest(s *VppProxySuite) {
+func VppProxyHttpGetTcpMWTest(s *VppProxySuite) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        VppProxyHttpGetTcpTest(s)
 }
 
-func VppProxyTcpIperfMTTest(s *VppProxySuite) {
-       vppProxyIperfMTTest(s, "tcp")
+func VppProxyTcpIperfMWTest(s *VppProxySuite) {
+       vppProxyIperfMWTest(s, "tcp")
 }
 
-func VppProxyUdpIperfMTTest(s *VppProxySuite) {
-       vppProxyIperfMTTest(s, "udp")
+func VppProxyUdpIperfMWTest(s *VppProxySuite) {
+       vppProxyIperfMWTest(s, "udp")
 }
 
-func vppProxyIperfMTTest(s *VppProxySuite, proto string) {
+func vppProxyIperfMWTest(s *VppProxySuite, proto string) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        s.Containers.IperfC.Run()
        s.Containers.IperfS.Run()
        vppProxy := s.Containers.VppProxy.VppInstance
@@ -110,7 +116,9 @@ func VppProxyHttpGetTlsTest(s *VppProxySuite) {
        s.CurlDownloadResource(uri)
 }
 
-func VppProxyHttpPutTcpMTTest(s *VppProxySuite) {
+func VppProxyHttpPutTcpMWTest(s *VppProxySuite) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        VppProxyHttpPutTcpTest(s)
 }
 
@@ -187,7 +195,9 @@ func VppH2ConnectProxyGetTest(s *VppProxySuite) {
        s.AssertContains(log, "CONNECT tunnel: HTTP/2 negotiated")
 }
 
-func VppConnectProxyConnectionFailedMTTest(s *VppProxySuite) {
+func VppConnectProxyConnectionFailedMWTest(s *VppProxySuite) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        s.SetupNginxServer()
        s.ConfigureVppProxy("http", s.Ports.Proxy)
 
@@ -337,7 +347,9 @@ func VppConnectProxyStressTest(s *VppProxySuite) {
        vppConnectProxyStressLoad(s, strconv.Itoa(int(s.Ports.Proxy)))
 }
 
-func VppConnectProxyStressMTTest(s *VppProxySuite) {
+func VppConnectProxyStressMWTest(s *VppProxySuite) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        remoteServerConn := s.StartEchoServer()
        defer remoteServerConn.Close()
 
@@ -372,7 +384,9 @@ func VppProxyUdpTest(s *VppUdpProxySuite) {
        s.AssertEqual([]byte("hello"), b[:n])
 }
 
-func VppProxyUdpMigrationMTTest(s *VppUdpProxySuite) {
+func VppProxyUdpMigrationMWTest(s *VppUdpProxySuite) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        remoteServerConn := s.StartEchoServer()
        defer remoteServerConn.Close()
 
@@ -645,7 +659,9 @@ func VppConnectUdpStressTest(s *VppUdpProxySuite) {
        vppConnectUdpStressLoad(s)
 }
 
-func VppConnectUdpStressMTTest(s *VppUdpProxySuite) {
+func VppConnectUdpStressMWTest(s *VppUdpProxySuite) {
+       s.CpusPerVppContainer = 3
+       s.SetupTest()
        remoteServerConn := s.StartEchoServer()
        defer remoteServerConn.Close()