CPUS=1
endif
+ifeq ($(VPP_CPUS),)
+VPP_CPUS=1
+endif
+
ifeq ($(PARALLEL),)
PARALLEL=1
endif
@echo " DEBUG=[true|false] - attach VPP to GDB"
@echo " TEST=[name1,name2...] - specific test(s) to run"
@echo " SKIP=[name1,name2...] - specific test(s) to skip"
- @echo " CPUS=[n-cpus] - number of cpus to allocate to VPP and containers"
+ @echo " CPUS=[n] - number of cpus to allocate to each non-VPP container (default = 1)"
+ @echo " VPP_CPUS=[n] - number of cpus to allocate to each VPP container (default = 1)"
@echo " VPPSRC=[path-to-vpp-src] - path to vpp source files (for gdb)"
- @echo " PARALLEL=[n-cpus] - number of test processes to spawn to run in parallel"
+ @echo " PARALLEL=[n] - number of test processes to spawn to run in parallel"
@echo " REPEAT=[n] - repeat tests up to N times or until a failure occurs"
@echo " CPU0=[true|false] - use cpu0"
@echo " DRYRUN=[true|false] - set up containers but don't run tests"
@bash ./hs_test.sh --persist=$(PERSIST) --verbose=$(VERBOSE) \
--unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST) --cpus=$(CPUS) \
--vppsrc=$(VPPSRC) --parallel=$(PARALLEL) --repeat=$(REPEAT) --cpu0=$(CPU0) \
- --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) --timeout=$(TIMEOUT); \
+ --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) --timeout=$(TIMEOUT) \
+ --vpp_cpus=$(VPP_CPUS); \
./script/compress.sh $$?
.PHONY: test-debug
@bash ./hs_test.sh --persist=$(PERSIST) --verbose=$(VERBOSE) \
--unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST) --cpus=$(CPUS) \
--vppsrc=$(VPPSRC) --parallel=$(PARALLEL) --repeat=$(REPEAT) --debug_build=true \
- --cpu0=$(CPU0) --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) --timeout=$(TIMEOUT); \
+ --cpu0=$(CPU0) --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) --timeout=$(TIMEOUT) \
+ --vpp_cpus=$(VPP_CPUS); \
./script/compress.sh $$?
.PHONY: wipe-lcov
-@bash ./hs_test.sh --coverage=true --persist=$(PERSIST) --verbose=$(VERBOSE) \
--unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST-HS) --cpus=$(CPUS) \
--vppsrc=$(VPPSRC) --cpu0=$(CPU0) --dryrun=$(DRYRUN) --skip=$(SKIP) --no_color=$(NO_COLOR) \
- --timeout=$(TIMEOUT); \
+ --timeout=$(TIMEOUT) --vpp_cpus=$(VPP_CPUS); \
./script/compress.sh $$?
$(MAKE) -C ../.. test-cov-post-standalone HS_TEST=1
.PHONY: test-leak
test-leak: .deps.ok .build_debug.ok
- @bash ./hs_test.sh --test=$(TEST) --debug_build=true --leak_check=true --vppsrc=$(VPPSRC) --timeout=$(TIMEOUT)
+ @bash ./hs_test.sh --test=$(TEST) --debug_build=true --leak_check=true --vppsrc=$(VPPSRC) --timeout=$(TIMEOUT) \
+ --vpp_cpus=$(VPP_CPUS);
.PHONY: test-perf
test-perf: FORCE_BUILD=false
-Assumed are two docker containers, each with its own VPP instance running. One VPP then pings the other.
+The example assumes two Docker containers, each running its own VPP instance; one VPP then pings the other.
This can be put in file ``extras/hs-test/my_test.go`` and run with command ``make test TEST=MyTest``.
-To add a multi-worker test, name it ``[name]MTTest``. Doing this, the framework will allocate 3 CPUs to a VPP container, no matter what ``CPUS`` is set to.
-Only a single multi-worker VPP container is supported for now. Please register multi-worker tests as Solo tests to avoid reusing the same cores
-when running in parallel.
+To add a multi-worker test, register it with a multi-worker suite. The suite's ``BeforeEach`` block must *not* call ``s.SetupTest()``.
+Instead, set the desired core counts with ``s.CpusPerContainer`` and/or ``s.CpusPerVppContainer`` and call ``s.SetupTest()`` at the beginning of the test, as in the example below.
::
func init(){
RegisterMySuiteTests(MyTest)
- RegisterSoloMySuiteTests(MyMTTest)
+ RegisterMySuiteMWTests(MyMWTest)
}
- func MyMTTest(s *MySuite){
+ func MyMWTest(s *MySuite){
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
--cpus=*)
args="$args -cpus ${i#*=}"
;;
+ --vpp_cpus=*)
+ args="$args -vpp_cpus ${i#*=}"
+ ;;
--vppsrc=*)
args="$args -vppsrc ${i#*=}"
;;
func init() {
RegisterH2Tests(Http2TcpGetTest, Http2TcpPostTest, Http2MultiplexingTest, Http2TlsTest, Http2ContinuationTxTest)
- RegisterH2SoloTests(Http2MultiplexingMTTest)
+ RegisterH2MWTests(Http2MultiplexingMWTest)
}
func Http2TcpGetTest(s *H2Suite) {
s.AssertContains(o, " 0 timeout")
}
-func Http2MultiplexingMTTest(s *H2Suite) {
+func Http2MultiplexingMWTest(s *H2Suite) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
vpp := s.Containers.Vpp.VppInstance
serverAddress := s.VppAddr() + ":" + s.Ports.Port1
vpp.Vppctl("http tps uri tcp://" + serverAddress + " no-zc")
HttpStaticRedirectTest)
RegisterNoTopoSoloTests(HttpStaticPromTest, HttpGetTpsTest, HttpGetTpsInterruptModeTest, PromConcurrentConnectionsTest,
PromMemLeakTest, HttpClientPostMemLeakTest, HttpInvalidClientRequestMemLeakTest, HttpPostTpsTest, HttpPostTpsInterruptModeTest,
- PromConsecutiveConnectionsTest, HttpGetTpsTlsTest, HttpPostTpsTlsTest, HttpClientGetRepeatMTTest, HttpClientPtrGetRepeatMTTest)
+ PromConsecutiveConnectionsTest, HttpGetTpsTlsTest, HttpPostTpsTlsTest)
+ RegisterNoTopoMWTests(HttpClientGetRepeatMWTest, HttpClientPtrGetRepeatMWTest)
RegisterNoTopo6SoloTests(HttpClientGetResponseBody6Test, HttpClientGetTlsResponseBody6Test)
}
s.AssertContains(file_contents, response)
}
-func HttpClientGetRepeatMTTest(s *NoTopoSuite) {
+func HttpClientGetRepeatMWTest(s *NoTopoSuite) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
httpClientRepeat(s, "", "sessions 2")
}
-func HttpClientPtrGetRepeatMTTest(s *NoTopoSuite) {
+func HttpClientPtrGetRepeatMWTest(s *NoTopoSuite) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
httpClientRepeat(s, "", "use-ptr sessions 2")
}
"strings"
. "fd.io/hs-test/infra/common"
- . "github.com/onsi/ginkgo/v2"
)
var CgroupPath = "/sys/fs/cgroup/"
}
type CpuAllocatorT struct {
- cpus []int
- maxContainerCount int
+ cpus []int
+ numa0 []int
+ numa1 []int
+ lastCpu int
+ suite *HstSuite
}
func iterateAndAppend(start int, end int, slice []int) []int {
var cpuAllocator *CpuAllocatorT = nil
-func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*CpuContext, error) {
+func (c *CpuAllocatorT) Allocate(nCpus int, offset int) (*CpuContext, error) {
var cpuCtx CpuContext
// indexes, not actual cores
var minCpu, maxCpu int
- minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
- maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
+ minCpu = offset
+ maxCpu = nCpus - 1 + offset
if len(c.cpus)-1 < maxCpu {
- err := fmt.Errorf("could not allocate %d CPUs; available count: %d; attempted to allocate cores with index %d-%d; max index: %d;\n"+
- "available cores: %v", nCpus*containerCount, len(c.cpus), minCpu, maxCpu, len(c.cpus)-1, c.cpus)
+ msg := fmt.Sprintf("could not allocate %d CPUs; available count: %d; attempted to allocate cores with index %d-%d; max index: %d;\n"+
+ "available cores: %v", nCpus, len(c.cpus), minCpu, maxCpu, len(c.cpus)-1, c.cpus)
+ if c.suite.SkipIfNotEnoughCpus {
+ c.suite.Skip("skipping: " + msg)
+ }
+ err := fmt.Errorf("%s", msg)
return nil, err
}
- if containerCount == 1 {
- cpuCtx.cpus = c.cpus[minCpu : minCpu+nCpus]
- } else if containerCount > 1 && containerCount <= c.maxContainerCount {
- cpuCtx.cpus = c.cpus[minCpu+(nCpus*(containerCount-1)) : minCpu+(nCpus*containerCount)]
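+ // NUMA-aware allocation keeps a container's CPUs on a single node: try node 0 first,
+ // fall back to node 1, and fail if neither node has enough cores left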
+ if NumaAwareCpuAlloc {
+ if len(c.numa0) > maxCpu {
+ c.suite.Log("Allocating CPUs from numa #0")
+ cpuCtx.cpus = c.numa0[minCpu : minCpu+nCpus]
+ } else if len(c.numa1) > maxCpu {
+ c.suite.Log("Allocating CPUs from numa #1")
+ cpuCtx.cpus = c.numa1[minCpu : minCpu+nCpus]
+ } else {
+ err := fmt.Errorf("could not allocate %d CPUs; not enough CPUs in either numa node", nCpus)
+ return nil, err
+ }
} else {
- return nil, fmt.Errorf("too many containers; CPU allocation for >%d containers is not implemented", c.maxContainerCount)
+ cpuCtx.cpus = c.cpus[minCpu : minCpu+nCpus]
}
+
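+ // remember where this allocation ended so the next container continues from there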
+ c.lastCpu = minCpu + nCpus
cpuCtx.cpuAllocator = c
return &cpuCtx, nil
}
tmpCpus = tmpCpus[1:]
}
- // make c.cpus divisible by maxContainerCount * nCpus, so we don't have to check which numa will be used
- // and we can use offsets
- countToRemove := len(tmpCpus) % (c.maxContainerCount * *NConfiguredCpus)
- if countToRemove >= len(tmpCpus) {
- return fmt.Errorf("requested too many CPUs per container (%d), should be no more "+
- "than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
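+ // keep a flat list of all usable cores plus per-NUMA-node lists so Allocate()
+ // can prefer a single node when NumaAwareCpuAlloc is enabled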
+ c.cpus = append(c.cpus, tmpCpus...)
+ if i == 0 {
+ c.numa0 = append(c.numa0, tmpCpus...)
+ } else {
+ c.numa1 = append(c.numa1, tmpCpus...)
}
- c.cpus = append(c.cpus, tmpCpus[:len(tmpCpus)-countToRemove]...)
tmpCpus = tmpCpus[:0]
}
} else {
if cpuAllocator == nil {
var err error
cpuAllocator = new(CpuAllocatorT)
- cpuAllocator.maxContainerCount = 4
err = cpuAllocator.readCpus()
if err != nil {
return nil, err
)
var IsUnconfiguring = flag.Bool("unconfigure", false, "remove topology")
-var NConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to vpp")
+var NConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to non-vpp containers")
+var NConfiguredVppCpus = flag.Int("vpp_cpus", 1, "number of CPUs assigned to vpp containers")
var VppSourceFileDir = flag.String("vppsrc", "", "vpp source file directory")
var IsDebugBuild = flag.Bool("debug_build", false, "some paths are different with debug build")
var UseCpu0 = flag.Bool("cpu0", false, "use cpu0")
type HstSuite struct {
HstCommon
- AllContainers map[string]*Container
- StartedContainers []*Container
- NetConfigs []NetConfig
- NetInterfaces map[string]*NetInterface
- Ip4AddrAllocator *Ip4AddressAllocator
- Ip6AddrAllocator *Ip6AddressAllocator
- TestIds map[string]string
- CpuAllocator *CpuAllocatorT
- CpuContexts []*CpuContext
- CpuCount int
- Docker *client.Client
- CoverageRun bool
- numOfNewPorts int
+ AllContainers map[string]*Container
+ StartedContainers []*Container
+ NetConfigs []NetConfig
+ NetInterfaces map[string]*NetInterface
+ Ip4AddrAllocator *Ip4AddressAllocator
+ Ip6AddrAllocator *Ip6AddressAllocator
+ TestIds map[string]string
+ CpuAllocator *CpuAllocatorT
+ CpuContexts []*CpuContext
+ CpusPerContainer int
+ CpusPerVppContainer int
+ Docker *client.Client
+ CoverageRun bool
+ numOfNewPorts int
+ SkipIfNotEnoughCpus bool // skip the test instead of failing when the requested CPUs cannot be allocated
}
type colors struct {
var err error
s.CpuAllocator, err = CpuAllocator()
if err != nil {
Fail("failed to init cpu allocator: " + fmt.Sprint(err))
}
+ s.CpuAllocator.suite = s
- s.CpuCount = *NConfiguredCpus
+ s.CpusPerContainer = *NConfiguredCpus
+ s.CpusPerVppContainer = *NConfiguredVppCpus
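+ // per-test overrides of these two fields are reset back to the flag defaults in TeardownTest()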
s.CoverageRun = *IsCoverage
}
func (s *HstSuite) AllocateCpus(containerName string) []int {
var cpuCtx *CpuContext
var err error
- currentTestName := CurrentSpecReport().LeafNodeText
-
- if strings.Contains(currentTestName, "MTTest") {
- prevContainerCount := s.CpuAllocator.maxContainerCount
- if strings.Contains(containerName, "vpp") {
- // CPU range is assigned based on the Ginkgo process index (or build number if
- // running in the CI), *NConfiguredCpus and a maxContainerCount.
- // maxContainerCount is set to 4 when CpuAllocator is initialized.
- // 4 is not a random number - all of our suites use a maximum of 4 containers simultaneously,
- // and it's also the maximum number of containers we can run with *NConfiguredCpus=2 (with CPU0=true)
- // on processors with 8 threads. Currently, the CpuAllocator puts all cores into a slice,
- // makes the length of the slice divisible by 4x*NConfiguredCpus, and then the minCpu and
- // maxCpu (range) for each container is calculated. Then we just offset based on minCpu,
- // the number of started containers and *NConfiguredCpus. This way, every container
- // uses the correct CPUs, even if multiple NUMA nodes are available.
- // However, because of this, if we want to assign different number of cores to different containers,
- // we have to change maxContainerCount to manipulate the CPU range. Hopefully a temporary workaround.
- s.CpuAllocator.maxContainerCount = 1
- cpuCtx, err = s.CpuAllocator.Allocate(1, 3, 0)
- } else {
- s.CpuAllocator.maxContainerCount = 3
- cpuCtx, err = s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount, 2)
- }
- s.CpuAllocator.maxContainerCount = prevContainerCount
+
+ if strings.Contains(containerName, "vpp") {
+ // CPUs are allocated based on s.CpusPerVppContainer/s.CpusPerContainer (defaults can be overridden
+ // globally or per test) and 'lastCpu', which serves as an offset. SetupTest() in hst_suite sets
+ // 'lastCpu' to (GinkgoParallelProcess()-1)*4, because all suites use at most 4 containers with
+ // 1 CPU each; the per-process offset doesn't impact MW or solo tests, which run serially.
+ // NUMA-aware CPU allocation uses the second NUMA node if a container doesn't "fit" into the first one.
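+ // Example (illustrative): an MW test that sets s.CpusPerVppContainer = 3 gets CPU
+ // indexes 0-2 for its VPP container; the next (non-VPP) container then continues at index 3.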
+ cpuCtx, err = s.CpuAllocator.Allocate(s.CpusPerVppContainer, s.CpuAllocator.lastCpu)
} else {
- cpuCtx, err = s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount, 0)
+ cpuCtx, err = s.CpuAllocator.Allocate(s.CpusPerContainer, s.CpuAllocator.lastCpu)
}
s.AssertNil(err)
func (s *HstSuite) TeardownTest() {
s.HstCommon.TeardownTest()
+ s.SkipIfNotEnoughCpus = false
+ // reset to defaults
+ s.CpusPerContainer = *NConfiguredCpus
+ s.CpusPerVppContainer = *NConfiguredVppCpus
coreDump := s.WaitForCoreDump()
s.ResetContainers()
func (s *HstSuite) SetupTest() {
s.HstCommon.SetupTest()
+ // MW and solo tests run serially, so this per-process offset doesn't impact them
+ s.CpuAllocator.lastCpu = (GinkgoParallelProcess() - 1) * 4
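+ // e.g. Ginkgo process #1 starts allocating at CPU index 0, process #2 at index 4, and so on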
s.StartedContainers = s.StartedContainers[:0]
s.SkipIfUnconfiguring()
s.SetupContainers()
}
}
-func (s *HstSuite) SkipIfNotEnoughAvailableCpus() {
- var maxRequestedCpu int
- availableCpus := len(s.CpuAllocator.cpus) - 1
-
- if *UseCpu0 {
- availableCpus++
- }
-
- maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
-
- if availableCpus < maxRequestedCpu {
- s.Skip(fmt.Sprintf("Test case cannot allocate requested cpus "+
- "(%d containers * %d cpus, %d available). Try using 'CPU0=true'",
- s.CpuAllocator.maxContainerCount, s.CpuCount, availableCpus))
- }
-}
-
func (s *HstSuite) SkipUnlessLeakCheck() {
if !*IsLeakCheck {
s.Skip("leak-check tests excluded")
}
func (s *HstSuite) ResetContainers() {
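+ // start the next test's allocations from the beginning of the CPU list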
+ s.CpuAllocator.lastCpu = 0
for _, container := range s.StartedContainers {
container.stop()
s.Log("Removing container " + container.Name)
type CpuPinningSuite struct {
HstSuite
- previousMaxContainerCount int
- Interfaces struct {
+ Interfaces struct {
Tap *NetInterface
}
Containers struct {
func (s *CpuPinningSuite) SetupTest() {
// Skip if we cannot allocate 3 CPUs for test container
- s.previousMaxContainerCount = s.CpuAllocator.maxContainerCount
- s.CpuCount = 3
- s.CpuAllocator.maxContainerCount = 1
- s.SkipIfNotEnoughAvailableCpus()
+ s.CpusPerVppContainer = 3
+ s.SkipIfNotEnoughCpus = true
s.HstSuite.SetupTest()
vpp, err := s.Containers.Vpp.newVppInstance(s.Containers.Vpp.AllocatedCpus)
func (s *CpuPinningSuite) TeardownTest() {
defer s.HstSuite.TeardownTest()
// reset vars
- s.CpuCount = *NConfiguredCpus
- s.CpuAllocator.maxContainerCount = s.previousMaxContainerCount
+ s.CpusPerVppContainer = *NConfiguredVppCpus
}
var _ = Describe("CpuPinningSuite", Ordered, ContinueOnFailure, func() {
var h2Tests = map[string][]func(s *H2Suite){}
var h2SoloTests = map[string][]func(s *H2Suite){}
+var h2MWTests = map[string][]func(s *H2Suite){}
type H2Suite struct {
HstSuite
func RegisterH2SoloTests(tests ...func(s *H2Suite)) {
h2SoloTests[GetTestFilename()] = tests
}
+func RegisterH2MWTests(tests ...func(s *H2Suite)) {
+ h2MWTests[GetTestFilename()] = tests
+}
func (s *H2Suite) SetupSuite() {
s.HstSuite.SetupSuite()
}
})
+var _ = Describe("Http2MWSuite", Ordered, ContinueOnFailure, Serial, func() {
+ var s H2Suite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SkipIfNotEnoughCpus = true
+ })
+ AfterAll(func() {
+ s.TeardownSuite()
+ })
+ AfterEach(func() {
+ s.TeardownTest()
+ })
+
+ for filename, tests := range h2MWTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(TestTimeout))
+ }
+ }
+})
+
type h2specTest struct {
desc string
}
o := <-oChan
s.Log(o)
s.AssertEqual(0, tg.FailedCount)
- })
+ }, SpecTimeout(TestTimeout))
}
}
})
var ldpTests = map[string][]func(s *LdpSuite){}
var ldpSoloTests = map[string][]func(s *LdpSuite){}
+var ldpMWTests = map[string][]func(s *LdpSuite){}
type LdpSuite struct {
HstSuite
func RegisterSoloLdpTests(tests ...func(s *LdpSuite)) {
ldpSoloTests[GetTestFilename()] = tests
}
+func RegisterLdpMWTests(tests ...func(s *LdpSuite)) {
+ ldpMWTests[GetTestFilename()] = tests
+}
func (s *LdpSuite) SetupSuite() {
time.Sleep(1 * time.Second)
}
}
})
+
+var _ = Describe("LdpMWSuite", Ordered, ContinueOnFailure, Serial, func() {
+ var s LdpSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SkipIfNotEnoughCpus = true
+ })
+ AfterAll(func() {
+ s.TeardownSuite()
+ })
+ AfterEach(func() {
+ s.TeardownTest()
+ })
+
+ // https://onsi.github.io/ginkgo/#dynamically-generating-specs
+ for filename, tests := range ldpMWTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(TestTimeout))
+ }
+ }
+})
var noTopoTests = map[string][]func(s *NoTopoSuite){}
var noTopoSoloTests = map[string][]func(s *NoTopoSuite){}
+var noTopoMWTests = map[string][]func(s *NoTopoSuite){}
type NoTopoSuite struct {
HstSuite
func RegisterNoTopoSoloTests(tests ...func(s *NoTopoSuite)) {
noTopoSoloTests[GetTestFilename()] = tests
}
+func RegisterNoTopoMWTests(tests ...func(s *NoTopoSuite)) {
+ noTopoMWTests[GetTestFilename()] = tests
+}
func (s *NoTopoSuite) SetupSuite() {
s.HstSuite.SetupSuite()
}
}
})
+
+var _ = Describe("NoTopoMWSuite", Ordered, ContinueOnFailure, Serial, func() {
+ var s NoTopoSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SkipIfNotEnoughCpus = true
+ })
+ AfterAll(func() {
+ s.TeardownSuite()
+ })
+ AfterEach(func() {
+ s.TeardownTest()
+ })
+
+ for filename, tests := range noTopoMWTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(TestTimeout))
+ }
+ }
+})
var vppProxyTests = map[string][]func(s *VppProxySuite){}
var vppProxySoloTests = map[string][]func(s *VppProxySuite){}
+var vppProxyMWTests = map[string][]func(s *VppProxySuite){}
func RegisterVppProxyTests(tests ...func(s *VppProxySuite)) {
vppProxyTests[GetTestFilename()] = tests
}
-
func RegisterVppProxySoloTests(tests ...func(s *VppProxySuite)) {
vppProxySoloTests[GetTestFilename()] = tests
}
+func RegisterVppProxyMWTests(tests ...func(s *VppProxySuite)) {
+ vppProxyMWTests[GetTestFilename()] = tests
+}
func (s *VppProxySuite) SetupSuite() {
s.HstSuite.SetupSuite()
}
})
+var _ = Describe("VppProxyMWSuite", Ordered, ContinueOnFailure, Serial, func() {
+ var s VppProxySuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SkipIfNotEnoughCpus = true
+ })
+ AfterAll(func() {
+ s.TeardownSuite()
+ })
+ AfterEach(func() {
+ s.TeardownTest()
+ })
+
+ for filename, tests := range vppProxyMWTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(TestTimeout))
+ }
+ }
+})
+
var _ = Describe("H2SpecProxySuite", Ordered, ContinueOnFailure, func() {
var s VppProxySuite
BeforeAll(func() {
s.AssertEqual(0, tg.FailedCount)
}, SpecTimeout(TestTimeout))
}
-
})
var vppUdpProxyTests = map[string][]func(s *VppUdpProxySuite){}
var vppUdpProxySoloTests = map[string][]func(s *VppUdpProxySuite){}
+var vppUdpProxyMWTests = map[string][]func(s *VppUdpProxySuite){}
func RegisterVppUdpProxyTests(tests ...func(s *VppUdpProxySuite)) {
vppUdpProxyTests[GetTestFilename()] = tests
func RegisterVppUdpProxySoloTests(tests ...func(s *VppUdpProxySuite)) {
vppUdpProxySoloTests[GetTestFilename()] = tests
}
+func RegisterVppUdpProxyMWTests(tests ...func(s *VppUdpProxySuite)) {
+ vppUdpProxyMWTests[GetTestFilename()] = tests
+}
func (s *VppUdpProxySuite) SetupSuite() {
s.HstSuite.SetupSuite()
}
}
})
+
+var _ = Describe("VppUdpProxyMWSuite", Ordered, ContinueOnFailure, Serial, func() {
+ var s VppUdpProxySuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SkipIfNotEnoughCpus = true
+ })
+ AfterAll(func() {
+ s.TeardownSuite()
+ })
+ AfterEach(func() {
+ s.TeardownTest()
+ })
+
+ for filename, tests := range vppUdpProxyMWTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO", "VPP Multi-Worker"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(TestTimeout))
+ }
+ }
+})
-if *NConfiguredCpus == 1 {
+if s.CpusPerContainer == 1 {
cmd = "redis-benchmark -q --threads 1 -h " + serverVethAddress
} else {
- cmd = "redis-benchmark -q --threads " + fmt.Sprint(*NConfiguredCpus) + "-h " + serverVethAddress
+ cmd = "redis-benchmark -q --threads " + fmt.Sprint(s.CpusPerContainer) + " -h " + serverVethAddress
}
s.StartClientApp(s.Containers.ClientApp, cmd, clnCh, clnRes)
-
}()
// 4.5 minutes
RegisterVppProxyTests(VppProxyHttpGetTcpTest, VppProxyHttpGetTlsTest, VppProxyHttpPutTcpTest, VppProxyHttpPutTlsTest,
VppConnectProxyGetTest, VppConnectProxyPutTest, VppHttpsConnectProxyGetTest, VppH2ConnectProxyGetTest,
VppH2ConnectProxyPutTest)
- RegisterVppProxySoloTests(VppProxyHttpGetTcpMTTest, VppProxyHttpPutTcpMTTest, VppProxyTcpIperfMTTest,
- VppProxyUdpIperfMTTest, VppConnectProxyStressTest, VppConnectProxyStressMTTest, VppConnectProxyConnectionFailedMTTest)
+ RegisterVppProxyMWTests(VppProxyHttpGetTcpMWTest, VppProxyHttpPutTcpMWTest, VppProxyTcpIperfMWTest,
+ VppProxyUdpIperfMWTest, VppConnectProxyStressMWTest, VppConnectProxyConnectionFailedMWTest)
+ RegisterVppProxySoloTests(VppConnectProxyStressTest)
RegisterVppUdpProxyTests(VppProxyUdpTest, VppConnectUdpProxyTest, VppConnectUdpInvalidCapsuleTest,
VppConnectUdpUnknownCapsuleTest, VppConnectUdpClientCloseTest, VppConnectUdpInvalidTargetTest)
- RegisterVppUdpProxySoloTests(VppProxyUdpMigrationMTTest, VppConnectUdpStressMTTest, VppConnectUdpStressTest)
+ RegisterVppUdpProxySoloTests(VppConnectUdpStressTest)
+ RegisterVppUdpProxyMWTests(VppProxyUdpMigrationMWTest, VppConnectUdpStressMWTest)
RegisterEnvoyProxyTests(EnvoyHttpGetTcpTest, EnvoyHttpPutTcpTest)
RegisterNginxProxySoloTests(NginxMirroringTest, MirrorMultiThreadTest)
}
-func VppProxyHttpGetTcpMTTest(s *VppProxySuite) {
+func VppProxyHttpGetTcpMWTest(s *VppProxySuite) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
VppProxyHttpGetTcpTest(s)
}
-func VppProxyTcpIperfMTTest(s *VppProxySuite) {
- vppProxyIperfMTTest(s, "tcp")
+func VppProxyTcpIperfMWTest(s *VppProxySuite) {
+ vppProxyIperfMWTest(s, "tcp")
}
-func VppProxyUdpIperfMTTest(s *VppProxySuite) {
- vppProxyIperfMTTest(s, "udp")
+func VppProxyUdpIperfMWTest(s *VppProxySuite) {
+ vppProxyIperfMWTest(s, "udp")
}
-func vppProxyIperfMTTest(s *VppProxySuite, proto string) {
+func vppProxyIperfMWTest(s *VppProxySuite, proto string) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
s.Containers.IperfC.Run()
s.Containers.IperfS.Run()
vppProxy := s.Containers.VppProxy.VppInstance
s.CurlDownloadResource(uri)
}
-func VppProxyHttpPutTcpMTTest(s *VppProxySuite) {
+func VppProxyHttpPutTcpMWTest(s *VppProxySuite) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
VppProxyHttpPutTcpTest(s)
}
s.AssertContains(log, "CONNECT tunnel: HTTP/2 negotiated")
}
-func VppConnectProxyConnectionFailedMTTest(s *VppProxySuite) {
+func VppConnectProxyConnectionFailedMWTest(s *VppProxySuite) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
s.SetupNginxServer()
s.ConfigureVppProxy("http", s.Ports.Proxy)
vppConnectProxyStressLoad(s, strconv.Itoa(int(s.Ports.Proxy)))
}
-func VppConnectProxyStressMTTest(s *VppProxySuite) {
+func VppConnectProxyStressMWTest(s *VppProxySuite) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
remoteServerConn := s.StartEchoServer()
defer remoteServerConn.Close()
s.AssertEqual([]byte("hello"), b[:n])
}
-func VppProxyUdpMigrationMTTest(s *VppUdpProxySuite) {
+func VppProxyUdpMigrationMWTest(s *VppUdpProxySuite) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
remoteServerConn := s.StartEchoServer()
defer remoteServerConn.Close()
vppConnectUdpStressLoad(s)
}
-func VppConnectUdpStressMTTest(s *VppUdpProxySuite) {
+func VppConnectUdpStressMWTest(s *VppUdpProxySuite) {
+ s.CpusPerVppContainer = 3
+ s.SetupTest()
remoteServerConn := s.StartEchoServer()
defer remoteServerConn.Close()