hs-test: simplify core allocation 56/42656/2
author Adrian Villin <[email protected]>
Wed, 2 Apr 2025 17:01:17 +0000 (19:01 +0200)
committer Florin Coras <[email protected]>
Thu, 3 Apr 2025 07:01:16 +0000 (07:01 +0000)
- no longer distinguishing between release and debug builds in the CI
- core allocation will always start from core 1 (or 0 if CPU0=true)

Type: improvement

Change-Id: I4568bda01bd90fba14ca81f6669bdab3b7521415
Signed-off-by: Adrian Villin <[email protected]>
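
With the CI special case gone, every worker derives its CPU index window purely from Ginkgo's 1-based parallel process number. A minimal standalone sketch of the new arithmetic (cpuWindow is a name introduced here for illustration; in the patch the computation is inline in CpuAllocatorT.Allocate):

    // cpuWindow mirrors the simplified index computation in Allocate:
    // worker p (1-based) owns maxContainerCount*nCpus consecutive indexes,
    // shifted by a caller-supplied offset.
    func cpuWindow(p, maxContainerCount, nCpus, offset int) (minCpu, maxCpu int) {
        minCpu = (p-1)*maxContainerCount*nCpus + offset
        maxCpu = p*maxContainerCount*nCpus - 1 + offset
        return
    }

    // e.g. cpuWindow(2, 4, 2, 0) == (8, 15): the second worker gets indexes 8-15.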
extras/hs-test/framework_test.go
extras/hs-test/infra/cpu.go
extras/hs-test/infra/hst_suite.go

index f3bf1be..be62b61 100644
@@ -33,6 +33,8 @@ func TestHst(t *testing.T) {
                TestTimeout = time.Minute * 5
        }
 
+       RunningInCi = os.Getenv("BUILD_NUMBER") != ""
+
        output, err := os.ReadFile("/sys/devices/system/node/online")
        if err == nil && strings.Contains(string(output), "-") {
                NumaAwareCpuAlloc = true
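
Two environment probes now drive the global flags: RunningInCi keys off the Jenkins-style BUILD_NUMBER variable, and NUMA awareness off the shape of the kernel's node list. The sample file contents below are assumptions for illustration; the check only looks for a '-' range marker:

    // assumed contents of /sys/devices/system/node/online:
    //   "0-1\n" (two NUMA nodes) -> NumaAwareCpuAlloc = true
    //   "0\n"   (single node)    -> flag stays false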
index 743a4ed..4afc96b 100644
@@ -6,7 +6,6 @@ import (
        "fmt"
        "os"
        "os/exec"
-       "strconv"
        "strings"
 
        . "github.com/onsi/ginkgo/v2"
@@ -21,8 +20,6 @@ type CpuContext struct {
 
 type CpuAllocatorT struct {
        cpus              []int
-       runningInCi       bool
-       buildNumber       int
        maxContainerCount int
 }
 
@@ -40,13 +37,8 @@ func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*Cp
        // indexes, not actual cores
        var minCpu, maxCpu int
 
-       if c.runningInCi {
-               minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus) + offset
-               maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1 + offset
-       } else {
-               minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
-               maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
-       }
+       minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
+       maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
 
        if len(c.cpus)-1 < maxCpu {
                err := fmt.Errorf("could not allocate %d CPUs; available count: %d; attempted to allocate cores with index %d-%d; max index: %d;\n"+
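
The bounds check above still guards overcommit under the simplified window. A worked example with assumed numbers (not taken from the patch):

    // len(c.cpus) = 32, maxContainerCount = 4, nCpus = 2, offset = 0:
    //   worker 4 -> window 24-31, fits (len(c.cpus)-1 == 31)
    //   worker 5 -> window 32-39, 31 < 39 -> Allocate returns the error above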
@@ -66,33 +58,9 @@ func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*Cp
 }
 
 func (c *CpuAllocatorT) readCpus() error {
-       var first, second, third, fourth int
-       var file *os.File
-       var err error
-
-       if c.runningInCi {
-               // non-debug build runs on node0, debug on node1
-               if *IsDebugBuild {
-                       file, err = os.Open("/sys/devices/system/node/node1/cpulist")
-               } else {
-                       file, err = os.Open("/sys/devices/system/node/node0/cpulist")
-               }
-               if err != nil {
-                       return err
-               }
-               defer file.Close()
-
-               sc := bufio.NewScanner(file)
-               sc.Scan()
-               line := sc.Text()
-               _, err = fmt.Sscanf(line, "%d-%d,%d-%d", &first, &second, &third, &fourth)
-               if err != nil {
-                       return err
-               }
+       var first, second int
 
-               c.cpus = iterateAndAppend(first, second, c.cpus)
-               c.cpus = iterateAndAppend(third, fourth, c.cpus)
-       } else if NumaAwareCpuAlloc {
+       if NumaAwareCpuAlloc {
                var range1, range2 int
                var tmpCpus []int
 
@@ -124,7 +92,7 @@ func (c *CpuAllocatorT) readCpus() error {
                        line := sc.Text()
 
                        for _, coreRange := range strings.Split(line, ",") {
-                               if strings.IndexRune(coreRange, '-') != -1 {
+                               if strings.ContainsRune(coreRange, '-') {
                                        _, err = fmt.Sscanf(coreRange, "%d-%d", &range1, &range2)
                                        if err != nil {
                                                return err
@@ -148,7 +116,8 @@ func (c *CpuAllocatorT) readCpus() error {
                        // and we can use offsets
                        countToRemove := len(tmpCpus) % (c.maxContainerCount * *NConfiguredCpus)
                        if countToRemove >= len(tmpCpus) {
-                               return fmt.Errorf("requested too much CPUs per container (%d) should be no more than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
+                               return fmt.Errorf("requested too many CPUs per container (%d), should be no more "+
+                                       "than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
                        }
                        c.cpus = append(c.cpus, tmpCpus[:len(tmpCpus)-countToRemove]...)
                        tmpCpus = tmpCpus[:0]
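
Only the NUMA-aware cpulist parsing survives the cleanup. A self-contained sketch of the same range expansion (parseCpuList is a hypothetical helper; in the patch the loop lives inline in readCpus and uses the fmt and strings imports already present in cpu.go):

    // parseCpuList expands a kernel cpulist such as "0-7,16-23" (or bare ids
    // like "3") into individual CPU indexes, the way the loop in readCpus does.
    func parseCpuList(line string) ([]int, error) {
        var cpus []int
        for _, coreRange := range strings.Split(line, ",") {
            lo, hi := 0, 0
            if strings.ContainsRune(coreRange, '-') {
                if _, err := fmt.Sscanf(coreRange, "%d-%d", &lo, &hi); err != nil {
                    return nil, err
                }
            } else {
                if _, err := fmt.Sscanf(coreRange, "%d", &lo); err != nil {
                    return nil, err
                }
                hi = lo
            }
            for i := lo; i <= hi; i++ {
                cpus = append(cpus, i)
            }
        }
        return cpus, nil
    }

The trailing trim then keeps len(tmpCpus) a multiple of maxContainerCount * *NConfiguredCpus: 16 parsed cpus against a 4 * 3 budget drop the last 4 entries, so per-worker offsets stay aligned to whole windows.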
@@ -200,16 +169,6 @@ func CpuAllocator() (*CpuAllocatorT, error) {
                var err error
                cpuAllocator = new(CpuAllocatorT)
                cpuAllocator.maxContainerCount = 4
-               buildNumberStr := os.Getenv("BUILD_NUMBER")
-
-               if buildNumberStr != "" {
-                       cpuAllocator.runningInCi = true
-                       // get last digit of build number
-                       cpuAllocator.buildNumber, err = strconv.Atoi(buildNumberStr[len(buildNumberStr)-1:])
-                       if err != nil {
-                               return nil, err
-                       }
-               }
                err = cpuAllocator.readCpus()
                if err != nil {
                        return nil, err
index 5ef4883..c2dfc59 100644
@@ -46,6 +46,7 @@ var ParallelTotal = flag.Lookup("ginkgo.parallel.total")
 var DryRun = flag.Bool("dryrun", false, "set up containers but don't run tests")
 var NumaAwareCpuAlloc bool
 var TestTimeout time.Duration
+var RunningInCi bool
 
 type HstSuite struct {
        AllContainers     map[string]*Container
@@ -443,11 +444,7 @@ func (s *HstSuite) SkipIfNotEnoughAvailableCpus() {
                availableCpus++
        }
 
-       if s.CpuAllocator.runningInCi {
-               maxRequestedCpu = ((s.CpuAllocator.buildNumber + 1) * s.CpuAllocator.maxContainerCount * s.CpuCount)
-       } else {
-               maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
-       }
+       maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
 
        if availableCpus < maxRequestedCpu {
                s.Skip(fmt.Sprintf("Test case cannot allocate requested cpus "+
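
With the CI branch removed here as well, the skip threshold mirrors the allocator's window: peak demand is set by this worker's process number. Illustrative numbers (assumed, not from the patch):

    // GinkgoParallelProcess() = 3, maxContainerCount = 4, s.CpuCount = 2:
    //   maxRequestedCpu = 3 * 4 * 2 = 24
    // a host exposing availableCpus = 20 -> the suite calls s.Skip(...)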
@@ -516,7 +513,7 @@ func (s *HstSuite) WaitForCoreDump() bool {
                                output, _ := exechelper.Output(cmd)
                                AddReportEntry("VPP Backtrace", StringerStruct{Label: string(output)})
                                os.WriteFile(s.getLogDirPath()+"backtrace.log", output, os.FileMode(0644))
-                               if s.CpuAllocator.runningInCi {
+                               if RunningInCi {
                                        err = os.Remove(corePath)
                                        if err == nil {
                                                s.Log("removed " + corePath)