kube-test: bare metal cluster support 15/43615/9
author Adrian Villin <[email protected]>
Wed, 24 Sep 2025 08:52:08 +0000 (04:52 -0400)
committer Florin Coras <[email protected]>
Thu, 16 Oct 2025 16:39:52 +0000 (16:39 +0000)
- script/quick-import.sh simplifies loading images onto cluster nodes
- added NO_REGISTRY option to avoid local-registry issues when
  building docker images
- kube-test now differentiates between a KinD and a bare metal cluster
  (see the sketch after this list)
- the test namespace is deleted at the very end of a test run (REPEAT=N now works)
- added SKIP_CONFIG: when set to true, the cluster config won't get updated
- waiting for daemonsets/deployments is now done with WaitGroups
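For orientation, a condensed sketch of the cluster detection this change introduces (not a verbatim excerpt; the full version lives in framework_test.go below, and the "cluster: kind-kind" marker assumes KinD's default cluster name):

package main

import (
	"os"
	"strings"
)

// detectKindCluster condenses the startup check added in framework_test.go:
// read the kubeconfig (KUBECONFIG env var, else ~/.kube/config) and treat the
// cluster as KinD when the default KinD cluster entry is present.
func detectKindCluster() (bool, error) {
	kubeconfig := os.Getenv("KUBECONFIG")
	if kubeconfig == "" {
		kubeconfig = os.Getenv("HOME") + "/.kube/config"
	}
	contents, err := os.ReadFile(kubeconfig)
	if err != nil {
		return false, err
	}
	return strings.Contains(string(contents), "cluster: kind-kind"), nil
}

Suites branch on the result: KinD runs load images and apply the KinD config template, while bare metal runs take the baremetal-calicovpp-config path and read the CALICOVPP_* env vars.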

Type: test

Change-Id: Ia02b642062b1ce75d3e4bb2bdb408cd592660f97
Signed-off-by: Adrian Villin <[email protected]>
19 files changed:
.gitignore
test-c/kube-test/Makefile
test-c/kube-test/docker/setup-local-registry.sh
test-c/kube-test/framework_test.go
test-c/kube-test/infra/deployment.go
test-c/kube-test/infra/pod.go
test-c/kube-test/infra/suite_base.go
test-c/kube-test/infra/suite_kube.go
test-c/kube-test/infra/suite_large-mtu.go
test-c/kube-test/infra/utils.go
test-c/kube-test/kube_test.go
test-c/kube-test/kube_test.sh
test-c/kube-test/kubernetes/baremetal-calicovpp-config-template.yaml [new file with mode: 0644]
test-c/kube-test/kubernetes/kind-calicovpp-config-template.yaml [moved from test-c/kube-test/kubernetes/calico-config-template.yaml with 100% similarity]
test-c/kube-test/kubernetes/pod-definitions-template.yaml [moved from test-c/kube-test/kubernetes/pod-definitions.yaml with 85% similarity]
test-c/kube-test/script/build-images.sh
test-c/kube-test/script/build_kube.sh
test-c/kube-test/script/quick-import.sh [new file with mode: 0755]
test-c/kube-test/script/setup-cluster.sh

index 963a9da..dcff345 100644 (file)
@@ -154,7 +154,9 @@ compile_commands.json
 /test-c/kube-test/summary/
 /test-c/kube-test/.last_state_hash
 /test-c/kube-test/.kube_deps.ok
-/test-c/kube-test/kubernetes/calico-config.yaml
+/test-c/kube-test/kubernetes/baremetal-calicovpp-config.yaml
+/test-c/kube-test/kubernetes/kind-calicovpp-config.yaml
+/test-c/kube-test/kubernetes/pod-definitions.yaml
 /test-c/kube-test/kubernetes/.vars
 
 # ./configure
index 4ab065e..0e1ffa9 100644 (file)
@@ -54,8 +54,15 @@ help:
        @echo " install-kube-deps        - install software dependencies for kind cluster"
        @echo
        @echo "'make build' and 'make test' arguments:"
-       @echo " UBUNTU_VERSION           - ubuntu version for docker image"
-       @echo " FORCE_BUILD=[true|false] - force docker image building"
+       @echo " UBUNTU_VERSION                       - ubuntu version for docker image"
+       @echo " FORCE_BUILD=[true|false]             - force docker image building"
+       @echo " NO_REGISTRY=[true|false]             - attempts to build docker images without using a local registry"
+       @echo "'make test' args:"
+       @echo " SKIP_CONFIG=[true|false]             - skips cluster config update. Useful if running the same test/suite back to back"
+       @echo " when testing on a bare metal cluster:"
+       @echo " CALICOVPP_INTERFACE=[interface name] - uplink interface name for CalicoVPP"
+       @echo " CALICOVPP_VERSION=[version]          - CalicoVPP VPP and Agent versions"
+       @echo " KUBE_WRK1 and KUBE_WRK2=[name]       - cluster worker names"
        @echo
        @echo "'make test' specific arguments:"
        @echo " PERSIST=[true|false]     - whether clean up topology and dockers after test"
@@ -132,7 +139,7 @@ build-debug: .deps.ok build-vpp-debug
 
 .PHONY: install-kube-deps
 install-kube-deps: .deps.ok
-       -@if ! command -v kube >/dev/null 2>&1; then \
+       -@if ! command -v kind >/dev/null 2>&1; then \
                echo "Installing KinD"; \
                go install sigs.k8s.io/[email protected]; \
                echo "Creating symlink from '$(HOME)/go/bin/kind' to '/usr/bin/kind'"; \
@@ -162,18 +169,22 @@ install-deps:
     else \
         echo "Installing Go 1.23"; \
                wget -t 2 https://go.dev/dl/go1.23.10.linux-$(ARCH).tar.gz -O /tmp/go1.23.10.linux-$(ARCH).tar.gz && sudo tar -C /usr/local -xzf /tmp/go1.23.10.linux-$(ARCH).tar.gz; \
-               sudo ln -s /usr/local/go/bin/go /usr/bin/go ; \
+               echo "Go installed successfully. Add it to your PATH by running:"; \
+        echo "echo 'export PATH=/usr/local/go/bin:\$$PATH' >> ~/.bashrc && source ~/.bashrc"; \
        fi
-       @sudo -E apt-get update
-       @sudo -E apt-get install -y apt-transport-https ca-certificates curl software-properties-common \
-               bridge-utils gpg
-       @if [ ! -f /usr/share/keyrings/docker-archive-keyring.gpg ] ; then \
-               curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg; \
-               echo "deb [arch=$(ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(UBUNTU_CODENAME) stable" \
-                       | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null ; \
-               apt-get update; \
+       @if ! command -v docker >/dev/null 2>&1; then \
+               sudo -E apt-get update; \
+               sudo -E apt-get install -y apt-transport-https ca-certificates curl software-properties-common; \
+               if [ ! -f /usr/share/keyrings/docker-archive-keyring.gpg ] ; then \
+                       curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg; \
+                       echo "deb [arch=$(ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(UBUNTU_CODENAME) stable" \
+                               | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null ; \
+                       apt-get update; \
+               fi; \
+               sudo -E apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin; \
+       else \
+               echo "Docker already installed. You may need to update."; \
        fi
-       @sudo -E apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
        @touch .deps.ok
 
 .PHONY: checkstyle-go
index ea8cb1c..c885763 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # Script to set up a local Docker registry
-
+NO_REGISTRY=${NO_REGISTRY:-"false"}
 set -e
 
 DOCKER_LOGIN_SCRIPT="/scratch/nomad/.docker-ro/dlogin.sh"
@@ -18,6 +18,10 @@ fi
 REGISTRY_NAME="local-registry"
 REGISTRY_PORT=${1:-5001}
 
+if [ "$NO_REGISTRY" = "true" ]; then
+    echo "NO_REGISTRY=true -> not setting up a registry."
+    exit 0
+fi
 # Check if registry container is already running
 if docker container inspect "$REGISTRY_NAME" &>/dev/null; then
     echo "=== Local registry '$REGISTRY_NAME' is already running ==="
index 3aa121d..23b39a8 100644 (file)
@@ -3,6 +3,8 @@ package main
 import (
        "fmt"
        "os"
+       "os/exec"
+       "strings"
        "testing"
        "time"
 
@@ -28,16 +30,43 @@ func TestKube(t *testing.T) {
        TestTimeout = time.Minute * time.Duration(*Timeout)
 
        // creates a file with PPID, used for 'make cleanup-kube'
-       ppid := fmt.Sprint(os.Getppid())
-       ppid = ppid[:len(ppid)-1]
+       Ppid = fmt.Sprint(os.Getppid())
+       Ppid = Ppid[:len(Ppid)-1]
        f, _ := os.Create(".last_ppid")
-       f.Write([]byte(ppid))
+       f.Write([]byte(Ppid))
        f.Close()
 
+       Kubeconfig = os.Getenv("KUBECONFIG")
+       if Kubeconfig == "" {
+               Kubeconfig = os.Getenv("HOME") + "/.kube/config"
+       }
+       _, err := os.Stat(Kubeconfig)
+       if err != nil {
+               fmt.Println("** Kubeconfig not found **")
+               os.Exit(1)
+       }
+       contents, err := os.ReadFile(Kubeconfig)
+       if err != nil {
+               fmt.Println("** Error reading Kubeconfig **")
+               os.Exit(1)
+       }
+       if strings.Contains(string(contents), "cluster: kind-kind") {
+               KindCluster = true
+       }
+       fmt.Printf("\nKubeconfig: '%s'\nKinD cluster: %v\n", Kubeconfig, KindCluster)
+
        RegisterFailHandler(Fail)
-       RunSpecs(t, "Kube Test")
+       RunSpecs(t, "kube-test")
        if *DryRun || *IsPersistent {
                fmt.Println("\033[36m" + "Use 'make cleanup-kube' to remove pods " +
-                       "and namespaces. \nPPID: " + ppid + "\033[0m")
+                       "and namespaces. \nPPID: " + Ppid + "\033[0m")
+       }
+       // deleting the namespace here since we use the same namespace for every suite
+       if !*DryRun && !*IsPersistent {
+               fmt.Println("Deleting kube-test namespace")
+               cmd := exec.Command("kubectl", "delete", "ns", "kube-test"+Ppid)
+               fmt.Println(cmd.String())
+               o, _ := cmd.CombinedOutput()
+               fmt.Printf("%s", string(o))
        }
 }
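Note that the detection above matches the literal substring "cluster: kind-kind", i.e. KinD's default cluster name. A hedged alternative (hypothetical helper, not part of this change) would parse the kubeconfig instead of substring-matching the raw file; it still assumes the "kind-" prefix KinD gives its clusters by default:

package kube_test

import (
	"strings"

	"k8s.io/client-go/tools/clientcmd"
)

// isKindCluster parses the kubeconfig and reports whether any cluster entry
// carries KinD's default "kind-" name prefix.
func isKindCluster(kubeconfigPath string) (bool, error) {
	cfg, err := clientcmd.LoadFromFile(kubeconfigPath)
	if err != nil {
		return false, err
	}
	for name := range cfg.Clusters {
		if strings.HasPrefix(name, "kind-") {
			return true, nil
		}
	}
	return false, nil
}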
index a57b11b..2b3d125 100644 (file)
@@ -11,7 +11,10 @@ import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-func (s *KubeSuite) loadDockerImages() {
+func (s *BaseSuite) loadDockerImages() {
+       if !KindCluster {
+               return
+       }
        s.Log("This may take a while. If you encounter problems, " +
                "try loading docker images manually: 'kind load docker-image [image]'")
 
@@ -27,7 +30,7 @@ func (s *KubeSuite) loadDockerImages() {
        }
 }
 
-func (s *KubeSuite) createNamespace(name string) {
+func (s *BaseSuite) createNamespace(name string) {
        namespace := &corev1.Namespace{
                ObjectMeta: metav1.ObjectMeta{
                        Name: name,
@@ -40,16 +43,16 @@ func (s *KubeSuite) createNamespace(name string) {
        s.Log("Namespace '%s' created", name)
 }
 
-func (s *KubeSuite) deletePod(namespace string, podName string) error {
+func (s *BaseSuite) deletePod(namespace string, podName string) error {
        delete(s.CurrentlyRunning, podName)
        return s.ClientSet.CoreV1().Pods(namespace).Delete(context.TODO(), podName, metav1.DeleteOptions{GracePeriodSeconds: int64Ptr(0)})
 }
 
-func (s *KubeSuite) deleteNamespace(namespace string) error {
+func (s *BaseSuite) DeleteNamespace(namespace string) error {
        return s.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
 }
 
-func (s *KubeSuite) DeployPod(pod *Pod) {
+func (s *BaseSuite) DeployPod(pod *Pod) {
        pod.CreatedPod = &corev1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                        Namespace: s.Namespace,
@@ -71,11 +74,6 @@ func (s *KubeSuite) DeployPod(pod *Pod) {
                                        },
                                        Command:         []string{"tail", "-f", "/dev/null"},
                                        ImagePullPolicy: corev1.PullIfNotPresent,
-                                       Ports: []corev1.ContainerPort{
-                                               {
-                                                       ContainerPort: 5201,
-                                               },
-                                       },
                                },
                        },
                        NodeName: pod.Worker,
index 87bb5bc..5e6ec14 100644 (file)
@@ -16,7 +16,7 @@ import (
 )
 
 type Pod struct {
-       suite         *KubeSuite
+       suite         *BaseSuite
        Name          string
        Image         string
        ContainerName string
@@ -49,7 +49,25 @@ type Config struct {
        Pods []PodYaml `yaml:"pods"`
 }
 
-func (s *KubeSuite) LoadPodConfigs() {
+func (s *BaseSuite) LoadPodConfigs() {
+       envVarsSet := os.Getenv("KUBE_WRK1") != "" && os.Getenv("KUBE_WRK2") != ""
+
+       if KindCluster {
+               if !envVarsSet {
+                       os.Setenv("KUBE_WRK1", "kind-worker")
+                       os.Setenv("KUBE_WRK2", "kind-worker2")
+               }
+               s.Envsubst("kubernetes/pod-definitions-template.yaml", "kubernetes/pod-definitions.yaml")
+       } else {
+               _, err := os.Stat("kubernetes/pod-definitions.yaml")
+               if errors.Is(err, os.ErrNotExist) {
+                       if !envVarsSet {
+                               s.AssertNil(err, "Please set KUBE_WRK1 and KUBE_WRK2 env vars")
+                       }
+                       s.Envsubst("kubernetes/pod-definitions-template.yaml", "kubernetes/pod-definitions.yaml")
+               }
+       }
+
        data, err := os.ReadFile("kubernetes/pod-definitions.yaml")
        s.AssertNil(err)
 
@@ -62,14 +80,14 @@ func (s *KubeSuite) LoadPodConfigs() {
        }
 }
 
-func newPod(suite *KubeSuite, input PodYaml) (*Pod, error) {
+func newPod(suite *BaseSuite, input PodYaml) (*Pod, error) {
        var pod = new(Pod)
        pod.suite = suite
-       pod.Name = input.Name + suite.Ppid
+       pod.Name = input.Name + Ppid
        pod.Image = input.Image[0].Name
        pod.ContainerName = input.Container[0].Name
        pod.Worker = input.Worker[0].Name
-       pod.Namespace = input.Namespace[0].Name + suite.Ppid
+       pod.Namespace = input.Namespace[0].Name + Ppid
 
        if suite.AllPods == nil {
                suite.AllPods = make(map[string]*Pod)
@@ -84,7 +102,7 @@ func newPod(suite *KubeSuite, input PodYaml) (*Pod, error) {
        return pod, nil
 }
 
-func (s *KubeSuite) initPods() {
+func (s *BaseSuite) initPods() {
        s.Pods.Ab = s.getPodsByName("ab")
        s.Pods.ClientGeneric = s.getPodsByName("client-generic")
        s.Pods.ServerGeneric = s.getPodsByName("server-generic")
@@ -92,12 +110,12 @@ func (s *KubeSuite) initPods() {
        s.Pods.NginxProxy = s.getPodsByName("nginx-proxy")
 }
 
-func (s *KubeSuite) getPodsByName(podName string) *Pod {
-       return s.AllPods[podName+s.Ppid]
+func (s *BaseSuite) getPodsByName(podName string) *Pod {
+       return s.AllPods[podName+Ppid]
 }
 
 func (pod *Pod) CopyToPod(src string, dst string) {
-       cmd := exec.Command("kubectl", "--kubeconfig="+pod.suite.KubeconfigPath, "cp", src, pod.Namespace+"/"+pod.Name+":"+dst)
+       cmd := exec.Command("kubectl", "--kubeconfig="+Kubeconfig, "cp", src, pod.Namespace+"/"+pod.Name+":"+dst)
        out, err := cmd.CombinedOutput()
        pod.suite.AssertNil(err, string(out))
 }
index 26bc1da..b9221e7 100644 (file)
@@ -1,6 +1,7 @@
 package kube_test
 
 import (
+       "context"
        "flag"
        "fmt"
        "io"
@@ -8,35 +9,64 @@ import (
        "os"
        "os/exec"
        "strings"
+       "sync"
        "time"
 
+       "github.com/a8m/envsubst"
        "github.com/joho/godotenv"
        . "github.com/onsi/ginkgo/v2"
+
+       "k8s.io/client-go/kubernetes"
+       "k8s.io/client-go/rest"
 )
 
 var IsCoverage = flag.Bool("coverage", false, "use coverage run config")
 var IsPersistent = flag.Bool("persist", false, "persists topology config")
 var IsVerbose = flag.Bool("verbose", false, "verbose test output")
-var WhoAmI = flag.String("whoami", "root", "what user ran kube-test")
 var IsVppDebug = flag.Bool("debug", false, "attach gdb to vpp")
 var DryRun = flag.Bool("dryrun", false, "set up containers but don't run tests")
 var Timeout = flag.Int("timeout", 30, "test timeout override (in minutes)")
 var TestTimeout time.Duration
+var Kubeconfig string
+var KindCluster bool
+var Ppid string
 
 const (
-       LogDir string = "/tmp/kube-test/"
+       LogDir      string = "/tmp/kube-test/"
+       EnvVarsFile string = "kubernetes/.vars"
 )
 
 type BaseSuite struct {
-       Ppid    string
-       Logger  *log.Logger
-       LogFile *os.File
+       ClientSet        *kubernetes.Clientset
+       Config           *rest.Config
+       Namespace        string
+       CurrentlyRunning map[string]*Pod
+       images           []string
+       AllPods          map[string]*Pod
+       MainContext      context.Context
+       Logger           *log.Logger
+       LogFile          *os.File
+       Pods             struct {
+               ServerGeneric *Pod
+               ClientGeneric *Pod
+               Nginx         *Pod
+               NginxProxy    *Pod
+               Ab            *Pod
+       }
+}
+
+type kubeComponent struct {
+       name         string
+       namespace    string
+       resourceType string
+       resourceName string
 }
 
 func init() {
-       cmd := exec.Command("mkdir", "-p", LogDir)
-       if err := cmd.Run(); err != nil {
-               panic(err)
+       if err := os.Mkdir(LogDir, os.FileMode(0777)); err != nil {
+               if !os.IsExist(err) {
+                       panic(fmt.Sprint(err))
+               }
        }
 }
 
@@ -47,14 +77,14 @@ func (s *BaseSuite) Skip(args string) {
 func (s *BaseSuite) SetupTest() {
        TestCounterFunc()
        s.Log("[* TEST SETUP]")
+       s.WaitForComponents()
 }
 
 func (s *BaseSuite) SetupSuite() {
        s.CreateLogger()
        s.Log("[* SUITE SETUP]")
-       s.Ppid = fmt.Sprint(os.Getppid())
-       // remove last number so we have space to prepend a process index (interfaces have a char limit)
-       s.Ppid = s.Ppid[:len(s.Ppid)-1]
+       Ppid = fmt.Sprint(os.Getppid())
+       Ppid = Ppid[:len(Ppid)-1]
 }
 
 func (s *BaseSuite) TeardownTest() {
@@ -71,6 +101,13 @@ func (s *BaseSuite) TeardownSuite() {
        s.Log("[* SUITE TEARDOWN]")
 }
 
+// reads a file and writes a new one with substituted vars
+func (s *BaseSuite) Envsubst(inputPath string, outputPath string) {
+       o, err := envsubst.ReadFile(inputPath)
+       s.AssertNil(err)
+       s.AssertNil(os.WriteFile(outputPath, o, 0644))
+}
+
 func (s *BaseSuite) GetCurrentSuiteName() string {
        return CurrentSpecReport().ContainerHierarchyTexts[0]
 }
@@ -104,39 +141,113 @@ func (s *BaseSuite) Log(log any, arg ...any) {
        }
 }
 
+func (s *BaseSuite) WaitForComponents() {
+       s.Log("Waiting for components.")
+
+       var wg sync.WaitGroup
+
+       // Define all the simple, single-command checks.
+       checks := []kubeComponent{
+               {name: "calico-vpp-node", namespace: "calico-vpp-dataplane", resourceType: "ds", resourceName: "calico-vpp-node"},
+               {name: "calico-node", namespace: "calico-system", resourceType: "ds", resourceName: "calico-node"},
+               {name: "coredns", namespace: "kube-system", resourceType: "deployment", resourceName: "coredns"},
+               {name: "calico-kube-controllers", namespace: "calico-system", resourceType: "deployment", resourceName: "calico-kube-controllers"},
+       }
+
+       wg.Add(len(checks))
+
+       for _, check := range checks {
+               go func(c kubeComponent) {
+                       defer wg.Done()
+
+                       cmd := exec.Command("kubectl", "-n", c.namespace, "rollout", "status", fmt.Sprintf("%s/%s", c.resourceType, c.resourceName))
+                       s.Log(cmd.String())
+
+                       output, err := cmd.CombinedOutput()
+                       s.Log(string(output))
+                       s.AssertNil(err)
+               }(check)
+       }
+
+       wg.Add(1)
+       go func() {
+               defer wg.Done()
+
+               cmd := exec.Command("kubectl", "-n", "calico-apiserver", "rollout", "status", "deployment/calico-apiserver")
+               s.Log(cmd.String())
+               output, err := cmd.CombinedOutput()
+               s.Log(string(output))
+
+               if err != nil {
+                       s.Log("trying calico-system namespace")
+                       cmd = exec.Command("kubectl", "-n", "calico-system", "rollout", "status", "deployment/calico-apiserver")
+                       s.Log(cmd.String())
+                       output, err = cmd.CombinedOutput()
+                       s.Log(string(output))
+               }
+               s.AssertNil(err)
+       }()
+
+       wg.Wait()
+
+       s.Log("All components are ready")
+}
+
 // sets CALICO_NETWORK_CONFIG, ADDITIONAL_VPP_CONFIG, env vars, applies configs and rollout restarts cluster
-func (s *KubeSuite) SetMtuAndRestart(CALICO_NETWORK_CONFIG string, ADDITIONAL_VPP_CONFIG string) {
+func (s *BaseSuite) SetMtuAndRestart(CALICO_NETWORK_CONFIG string, ADDITIONAL_VPP_CONFIG string) {
+       if os.Getenv("SKIP_CONFIG") == "true" {
+               s.Log("** SKIP_CONFIG=true, not updating configuration! **")
+               return
+       }
        os.Setenv("CALICO_NETWORK_CONFIG", CALICO_NETWORK_CONFIG)
        os.Setenv("ADDITIONAL_VPP_CONFIG", ADDITIONAL_VPP_CONFIG)
-       s.AssertNil(godotenv.Load("kubernetes/.vars"))
 
-       s.Envsubst("kubernetes/calico-config-template.yaml", "kubernetes/calico-config.yaml")
+       // kube-test expects a running cluster when tests start. The
+       // kubernetes/.vars file is therefore initialized by script/setup-cluster.sh when testing on a KinD cluster,
+       // but by kube-test itself when testing on a bare metal cluster.
+       if KindCluster {
+               s.AssertNil(godotenv.Load("kubernetes/.vars"))
+               s.Envsubst("kubernetes/kind-calicovpp-config-template.yaml", "kubernetes/kind-calicovpp-config.yaml")
+
+               cmd := exec.Command("kubectl", "apply", "-f", "kubernetes/kind-calicovpp-config.yaml")
+               s.Log(cmd.String())
+               o, err := cmd.CombinedOutput()
+               s.Log(string(o))
+               s.AssertNil(err)
+       } else {
+               fileValues, err := godotenv.Read(EnvVarsFile)
+
+               if err == nil {
+                       s.Log("File '%s' exists. Checking env vars", EnvVarsFile)
+                       s.AssertNil(s.handleExistingVarsFile(fileValues))
+               } else if os.IsNotExist(err) {
+                       s.Log("'%s' not found. Checking env vars", EnvVarsFile)
+                       s.AssertNil(s.handleNewVarsFile())
+               } else {
+                       s.AssertNil(err)
+               }
+               s.AssertNil(godotenv.Load(EnvVarsFile))
+               s.Envsubst("kubernetes/baremetal-calicovpp-config-template.yaml", "kubernetes/baremetal-calicovpp-config.yaml")
+
+               cmd := exec.Command("kubectl", "apply", "-f", "kubernetes/baremetal-calicovpp-config.yaml")
+               s.Log(cmd.String())
+               o, err := cmd.CombinedOutput()
+               s.Log(string(o))
+               s.AssertNil(err)
+       }
 
-       cmd := exec.Command("kubectl", "apply", "-f", "kubernetes/calico-config.yaml")
+       cmd := exec.Command("kubectl", "-n", "calico-vpp-dataplane", "rollout", "restart", "ds/calico-vpp-node")
        s.Log(cmd.String())
        o, err := cmd.CombinedOutput()
        s.Log(string(o))
        s.AssertNil(err)
 
-       cmd = exec.Command("kubectl", "-n", "calico-vpp-dataplane", "rollout", "restart", "ds/calico-vpp-node")
-       s.Log(cmd.String())
-       o, err = cmd.CombinedOutput()
-       s.Log(string(o))
-       s.AssertNil(err)
-
-       cmd = exec.Command("kubectl", "-n", "calico-vpp-dataplane", "rollout", "status", "ds/calico-vpp-node")
-       s.Log(cmd.String())
-       o, err = cmd.CombinedOutput()
-       s.Log(string(o))
-       s.AssertNil(err)
-
-       cmd = exec.Command("kubectl", "-n", "calico-system", "rollout", "status", "ds/calico-node")
-       s.Log(cmd.String())
-       o, err = cmd.CombinedOutput()
-       s.Log(string(o))
-       s.AssertNil(err)
+       s.Log("Config applied, sleeping for 30s")
+       time.Sleep(time.Second * 30)
+}
 
-       // let vpp-dataplane recover, should help with stability issues
-       s.Log("Waiting for 20 seconds")
-       time.Sleep(time.Second * 20)
+func (s *BaseSuite) SkipIfBareMetalCluster() {
+       if !KindCluster {
+               Skip("Kube-Test running on a bare metal cluster. Skipping")
+       }
 }
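One caveat with the WaitGroup fan-out in WaitForComponents above: under Ginkgo, an assertion that fails inside a bare goroutine panics the whole test process unless the goroutine defers GinkgoRecover. A minimal sketch of the same pattern with that guard (sketch only, assuming the imports already present in suite_base.go):

// waitForRollouts is a sketch-only variant of the fan-out above.
// GinkgoRecover routes an assertion failure raised inside the goroutine back
// to the running spec instead of crashing the run.
func (s *BaseSuite) waitForRollouts(checks []kubeComponent) {
	var wg sync.WaitGroup
	for _, check := range checks {
		wg.Add(1)
		go func(c kubeComponent) {
			defer wg.Done()
			defer GinkgoRecover() // report the failure, don't panic the process
			cmd := exec.Command("kubectl", "-n", c.namespace,
				"rollout", "status", fmt.Sprintf("%s/%s", c.resourceType, c.resourceName))
			s.Log(cmd.String())
			output, err := cmd.CombinedOutput()
			s.Log(string(output))
			s.AssertNil(err)
		}(check)
	}
	wg.Wait()
}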
index 1acf31b..ac56c02 100644 (file)
@@ -3,37 +3,19 @@ package kube_test
 import (
        "context"
        "fmt"
-       "os"
        "reflect"
        "regexp"
        "runtime"
        "strings"
 
-       "github.com/a8m/envsubst"
        . "github.com/onsi/ginkgo/v2"
 
        "k8s.io/client-go/kubernetes"
-       "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/clientcmd"
 )
 
 type KubeSuite struct {
        BaseSuite
-       ClientSet        *kubernetes.Clientset
-       Config           *rest.Config
-       Namespace        string
-       KubeconfigPath   string
-       CurrentlyRunning map[string]*Pod
-       images           []string
-       AllPods          map[string]*Pod
-       MainContext      context.Context
-       Pods             struct {
-               ServerGeneric *Pod
-               ClientGeneric *Pod
-               Nginx         *Pod
-               NginxProxy    *Pod
-               Ab            *Pod
-       }
 }
 
 var imagesLoaded bool
@@ -77,26 +59,16 @@ func (s *KubeSuite) SetupSuite() {
        s.CurrentlyRunning = make(map[string]*Pod)
        s.LoadPodConfigs()
        s.initPods()
-       if !imagesLoaded {
-               s.loadDockerImages()
-       }
-
-       if *WhoAmI == "root" {
-               s.KubeconfigPath = "/.kube/config"
-       } else {
-               s.KubeconfigPath = "/home/" + *WhoAmI + "/.kube/config"
-       }
-       s.Log("User: '%s'", *WhoAmI)
-       s.Log("Config path: '%s'", s.KubeconfigPath)
 
        var err error
-       s.Config, err = clientcmd.BuildConfigFromFlags("", s.KubeconfigPath)
+       s.Config, err = clientcmd.BuildConfigFromFlags("", Kubeconfig)
        s.AssertNil(err)
 
        s.ClientSet, err = kubernetes.NewForConfig(s.Config)
        s.AssertNil(err)
 
        if !imagesLoaded {
+               s.loadDockerImages()
                s.createNamespace(s.Namespace)
                imagesLoaded = true
        }
@@ -105,7 +77,7 @@ func (s *KubeSuite) SetupSuite() {
 func (s *KubeSuite) TeardownTest() {
        s.BaseSuite.TeardownTest()
        if len(s.CurrentlyRunning) != 0 {
-               s.Log("Removing:")
+               s.Log("Removing pods:")
                for _, pod := range s.CurrentlyRunning {
                        s.Log("   %s", pod.Name)
                        s.AssertNil(s.deletePod(s.Namespace, pod.Name))
@@ -115,11 +87,6 @@ func (s *KubeSuite) TeardownTest() {
 
 func (s *KubeSuite) TeardownSuite() {
        s.BaseSuite.TeardownSuite()
-       if len(s.CurrentlyRunning) == 0 {
-               return
-       }
-       s.Log("Removing:\n   %s", s.Namespace)
-       s.AssertNil(s.deleteNamespace(s.Namespace))
 }
 
 // Quick and dirty fix for now. Runs 'ldd /usr/lib/libvcl_ldpreload.so'
@@ -170,12 +137,6 @@ func (s *KubeSuite) CreateNginxConfig(pod *Pod) {
        )
 }
 
-func (s *KubeSuite) Envsubst(inputPath string, outputPath string) {
-       o, err := envsubst.ReadFile(inputPath)
-       s.AssertNil(err)
-       os.WriteFile(outputPath, o, 0644)
-}
-
 func (s *KubeSuite) CreateNginxProxyConfig(pod *Pod) {
        pod.Exec(context.TODO(), []string{"/bin/bash", "-c", "mkdir -p /tmp/nginx"})
        values := struct {
@@ -234,7 +195,7 @@ var _ = Describe("KubeSuite", Ordered, ContinueOnFailure, func() {
        }
 })
 
-var _ = Describe("KubeMWSuite", Ordered, ContinueOnFailure, Label("Multi-worker"), func() {
+var _ = Describe("KubeMWSuite", Ordered, ContinueOnFailure, Label("Perf", "Multi-worker"), func() {
        var s KubeSuite
        BeforeAll(func() {
                s.SetupSuite()
index 33e6537..4d7c2a1 100644 (file)
@@ -26,15 +26,18 @@ func (s *LargeMtuSuite) SetupSuite() {
 var _ = Describe("LargeMtuSuite", Ordered, ContinueOnFailure, Label("Large MTU"), func() {
        var s LargeMtuSuite
        BeforeAll(func() {
+               s.SkipIfBareMetalCluster()
                s.SetupSuite()
        })
        BeforeEach(func() {
                s.SetupTest()
        })
        AfterEach(func() {
+               s.SkipIfBareMetalCluster()
                s.TeardownTest()
        })
        AfterAll(func() {
+               s.SkipIfBareMetalCluster()
                s.TeardownSuite()
        })
 
index 8763594..29fb761 100644 (file)
@@ -3,10 +3,13 @@ package kube_test
 import (
        "encoding/json"
        "fmt"
+       "os"
        "path/filepath"
        "runtime"
        "strings"
        "time"
+
+       "github.com/joho/godotenv"
 )
 
 func boolPtr(b bool) *bool {
@@ -150,3 +153,50 @@ func (s *BaseSuite) LogJsonIperfOutput(result IPerfResult) {
        }
        s.Log("*******************************************\n")
 }
+
+func (s *BaseSuite) handleExistingVarsFile(fileValues map[string]string) error {
+       varsToWatch := []string{"CALICOVPP_VERSION", "CALICOVPP_INTERFACE"}
+       needsWrite := false
+
+       for _, key := range varsToWatch {
+               envValue := os.Getenv(key)
+               if envValue != "" {
+                       if fileValue, ok := fileValues[key]; !ok || fileValue != envValue {
+                               s.Log("Updating '%s'. New value: '%s'", key, envValue)
+                               fileValues[key] = envValue
+                               needsWrite = true
+                       }
+               }
+       }
+
+       if needsWrite {
+               if err := godotenv.Write(fileValues, EnvVarsFile); err != nil {
+                       return err
+               }
+               s.Log("File %s updated", EnvVarsFile)
+       } else {
+               s.Log("%s OK", EnvVarsFile)
+       }
+       return nil
+}
+
+func (s *BaseSuite) handleNewVarsFile() error {
+       iface := os.Getenv("CALICOVPP_INTERFACE")
+       version := os.Getenv("CALICOVPP_VERSION")
+
+       if iface != "" && version != "" {
+               newFileValues := map[string]string{
+                       "CALICOVPP_INTERFACE": iface,
+                       "CALICOVPP_VERSION":   version,
+               }
+
+               s.Log("\nCreating '%s' from environment variables\n", EnvVarsFile)
+               if err := godotenv.Write(newFileValues, EnvVarsFile); err != nil {
+                       return err
+               }
+       } else {
+               return fmt.Errorf("Error: '%s' not found and env vars are not set. "+
+                       "To create it, please set both CALICOVPP_INTERFACE and CALICOVPP_VERSION env vars", EnvVarsFile)
+       }
+       return nil
+}
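For reference, the kubernetes/.vars round-trip these helpers maintain relies on godotenv's Read and Write; a minimal standalone sketch (path and values are examples, not taken from this change):

package main

import (
	"fmt"

	"github.com/joho/godotenv"
)

func main() {
	// Write renders KEY="value" lines, which is the format the helpers
	// above expect to find in kubernetes/.vars on the next run.
	vars := map[string]string{
		"CALICOVPP_INTERFACE": "eth1",   // example uplink name
		"CALICOVPP_VERSION":   "latest", // example version tag
	}
	if err := godotenv.Write(vars, "/tmp/.vars"); err != nil {
		panic(err)
	}
	back, err := godotenv.Read("/tmp/.vars")
	if err != nil {
		panic(err)
	}
	fmt.Println(back["CALICOVPP_INTERFACE"]) // eth1
}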
index c35449a..acbdffc 100644 (file)
@@ -42,8 +42,8 @@ func kubeIperfVclTest(s *KubeSuite, clientArgs string) IPerfResult {
 
        o, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c",
                vcl + " " + ldp + " iperf3 -s -D --logfile /iperf_server.log -B " + s.Pods.ServerGeneric.IpAddress})
-       s.Log("Sleeping for 5s")
-       time.Sleep(time.Second * 5)
+       s.Log("Sleeping for 2s")
+       time.Sleep(time.Second * 2)
        s.AssertNil(err)
        out, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c", "pidof iperf3"})
        s.Log(out)
@@ -81,8 +81,8 @@ func kubeIperfVclMtuTest(s *LargeMtuSuite, clientArgs string) IPerfResult {
 
        o, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c",
                vcl + " " + ldp + " iperf3 -s -D --logfile /iperf_server.log -B " + s.Pods.ServerGeneric.IpAddress})
-       s.Log("Sleeping for 5s")
-       time.Sleep(time.Second * 5)
+       s.Log("Sleeping for 2s")
+       time.Sleep(time.Second * 2)
        s.AssertNil(err)
        out, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c", "pidof iperf3"})
        s.Log(out)
index 265b493..abfbd7d 100755 (executable)
@@ -84,8 +84,6 @@ if [ $focused_test -eq 0 ] && { [ $persist_set -eq 1 ] || [ $dryrun_set -eq 1 ];
     exit 2
 fi
 
-args="$args -whoami $(whoami)"
-
 if [ -n "${BUILD_NUMBER}" ]; then
         ginkgo_args="$ginkgo_args --no-color"
 fi
diff --git a/test-c/kube-test/kubernetes/baremetal-calicovpp-config-template.yaml b/test-c/kube-test/kubernetes/baremetal-calicovpp-config-template.yaml
new file mode 100644 (file)
index 0000000..d80426b
--- /dev/null
@@ -0,0 +1,361 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: calico-vpp-dataplane
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-vpp-node-sa
+  namespace: calico-vpp-dataplane
+---
+apiVersion: v1
+data:
+  CALICOVPP_CONFIG_TEMPLATE: |-
+    unix {
+      nodaemon
+      full-coredump
+      cli-listen /var/run/vpp/cli.sock
+      pidfile /run/vpp/vpp.pid
+      exec /etc/vpp/startup.exec
+    }
+    api-trace { on }
+    socksvr {
+        socket-name /var/run/vpp/vpp-api.sock
+    }
+    plugins {
+        plugin default { enable }
+        plugin dpdk_plugin.so { enable }
+        plugin calico_plugin.so { enable }
+        plugin ping_plugin.so { enable }
+        plugin dispatch_trace_plugin.so { enable }
+    }
+    buffers {
+      buffers-per-numa 131072
+    }
+    ${ADDITIONAL_VPP_CONFIG}
+  CALICOVPP_INITIAL_CONFIG: |-
+    {
+      "vppStartupSleepSeconds": 1,
+      "corePattern": "/var/lib/vpp/vppcore.%e.%p"
+    }
+  CALICOVPP_FEATURE_GATES: |-
+    {
+      "vclEnabled": true
+    }
+  CALICOVPP_INTERFACES: |-
+    {
+      "maxPodIfSpec": {
+        "rx": 10, "tx": 10, "rxqsz": 1024, "txqsz": 1024
+      },
+      "defaultPodIfSpec": {
+        "rx": 1, "tx":1, "isl3": true
+      },
+      "vppHostTapSpec": {
+        "rx": 1, "tx":1, "rxqsz": 1024, "txqsz": 1024, "isl3": false
+      },
+      "uplinkInterfaces": [
+        {
+          "interfaceName": "${CALICOVPP_INTERFACE}",
+          "vppDriver": "dpdk"
+        }
+      ]
+    }
+  SERVICE_PREFIX: 10.96.0.0/12
+kind: ConfigMap
+metadata:
+  name: calico-vpp-config
+  namespace: calico-vpp-dataplane
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: calico-vpp-node
+  name: calico-vpp-node
+  namespace: calico-vpp-dataplane
+spec:
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: calico-vpp-node
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8s-app: calico-vpp-node
+    spec:
+      containers:
+      - env:
+        - name: DATASTORE_TYPE
+          value: kubernetes
+        - name: WAIT_FOR_DATASTORE
+          value: "true"
+        - name: NODENAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        envFrom:
+        - configMapRef:
+            name: calico-vpp-config
+        image: docker.io/calicovpp/vpp:${CALICOVPP_VERSION}
+        imagePullPolicy: IfNotPresent
+        name: vpp
+        resources:
+          limits:
+            cpu: "10"
+            hugepages-2Mi: 4Gi
+            memory: 4Gi
+          requests:
+            cpu: "10"
+            memory: 4Gi
+        securityContext:
+          privileged: true
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /lib/firmware
+          name: lib-firmware
+        - mountPath: /var/run/vpp
+          name: vpp-rundir
+        - mountPath: /var/lib/vpp
+          name: vpp-data
+        - mountPath: /etc/vpp
+          name: vpp-config
+        - mountPath: /dev
+          name: devices
+        - mountPath: /sys
+          name: hostsys
+        - mountPath: /run/netns/
+          mountPropagation: Bidirectional
+          name: netns
+        - mountPath: /host
+          name: host-root
+      - env:
+        - name: DATASTORE_TYPE
+          value: kubernetes
+        - name: WAIT_FOR_DATASTORE
+          value: "true"
+        - name: NODENAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        envFrom:
+        - configMapRef:
+            name: calico-vpp-config
+        image: docker.io/calicovpp/agent:${CALICOVPP_VERSION}
+        imagePullPolicy: IfNotPresent
+        name: agent
+        resources:
+          limits:
+            cpu: "2"
+            memory: 1Gi
+          requests:
+            cpu: "2"
+            memory: 1Gi
+        securityContext:
+          privileged: true
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /var/run/calico
+          name: var-run-calico
+        - mountPath: /var/lib/calico/felix-plugins
+          name: felix-plugins
+        - mountPath: /var/run/vpp
+          name: vpp-rundir
+        - mountPath: /run/netns/
+          mountPropagation: Bidirectional
+          name: netns
+      dnsPolicy: ClusterFirst
+      hostNetwork: true
+      hostPID: true
+      nodeSelector:
+        kubernetes.io/os: linux
+      priorityClassName: system-node-critical
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      serviceAccount: calico-vpp-node-sa
+      serviceAccountName: calico-vpp-node-sa
+      terminationGracePeriodSeconds: 10
+      tolerations:
+      - effect: NoSchedule
+        operator: Exists
+      - key: CriticalAddonsOnly
+        operator: Exists
+      - effect: NoExecute
+        operator: Exists
+      volumes:
+      - hostPath:
+          path: /lib/firmware
+          type: ""
+        name: lib-firmware
+      - hostPath:
+          path: /var/run/vpp
+          type: ""
+        name: vpp-rundir
+      - hostPath:
+          path: /var/lib/vpp
+          type: DirectoryOrCreate
+        name: vpp-data
+      - hostPath:
+          path: /etc/vpp
+          type: ""
+        name: vpp-config
+      - hostPath:
+          path: /dev
+          type: ""
+        name: devices
+      - hostPath:
+          path: /sys
+          type: ""
+        name: hostsys
+      - hostPath:
+          path: /var/run/calico
+          type: ""
+        name: var-run-calico
+      - hostPath:
+          path: /run/netns
+          type: ""
+        name: netns
+      - hostPath:
+          path: /var/lib/calico/felix-plugins
+          type: ""
+        name: felix-plugins
+      - hostPath:
+          path: /
+          type: ""
+        name: host-root
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: calico-vpp-node-role
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - nodes
+  - namespaces
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  - services
+  verbs:
+  - watch
+  - list
+  - get
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - watch
+  - list
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - nodes/status
+  verbs:
+  - patch
+  - update
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - networkpolicies
+  verbs:
+  - watch
+  - list
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - namespaces
+  - serviceaccounts
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - pods/status
+  verbs:
+  - patch
+- apiGroups:
+  - crd.projectcalico.org
+  resources:
+  - globalfelixconfigs
+  - felixconfigurations
+  - bgppeers
+  - bgpfilters
+  - globalbgpconfigs
+  - bgpconfigurations
+  - ippools
+  - ipamblocks
+  - globalnetworkpolicies
+  - globalnetworksets
+  - networkpolicies
+  - networksets
+  - clusterinformations
+  - hostendpoints
+  - blockaffinities
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - crd.projectcalico.org
+  resources:
+  - blockaffinities
+  - ipamblocks
+  - ipamhandles
+  verbs:
+  - get
+  - list
+  - create
+  - update
+  - delete
+- apiGroups:
+  - crd.projectcalico.org
+  resources:
+  - ipamconfigs
+  verbs:
+  - get
+- apiGroups:
+  - crd.projectcalico.org
+  resources:
+  - blockaffinities
+  verbs:
+  - watch
diff --git a/test-c/kube-test/kubernetes/pod-definitions.yaml b/test-c/kube-test/kubernetes/pod-definitions-template.yaml
@@ -1,11 +1,11 @@
 definitions:
   image-names:
     - image: &kube-test
-        name: "kube-test/vpp:latest"
+        name: "docker.io/kube-test/vpp:latest"
     - image: &nginx-ldp
-        name: "kube-test/nginx-ldp:latest"
+        name: "docker.io/kube-test/nginx-ldp:latest"
     - image: &ab
-        name: "kube-test/ab:latest"
+        name: "docker.io/kube-test/ab:latest"
 
   container-names:
     - container: &client
@@ -19,9 +19,9 @@ definitions:
 
   worker-names:
     - worker: &worker1
-        name: "kind-worker"
+        name: ${KUBE_WRK1}
     - worker: &worker2
-        name: "kind-worker2"
+        name: ${KUBE_WRK2}
 
 pods:
   - name: "client-generic"
index 32916dd..b855207 100755 (executable)
@@ -12,6 +12,7 @@ DOCKER_BUILD_DIR="/scratch/docker-build"
 DOCKER_CACHE_DIR="${DOCKER_BUILD_DIR}/docker_cache"
 DOCKER_HST_BUILDER="hst_builder"
 DOCKER_LOGIN_SCRIPT="/scratch/nomad/.docker-ro/dlogin.sh"
+NO_REGISTRY=${NO_REGISTRY:-"false"}
 
 if [ -d "${DOCKER_BUILD_DIR}" ] ; then
   mkdir -p "${DOCKER_CACHE_DIR}"
@@ -50,11 +51,17 @@ docker buildx build ${DOCKER_CACHE_ARGS} \
     exit 1
 }
 
-# Push the base image to the local registry
-docker push $BASE_TAG || {
-    echo "Error: Failed to push base image to local registry"
-    exit 1
+if [ "$NO_REGISTRY" = "true" ]; then
+  set -x
+  docker image tag $BASE_TAG kube-test/vpp:latest
+  set +x
+else
+  # Push the base image to the local registry
+  docker push $BASE_TAG || {
+      echo "Error: Failed to push base image to local registry"
+      exit 1
 }
+fi
 
 # Function to build each image
 build_image() {
index aa3d405..5dc10ef 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+NO_REGISTRY=${NO_REGISTRY:-"false"}
 
 if [ "$(lsb_release -is)" != Ubuntu ]; then
        echo "Host stack test framework is supported only on Ubuntu"
@@ -43,16 +44,18 @@ if [ -x "$DOCKER_LOGIN_SCRIPT" ] ; then
   $DOCKER_LOGIN_SCRIPT
 fi
 
-# Set up the local registry before creating containers
-echo "=== Setting up local registry ==="
-if [ -x "$(dirname "$0")/../docker/setup-local-registry.sh" ]; then
-  "$(dirname "$0")/../docker/setup-local-registry.sh" "$REGISTRY_PORT"
-else
-  echo "Warning: setup-local-registry.sh not found or not executable"
-  echo "Attempting to create and use local registry at localhost:5000"
-  if ! docker ps | grep -q "local-registry"; then
-    docker run -d --restart=always -p $REGISTRY_PORT:5000 --name local-registry registry:2
-  fi
+if [ $NO_REGISTRY != "true" ]; then
+    # Set up the local registry before creating containers
+    echo "=== Setting up local registry ==="
+    if [ -x "$(dirname "$0")/../docker/setup-local-registry.sh" ]; then
+      "$(dirname "$0")/../docker/setup-local-registry.sh" "$REGISTRY_PORT"
+    else
+      echo "Warning: setup-local-registry.sh not found or not executable"
+      echo "Attempting to create and use local registry at localhost:5000"
+      if ! docker ps | grep -q "local-registry"; then
+        docker run -d --restart=always -p $REGISTRY_PORT:5000 --name local-registry registry:2
+      fi
+    fi
 fi
 
 echo "Taking build objects from ${VPP_BUILD_ROOT}"
diff --git a/test-c/kube-test/script/quick-import.sh b/test-c/kube-test/script/quick-import.sh
new file mode 100755 (executable)
index 0000000..b08e739
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+CALICOVPP_DIR="$HOME/vpp-dataplane"
+VPP_DIR=$(pwd)
+VPP_DIR=${VPP_DIR%test-c*}
+
+if [ "$1" = "" ]; then
+    echo "This script will save and import images to both nodes.
+To import kube-test images only, \$2 = kt
+To import CalicoVPP images only, \$2 = cv
+To import all, leave \$2 empty.
+Only run this script on the master node.
+    Usage:
+    ./quick-import.sh user@remote:path [ kt | cv ]"
+    exit 1
+fi
+
+remote_user="${1%%:*}"
+remote_path="${1#*:}"
+
+set -xe
+
+if [ "$2" = "kt" ] || [ "$2" = "" ]; then
+    make build
+    docker save -o kube-test-images.tar $(docker images | grep kube-test | awk '{print $1":"$2}')
+    sudo ctr -n k8s.io images import kube-test-images.tar
+    scp kube-test-images.tar $1
+    ssh $remote_user "sudo ctr -n k8s.io images import \"$remote_path\""/kube-test-images.tar
+fi
+
+if [ "$2" = "cv" ] || [ "$2" = "" ]; then
+    if [ ! -d "$CALICOVPP_DIR" ]; then
+          git clone https://github.com/projectcalico/vpp-dataplane.git $CALICOVPP_DIR
+    else
+        echo "Repo found, resetting"
+        cd $CALICOVPP_DIR
+        git reset --hard origin/master
+        git pull
+        cd $CALICOVPP_DIR/vpp-manager/vpp_build
+        git reset --hard origin/master
+        cd $VPP_DIR/test-c/kube-test
+    fi
+
+    make -C $CALICOVPP_DIR image TAG=latest
+    docker save -o calicovpp-images.tar docker.io/calicovpp/vpp:latest docker.io/calicovpp/agent:latest docker.io/calicovpp/multinet-monitor:latest
+    sudo ctr -n k8s.io images import calicovpp-images.tar
+    scp calicovpp-images.tar $1
+    ssh $remote_user "sudo ctr -n k8s.io images import \"$remote_path\""/calicovpp-images.tar
+fi
index e4f45f6..0e922e7 100755 (executable)
@@ -16,7 +16,7 @@ export TIGERA_VERSION="${TIGERA_VERSION:-master}"
 echo "CALICOVPP_VERSION=$CALICOVPP_VERSION" > kubernetes/.vars
 export DOCKER_BUILD_PROXY=$HTTP_PROXY
 
-envsubst < kubernetes/calico-config-template.yaml > kubernetes/calico-config.yaml
+envsubst < kubernetes/kind-calicovpp-config-template.yaml > kubernetes/kind-calicovpp-config.yaml
 kind_config=$(cat kubernetes/kind-config.yaml)
 kind_config=$(cat <<EOF
 $kind_config
@@ -65,7 +65,7 @@ help() {
   echo "'release-cluster' starts up a KinD cluster and uses latest CalicoVPP release (e.g. v3.29),
     or you can override versions by using env variables 'CALICOVPP_VERSION' and 'TIGERA_VERSION':
     CALICOVPP_VERSION: latest | v[x].[y].[z] (default=latest)
-    TIGERA_VERSION:    master | v[x].[y].[z] (default=v3.28.3)"
+    TIGERA_VERSION:    master | v[x].[y].[z] (default="release-v3.31")"
 
   echo -e "\nTo shut down the cluster, use 'kind delete cluster'"
 }
@@ -108,7 +108,7 @@ cherry_pick() {
 build_load_start_cni() {
   # make -C $VPP_DIR/test-c/kube-test build-vpp-release
   make -C $CALICOVPP_DIR image-kind
-  kubectl create --save-config -f kubernetes/calico-config.yaml
+  kubectl create --save-config -f kubernetes/kind-calicovpp-config.yaml
 }
 
 restore_repo() {
@@ -150,7 +150,7 @@ setup_master() {
 
 rebuild_master() {
   echo "Shutting down pods may take some time, timeout is set to 1m."
-  timeout 1m kubectl delete -f kubernetes/calico-config.yaml || true
+  timeout 1m kubectl delete -f kubernetes/kind-calicovpp-config.yaml || true
   cherry_pick
   build_load_start_cni
   # temporarily disabled
@@ -169,7 +169,7 @@ setup_release() {
 
   while [[ "$(kubectl api-resources --api-group=operator.tigera.io | grep Installation)" == "" ]]; do echo "waiting for Installation kubectl resource"; sleep 2; done
 
-  kubectl create --save-config -f kubernetes/calico-config.yaml
+  kubectl create --save-config -f kubernetes/kind-calicovpp-config.yaml
 
   echo "Done. Please wait for the cluster to come fully online before running tests."
   echo "Use 'watch kubectl get pods -A' to monitor cluster status."