/extras/hs-test/.kind_deps.ok
/extras/hs-test/.go_cache/
/extras/hs-test/kubernetes/calico-config.yaml
+/extras/hs-test/kubernetes/.vars
# ./configure
/CMakeFiles
require (
github.com/Microsoft/go-winio v0.6.2 // indirect
+ github.com/a8m/envsubst v1.4.2
github.com/containerd/log v0.1.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
github.com/imdario/mergo v0.3.16 // indirect
+ github.com/joho/godotenv v1.5.1
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/lunixbochs/struc v0.0.0-20200521075829-a4cb8d33dbbe // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg=
+github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
+github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
pod.IpAddress = pod.CreatedPod.Status.PodIP
time.Sleep(time.Second * 1)
counter++
- if counter >= 10 {
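+		// polls once per second, so this gives pods roughly 40s to get an IP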
+ if counter >= 40 {
Fail("Unable to get IP. Check if all pods are running. " + fmt.Sprint(err))
}
}
return s.AllPods[podName+s.Ppid]
}
-func (pod *Pod) CopyToPod(namespace string, src string, dst string) {
- cmd := exec.Command("kubectl", "--kubeconfig="+pod.suite.KubeconfigPath, "cp", src, namespace+"/"+pod.Name+":"+dst)
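+// CopyToPod copies a local file into the pod's own namespace using 'kubectl cp'.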
+func (pod *Pod) CopyToPod(src string, dst string) {
+ cmd := exec.Command("kubectl", "--kubeconfig="+pod.suite.KubeconfigPath, "cp", src, pod.Namespace+"/"+pod.Name+":"+dst)
out, err := cmd.CombinedOutput()
pod.suite.AssertNil(err, string(out))
}
err = f.Close()
pod.suite.AssertNil(err, err)
- pod.CopyToPod(pod.suite.Namespace, f.Name(), targetConfigName)
+ pod.CopyToPod(f.Name(), targetConfigName)
}
import (
"context"
"fmt"
+ "os"
+ "os/exec"
"reflect"
"regexp"
"runtime"
"strings"
. "fd.io/hs-test/infra/common"
+ "github.com/a8m/envsubst"
. "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
+ "github.com/joho/godotenv"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
CurrentlyRunning map[string]*Pod
images []string
AllPods map[string]*Pod
+ MainContext context.Context
Pods struct {
ServerGeneric *Pod
ClientGeneric *Pod
NginxProxy *Pod
Ab *Pod
}
- MainContext context.Context
}
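+// imagesLoaded makes the one-time setup (loading images, creating the namespace) run only for the first suite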
+var imagesLoaded bool
var kindTests = map[string][]func(s *KindSuite){}
const VclConfIperf = "echo \"vcl {\n" +
func (s *KindSuite) SetupSuite() {
s.HstCommon.SetupSuite()
- RegisterFailHandler(func(message string, callerSkip ...int) {
- Fail(message, callerSkip...)
- })
s.CurrentlyRunning = make(map[string]*Pod)
s.LoadPodConfigs()
s.initPods()
- s.loadDockerImages()
- var err error
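+	// images only need to be loaded into the kind nodes once per test run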
+ if !imagesLoaded {
+ s.loadDockerImages()
+ }
+
if *WhoAmI == "root" {
s.KubeconfigPath = "/.kube/config"
} else {
s.Log("User: '%s'", *WhoAmI)
s.Log("Config path: '%s'", s.KubeconfigPath)
+ var err error
s.Config, err = clientcmd.BuildConfigFromFlags("", s.KubeconfigPath)
s.AssertNil(err)
s.ClientSet, err = kubernetes.NewForConfig(s.Config)
s.AssertNil(err)
- s.createNamespace(s.Namespace)
+ if !imagesLoaded {
+ s.createNamespace(s.Namespace)
+ imagesLoaded = true
+ }
+}
+
+// SetMtuAndRestart sets the CALICO_NETWORK_CONFIG and ADDITIONAL_VPP_CONFIG environment
+// variables, re-renders and applies the Calico config, then rollout-restarts the cluster.
+func (s *KindSuite) SetMtuAndRestart(calicoNetworkConfig string, additionalVppConfig string) {
+	os.Setenv("CALICO_NETWORK_CONFIG", calicoNetworkConfig)
+	os.Setenv("ADDITIONAL_VPP_CONFIG", additionalVppConfig)
+ s.AssertNil(godotenv.Load("kubernetes/.vars"))
+
+ s.Envsubst("kubernetes/calico-config-template.yaml", "kubernetes/calico-config.yaml")
+
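+	// apply the re-rendered config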
+ cmd := exec.Command("kubectl", "apply", "-f", "kubernetes/calico-config.yaml")
+ s.Log(cmd.String())
+ o, err := cmd.CombinedOutput()
+ s.Log(string(o))
+ s.AssertNil(err)
+
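+	// restart the VPP dataplane DaemonSet, then wait for it and calico-node to finish rolling out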
+ cmd = exec.Command("kubectl", "-n", "calico-vpp-dataplane", "rollout", "restart", "ds/calico-vpp-node")
+ s.Log(cmd.String())
+ o, err = cmd.CombinedOutput()
+ s.Log(string(o))
+ s.AssertNil(err)
+
+ cmd = exec.Command("kubectl", "-n", "calico-vpp-dataplane", "rollout", "status", "ds/calico-vpp-node")
+ s.Log(cmd.String())
+ o, err = cmd.CombinedOutput()
+ s.Log(string(o))
+ s.AssertNil(err)
+
+ cmd = exec.Command("kubectl", "-n", "calico-system", "rollout", "status", "ds/calico-node")
+ s.Log(cmd.String())
+ o, err = cmd.CombinedOutput()
+ s.Log(string(o))
+ s.AssertNil(err)
}
func (s *KindSuite) TeardownTest() {
s.Log("Removing:")
for _, pod := range s.CurrentlyRunning {
s.Log(" %s", pod.Name)
- s.deletePod(s.Namespace, pod.Name)
+ s.AssertNil(s.deletePod(s.Namespace, pod.Name))
}
}
}
// and searches for the first version string, then creates symlinks.
func (s *KindSuite) FixVersionNumber(pods ...*Pod) {
regex := regexp.MustCompile(`lib.*\.so\.([0-9]+\.[0-9]+)`)
- o, _ := s.Pods.ServerGeneric.Exec(context.TODO(), []string{"/bin/bash", "-c",
- "ldd /usr/lib/libvcl_ldpreload.so"})
- match := regex.FindStringSubmatch(o)
+ var match []string
+ for _, pod := range pods {
+ if strings.Contains(pod.Name, "generic") {
+ o, _ := pod.Exec(context.TODO(), []string{"/bin/bash", "-c",
+ "ldd /usr/lib/libvcl_ldpreload.so"})
+ match = regex.FindStringSubmatch(o)
+ break
+ }
+ }
if len(match) > 1 {
version := match[1]
)
}
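+// Envsubst renders inputPath, expanding ${VAR} references from the environment, and writes the result to outputPath.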
+func (s *KindSuite) Envsubst(inputPath string, outputPath string) {
+ o, err := envsubst.ReadFile(inputPath)
+ s.AssertNil(err)
+	s.AssertNil(os.WriteFile(outputPath, o, 0644))
+}
+
func (s *KindSuite) CreateNginxProxyConfig(pod *Pod) {
pod.Exec(context.TODO(), []string{"/bin/bash", "-c", "mkdir -p /tmp/nginx"})
values := struct {
var s KindSuite
BeforeAll(func() {
s.SetupSuite()
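+		// make sure the cluster runs the default MTU config; a previous suite may have changed it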
+ s.SetMtuAndRestart("", "")
})
BeforeEach(func() {
s.SetupTest()
--- /dev/null
+package hst_kind
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+
+ . "fd.io/hs-test/infra/common"
+ . "github.com/onsi/ginkgo/v2"
+)
+
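+// LargeMtuSuite runs the Kind tests on a cluster reconfigured for jumbo frames.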
+type LargeMtuSuite struct {
+ KindSuite
+}
+
+var largeMtuTests = map[string][]func(s *LargeMtuSuite){}
+
+func RegisterLargeMtuTests(tests ...func(s *LargeMtuSuite)) {
+ largeMtuTests[GetTestFilename()] = tests
+}
+
+func (s *LargeMtuSuite) SetupSuite() {
+ s.KindSuite.SetupSuite()
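+	// 9000-byte interface MTU; VPP TCP MTU 8960 = 9000 minus 40 bytes of IPv4+TCP headers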
+ s.SetMtuAndRestart("mtu: 9000", "tcp { mtu 8960 }")
+}
+
+var _ = Describe("LargeMtuSuite", Ordered, ContinueOnFailure, Label("Perf"), func() {
+ var s LargeMtuSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterEach(func() {
+ s.TeardownTest()
+ })
+ AfterAll(func() {
+ s.TeardownSuite()
+ })
+
+ for filename, tests := range largeMtuTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(TestTimeout))
+ }
+ }
+})
import (
"context"
"errors"
+ "fmt"
"time"
+ . "fd.io/hs-test/infra/common"
. "fd.io/hs-test/infra/kind"
. "github.com/onsi/ginkgo/v2"
)
func init() {
- RegisterKindTests(KindIperfVclTest, NginxRpsTest, NginxProxyMirroringTest)
+ RegisterKindTests(KindTcpIperfVclTest, KindUdpIperfVclTest, NginxRpsTest, NginxProxyMirroringTest)
+ RegisterLargeMtuTests(KindTcpIperfVclLargeMTUTest)
}
const vcl string = "VCL_CONFIG=/vcl.conf"
const ldp string = "LD_PRELOAD=/usr/lib/libvcl_ldpreload.so"
-func KindIperfVclTest(s *KindSuite) {
+func kindIperfVclTest(s *KindSuite, clientArgs string) IPerfResult {
s.DeployPod(s.Pods.ClientGeneric)
s.DeployPod(s.Pods.ServerGeneric)
ctx, cancel := context.WithTimeout(s.MainContext, time.Second*40)
s.FixVersionNumber(s.Pods.ClientGeneric, s.Pods.ServerGeneric)
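+	// -J gives JSON output for ParseJsonIperfOutput; clientArgs selects protocol and MSS/length per test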
+ iperfClientCmd := fmt.Sprintf("%s %s iperf3 %s -J -b 40g -c %s",
+ vcl, ldp, clientArgs, s.Pods.ServerGeneric.IpAddress)
+
o, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c",
vcl + " " + ldp + " iperf3 -s -D -4 -B " + s.Pods.ServerGeneric.IpAddress})
s.AssertNil(err, o)
- o, err = s.Pods.ClientGeneric.Exec(ctx, []string{"/bin/bash", "-c",
- vcl + " " + ldp + " iperf3 -J -l 1460 -b 10g -c " + s.Pods.ServerGeneric.IpAddress})
+ o, err = s.Pods.ClientGeneric.Exec(ctx, []string{"/bin/bash", "-c", iperfClientCmd})
+
+ s.AssertNil(err, o)
+ result := s.ParseJsonIperfOutput([]byte(o))
+ s.LogJsonIperfOutput(result)
+ return result
+}
+
+// TODO: use interfaces to avoid duplicated code
+func kindIperfVclMtuTest(s *LargeMtuSuite, clientArgs string) IPerfResult {
+ s.DeployPod(s.Pods.ClientGeneric)
+ s.DeployPod(s.Pods.ServerGeneric)
+ ctx, cancel := context.WithTimeout(s.MainContext, time.Second*40)
+ defer cancel()
+
+ _, err := s.Pods.ClientGeneric.Exec(ctx, []string{"/bin/bash", "-c", VclConfIperf})
s.AssertNil(err)
+ _, err = s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c", VclConfIperf})
+ s.AssertNil(err)
+
+ s.FixVersionNumber(s.Pods.ClientGeneric, s.Pods.ServerGeneric)
+
+ iperfClientCmd := fmt.Sprintf("%s %s iperf3 %s -J -b 40g -c %s",
+ vcl, ldp, clientArgs, s.Pods.ServerGeneric.IpAddress)
+
+ o, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c",
+ vcl + " " + ldp + " iperf3 -s -D -4 -B " + s.Pods.ServerGeneric.IpAddress})
+ s.AssertNil(err, o)
+ o, err = s.Pods.ClientGeneric.Exec(ctx, []string{"/bin/bash", "-c", iperfClientCmd})
+
+ s.AssertNil(err, o)
result := s.ParseJsonIperfOutput([]byte(o))
s.LogJsonIperfOutput(result)
- s.AssertIperfMinTransfer(result, 2000)
+ return result
+}
+
+func KindTcpIperfVclTest(s *KindSuite) {
+ s.AssertIperfMinTransfer(kindIperfVclTest(s, "-M 1460"), 2000)
+}
+
+func KindTcpIperfVclLargeMTUTest(s *LargeMtuSuite) {
+ s.AssertIperfMinTransfer(kindIperfVclMtuTest(s, "-M 8960"), 2000)
+}
+
+func KindUdpIperfVclTest(s *KindSuite) {
+ s.AssertIperfMinTransfer(kindIperfVclTest(s, "-l 1460 -u"), 2000)
}
func NginxRpsTest(s *KindSuite) {
---
apiVersion: v1
data:
+ CALICOVPP_BGP_LOG_LEVEL: ""
+ CALICOVPP_CONFIG_EXEC_TEMPLATE: ""
CALICOVPP_CONFIG_TEMPLATE: |-
- unix {
- nodaemon
- full-coredump
- cli-listen /var/run/vpp/cli.sock
- pidfile /run/vpp/vpp.pid
- exec /etc/vpp/startup.exec
- }
- api-trace { on }
- cpu {
- workers 0
- }
- socksvr {
- socket-name /var/run/vpp/vpp-api.sock
- }
- plugins {
- plugin default { enable }
- plugin dpdk_plugin.so { disable }
- plugin calico_plugin.so { enable }
- plugin ping_plugin.so { disable }
- plugin dispatch_trace_plugin.so { enable }
- }
- buffers {
- buffers-per-numa 131072
- }
- CALICOVPP_INITIAL_CONFIG: |-
+ unix {
+ nodaemon
+ full-coredump
+ log /var/run/vpp/vpp.log
+ cli-listen /var/run/vpp/cli.sock
+ pidfile /run/vpp/vpp.pid
+ }
+ buffers {
+ buffers-per-numa 131072
+ }
+ socksvr { socket-name /var/run/vpp/vpp-api.sock }
+ plugins {
+ plugin default { enable }
+ plugin calico_plugin.so { enable }
+ plugin dpdk_plugin.so { disable }
+ }
+ ${ADDITIONAL_VPP_CONFIG}
+ CALICOVPP_DEBUG: |-
{
- "vppStartupSleepSeconds": 1,
- "corePattern": "/var/lib/vpp/vppcore.%e.%p",
- "redirectToHostRules": [
- {
- "proto": "udp",
- "port": 53,
- "ip": "172.18.0.1"
- },
- {
- "proto": "tcp",
- "port": 53,
- "ip": "172.18.0.1"
+ "servicesEnabled": true,
+ "gsoEnabled": true
}
- ]
- }
CALICOVPP_FEATURE_GATES: |-
{
- "vclEnabled": true
- }
- CALICOVPP_INTERFACES: |-
+ "memifEnabled": false,
+ "vclEnabled": true,
+ "multinetEnabled": false,
+ "ipsecEnabled": false
+ }
+ CALICOVPP_INIT_SCRIPT_TEMPLATE: ""
+ CALICOVPP_INITIAL_CONFIG: |-
{
- "uplinkInterfaces": [
- {
- "interfaceName": "eth0",
- "vppDriver": "af_packet"
+ "vppStartupSleepSeconds": 0,
+ "corePattern": "/var/lib/vpp/vppcore.%e.%p",
+ "defaultGWs": "",
+ "redirectToHostRules": [
+ {
+ "proto": "udp",
+ "port": 53,
+ "ip": "172.18.0.1"
+ }
+ ]
}
- ]
- }
- SERVICE_PREFIX: 10.96.0.0/16
+ CALICOVPP_INTERFACES: |-
+ {
+ "uplinkInterfaces": [
+ {
+ "interfaceName": "eth0",
+ "vppDriver": "af_packet"
+ }
+ ]
+ }
+ CALICOVPP_IPSEC: |-
+ {}
+ CALICOVPP_IPSEC_IKEV2_PSK: "keykeykey"
+ CALICOVPP_LOG_FORMAT: pretty
+ CALICOVPP_LOG_LEVEL: ""
+ CALICOVPP_SRV6: |-
+ {}
+ CALICOVPP_SWAP_DRIVER: ""
+ DEBUG: ""
+ SERVICE_PREFIX: 11.96.0.0/12,fd10::0/120
kind: ConfigMap
metadata:
name: calico-vpp-config
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- - name: NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
+ - name: LD_LIBRARY_PATH
+ value: /repo/vpp-manager/vpp_build/build-root/install-vpp-native/vpp/
envFrom:
- configMapRef:
name: calico-vpp-config
- image: docker.io/calicovpp/agent:${CALICOVPP_VERSION}
- imagePullPolicy: IfNotPresent
- name: agent
+ image: localhost:5000/calicovpp/vpp:${CALICOVPP_VERSION}
+ imagePullPolicy: Always
+ name: vpp
resources:
+ limits:
+ memory: 80Gi
requests:
- cpu: 250m
+ cpu: 1
+ memory: 4Gi
securityContext:
privileged: true
volumeMounts:
- - mountPath: /var/run/calico
- name: var-run-calico
- readOnly: false
- - mountPath: /var/lib/calico/felix-plugins
- name: felix-plugins
- readOnly: false
+ - mountPath: /repo
+ name: repo-directory
+ - mountPath: /etc/ssl/certs/
+ name: ssl-certs
+ - mountPath: /usr/share/ca-certificates
+ name: share-certs
+ - mountPath: /lib/firmware
+ name: lib-firmware
- mountPath: /var/run/vpp
name: vpp-rundir
+ - mountPath: /var/lib/vpp
+ name: vpp-data
+ - mountPath: /etc/vpp
+ name: vpp-config
+ - mountPath: /dev
+ name: devices
+ - mountPath: /sys
+ name: hostsys
- mountPath: /run/netns/
mountPropagation: Bidirectional
name: netns
+ - mountPath: /host
+ name: host-root
- env:
- name: DATASTORE_TYPE
value: kubernetes
valueFrom:
fieldRef:
fieldPath: spec.nodeName
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
envFrom:
- configMapRef:
name: calico-vpp-config
- image: docker.io/calicovpp/vpp:${CALICOVPP_VERSION}
- imagePullPolicy: IfNotPresent
- name: vpp
+ image: localhost:5000/calicovpp/agent:${CALICOVPP_VERSION}
+ imagePullPolicy: Always
+ name: agent
resources:
requests:
- cpu: 500m
- memory: 512Mi
+ cpu: 250m
securityContext:
privileged: true
volumeMounts:
- - mountPath: /lib/firmware
- name: lib-firmware
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico/felix-plugins
+ name: felix-plugins
+ readOnly: false
- mountPath: /var/run/vpp
name: vpp-rundir
- - mountPath: /var/lib/vpp
- name: vpp-data
- - mountPath: /etc/vpp
- name: vpp-config
- - mountPath: /dev
- name: devices
- - mountPath: /sys
- name: hostsys
- mountPath: /run/netns/
mountPropagation: Bidirectional
name: netns
- - mountPath: /host
- name: host-root
hostNetwork: true
hostPID: true
nodeSelector:
- effect: NoExecute
operator: Exists
volumes:
+ - hostPath:
+ path: ${HOME}/vpp-dataplane
+ name: repo-directory
+ - hostPath:
+ path: /etc/ssl/certs/
+ name: ssl-certs
+ - hostPath:
+ path: /usr/share/ca-certificates
+ name: share-certs
- hostPath:
path: /lib/firmware
name: lib-firmware
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
+---
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+ name: default
+spec: {}
+---
+apiVersion: operator.tigera.io/v1
+kind: Installation
+metadata:
+ name: default
+spec:
+ calicoNetwork:
+ ipPools:
+ - cidr: 11.0.0.0/24
+ encapsulation: IPIP
+ natOutgoing: Enabled
+ linuxDataplane: VPP
+ nodeAddressAutodetectionV4:
+ interface: eth0
+ nodeAddressAutodetectionV6:
+ interface: eth0
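+    # substituted via envsubst: empty by default, 'mtu: 9000' for the large-MTU suite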
+ ${CALICO_NETWORK_CONFIG}
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
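+# pull images for localhost:5000 and docker.io through the local kind-registry container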
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
+ endpoint = ["http://kind-registry:5000"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://kind-registry:5000"]
networking:
disableDefaultCNI: true
- podSubnet: "11.0.0.0/16"
- serviceSubnet: "11.96.0.0/12"
+ podSubnet: "11.0.0.0/16,fd20::0/64"
+ serviceSubnet: "11.96.0.0/12,fd10::0/120"
+ ipFamily: dual
nodes:
-- role: control-plane
-- role: worker
-- role: worker
\ No newline at end of file
--- /dev/null
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: local-registry-hosting
+ namespace: kube-public
+data:
+ localRegistryHosting.v1: |
+ host: "localhost:5000"
+ help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
VPP_DIR=$(pwd)
VPP_DIR=${VPP_DIR%extras*}
COMMIT_HASH=$(git rev-parse HEAD)
+reg_name='kind-registry'
+reg_port='5000'
+export CALICO_NETWORK_CONFIG=${CALICO_NETWORK_CONFIG:-}
+export ADDITIONAL_VPP_CONFIG=${ADDITIONAL_VPP_CONFIG:-}
+export CALICOVPP_VERSION="${CALICOVPP_VERSION:-latest}"
+export TIGERA_VERSION="${TIGERA_VERSION:-v3.28.3}"
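+# persist the version for the Go test suite, which loads it via godotenv before re-rendering the config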
+echo "CALICOVPP_VERSION=$CALICOVPP_VERSION" > kubernetes/.vars
export DOCKER_BUILD_PROXY=$HTTP_PROXY
-# ---------------- images ----------------
-export CALICO_AGENT_IMAGE=localhost:5000/calicovpp/agent:latest
-export CALICO_VPP_IMAGE=localhost:5000/calicovpp/vpp:latest
-export MULTINET_MONITOR_IMAGE=localhost:5000/calicovpp/multinet-monitor:latest
-export IMAGE_PULL_POLICY=Always
-
-# ---------------- interfaces ----------------
-export CALICOVPP_INTERFACES='{
- "uplinkInterfaces": [
- {
- "interfaceName": "eth0",
- "vppDriver": "af_packet"
- }
- ]
- }'
-export CALICOVPP_DISABLE_HUGEPAGES=true
-export CALICOVPP_CONFIG_TEMPLATE="
- unix {
- nodaemon
- full-coredump
- log /var/run/vpp/vpp.log
- cli-listen /var/run/vpp/cli.sock
- pidfile /run/vpp/vpp.pid
- }
- buffers {
- buffers-per-numa 131072
- }
- socksvr { socket-name /var/run/vpp/vpp-api.sock }
- plugins {
- plugin default { enable }
- plugin calico_plugin.so { enable }
- plugin dpdk_plugin.so { disable }
- }"
-export CALICOVPP_ENABLE_VCL=true
+
+envsubst < kubernetes/calico-config-template.yaml > kubernetes/calico-config.yaml
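+# append node definitions that mount $HOME so hostPath volumes (e.g. ${HOME}/vpp-dataplane) resolve on the nodes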
+kind_config=$(cat kubernetes/kind-config.yaml)
+kind_config=$(cat <<EOF
+$kind_config
+- role: control-plane
+ extraMounts:
+ - hostPath: $HOME
+ containerPath: $HOME
+- role: worker
+ extraMounts:
+ - hostPath: $HOME
+ containerPath: $HOME
+- role: worker
+ extraMounts:
+ - hostPath: $HOME
+ containerPath: $HOME
+EOF
+)
+
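+# recreate the default kind network with a 9000-byte MTU so jumbo-frame tests can pass traffic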
+if [ "$(docker network inspect kind -f '{{ index .Options "com.docker.network.driver.mtu" }}')" -ne "9000" ]; then
+ echo "Deleting kind network"
+ docker network rm kind || true
+ echo "Creating custom kind network"
+ docker network create kind --driver bridge --opt com.docker.network.driver.mtu=9000 --opt com.docker.network.bridge.enable_ip_masquerade=true --ipv6
+fi
+
+# start a local image registry container if it is not already running
+if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
+  docker run \
+    -d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \
+ registry:2
+fi
+
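+# attach the registry container to the kind network so cluster nodes can pull from it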
+connect_registry() {
+ if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
+ docker network connect "kind" "${reg_name}"
+ fi
+}
help() {
echo "Usage:"
echo -e "\nTo shut down the cluster, use 'kind delete cluster'"
}
+push_release_to_registry() {
+  for image in vpp agent multinet-monitor; do
+    docker pull docker.io/calicovpp/$image:$CALICOVPP_VERSION
+    docker image tag docker.io/calicovpp/$image:$CALICOVPP_VERSION localhost:5000/calicovpp/$image:$CALICOVPP_VERSION
+    docker push localhost:5000/calicovpp/$image:$CALICOVPP_VERSION
+  done
+}
+
+push_master_to_registry() {
+  for image in vpp agent multinet-monitor; do
+    docker image tag calicovpp/$image:latest localhost:5000/calicovpp/$image:latest
+    docker push localhost:5000/calicovpp/$image:latest
+  done
+}
+
cherry_pick() {
STASHED_CHANGES=0
echo "checkpoint: $COMMIT_HASH"
make -C $VPP_DIR/extras/hs-test build-vpp-release
make -C $CALICOVPP_DIR dev-kind
make -C $CALICOVPP_DIR load-kind
- $CALICOVPP_DIR/yaml/overlays/dev/kustomize.sh up
+ kubectl create --save-config -f kubernetes/calico-config.yaml
}
restore_repo() {
else
cd $CALICOVPP_DIR
git pull
- cd $VPP_DIR
+ cd $VPP_DIR/extras/hs-test
fi
- make -C $CALICOVPP_DIR kind-new-cluster N_KIND_WORKERS=2
- kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.3/manifests/tigera-operator.yaml
+ echo -e "$kind_config" | kind create cluster --config=-
+ kubectl apply -f kubernetes/registry.yaml
+ connect_registry
+ push_master_to_registry
+ kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/$TIGERA_VERSION/manifests/tigera-operator.yaml
cherry_pick
build_load_start_cni
rebuild_master() {
echo "Shutting down pods may take some time, timeout is set to 1m."
- timeout 1m $CALICOVPP_DIR/yaml/overlays/dev/kustomize.sh dn || true
+ timeout 1m kubectl delete -f kubernetes/calico-config.yaml || true
cherry_pick
build_load_start_cni
restore_repo
}
setup_release() {
- export CALICOVPP_VERSION="${CALICOVPP_VERSION:-latest}"
- export TIGERA_VERSION="${TIGERA_VERSION:-v3.28.3}"
echo "CALICOVPP_VERSION=$CALICOVPP_VERSION"
echo "TIGERA_VERSION=$TIGERA_VERSION"
- envsubst < kubernetes/calico-config-template.yaml > kubernetes/calico-config.yaml
-
- kind create cluster --config kubernetes/kind-config.yaml
+ echo -e "$kind_config" | kind create cluster --config=-
+ kubectl apply -f kubernetes/registry.yaml
+ connect_registry
+ push_release_to_registry
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/$TIGERA_VERSION/manifests/tigera-operator.yaml
echo "Waiting for tigera-operator pod to start up."
kubectl -n tigera-operator wait --for=condition=Ready pod --all --timeout=1m
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/master/yaml/calico/installation-default.yaml
- kubectl create -f kubernetes/calico-config.yaml
+ kubectl create --save-config -f kubernetes/calico-config.yaml
echo "Done. Please wait for the cluster to come fully online before running tests."
echo "Use 'watch kubectl get pods -A' to monitor cluster status."