s.AssertNil(err)
// let vpp-dataplane recover, should help with stability issues
- s.Log("Waiting for 10 seconds")
- time.Sleep(time.Second * 10)
+ s.Log("Waiting for 20 seconds")
+ time.Sleep(time.Second * 20)
}
var imagesLoaded bool
var kubeTests = map[string][]func(s *KubeSuite){}
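+// kubeMWTests collects the multi-worker test variants, keyed by source filename like kubeTests.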
+var kubeMWTests = map[string][]func(s *KubeSuite){}
const VclConfIperf = "echo \"vcl {\n" +
"rx-fifo-size 4000000\n" +
func RegisterKubeTests(tests ...func(s *KubeSuite)) {
kubeTests[GetTestFilename()] = tests
}
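+// RegisterKubeMWTests registers tests to run in the multi-worker KubeMWSuite.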
+func RegisterKubeMWTests(tests ...func(s *KubeSuite)) {
+ kubeMWTests[GetTestFilename()] = tests
+}
func (s *KubeSuite) SetupTest() {
s.MainContext = context.Background()
)
}
-var _ = Describe("KubeSuite", Ordered, ContinueOnFailure, Label("Perf"), func() {
+var _ = Describe("KubeSuite", Ordered, ContinueOnFailure, func() {
var s KubeSuite
BeforeAll(func() {
s.SetupSuite()
- s.SetMtuAndRestart("", "")
+ s.SetMtuAndRestart("mtu: 0", "tcp { mtu 1460 }\n cpu { workers 0 }")
})
BeforeEach(func() {
s.SetupTest()
}
}
})
+
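+// KubeMWSuite mirrors KubeSuite, but restarts VPP with two worker threads (cpu { workers 2 }).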
+var _ = Describe("KubeMWSuite", Ordered, ContinueOnFailure, Label("Multi-worker"), func() {
+ var s KubeSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ s.SetMtuAndRestart("mtu: 0", "tcp { mtu 1460 }\n cpu { workers 2 }")
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterEach(func() {
+ s.TeardownTest()
+ })
+ AfterAll(func() {
+ s.TeardownSuite()
+ })
+
+ for filename, tests := range kubeMWTests {
+ for _, test := range tests {
+ test := test
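+ // recover the test function's symbol name via reflection to build a readable spec name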
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(TestTimeout))
+ }
+ }
+})
func (s *LargeMtuSuite) SetupSuite() {
s.KubeSuite.SetupSuite()
- s.SetMtuAndRestart("mtu: 9000", "tcp { mtu 8960 }")
+ s.SetMtuAndRestart("mtu: 0", "tcp { mtu 8960 }\n cpu { workers 0 }")
}
-var _ = Describe("LargeMtuSuite", Ordered, ContinueOnFailure, Label("Perf"), func() {
+var _ = Describe("LargeMtuSuite", Ordered, ContinueOnFailure, Label("Large MTU"), func() {
var s LargeMtuSuite
BeforeAll(func() {
s.SetupSuite()
func init() {
RegisterKubeTests(KubeTcpIperfVclTest, KubeUdpIperfVclTest, NginxRpsTest, NginxProxyMirroringTest)
+ RegisterKubeMWTests(KubeTcpIperfVclMWTest, KubeUdpIperfVclMWTest)
RegisterLargeMtuTests(KubeTcpIperfVclLargeMTUTest)
}
o, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c",
vcl + " " + ldp + " iperf3 -s -D --logfile /iperf_server.log -B " + s.Pods.ServerGeneric.IpAddress})
- s.Log("Sleeping for 2s")
- time.Sleep(time.Second * 2)
+ s.Log("Sleeping for 5s")
+ time.Sleep(time.Second * 5)
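+ // verify the iperf3 server daemon actually came up before starting the client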
+ s.AssertNil(err, o)
+ out, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c", "pidof iperf3"})
+ s.Log(out)
+ s.AssertNil(err)
- s.AssertNil(err, o)
o, err = s.Pods.ClientGeneric.Exec(ctx, []string{"/bin/bash", "-c", iperfClientCmd})
o, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c",
vcl + " " + ldp + " iperf3 -s -D --logfile /iperf_server.log -B " + s.Pods.ServerGeneric.IpAddress})
- s.Log("Sleeping for 2s")
- time.Sleep(time.Second * 2)
+ s.Log("Sleeping for 5s")
+ time.Sleep(time.Second * 5)
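+ // as above, verify the iperf3 server daemon is running before starting the client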
+ s.AssertNil(err, o)
+ out, err := s.Pods.ServerGeneric.Exec(ctx, []string{"/bin/bash", "-c", "pidof iperf3"})
+ s.Log(out)
+ s.AssertNil(err)
- s.AssertNil(err, o)
o, err = s.Pods.ClientGeneric.Exec(ctx, []string{"/bin/bash", "-c", iperfClientCmd})
s.AssertIperfMinTransfer(kubeIperfVclTest(s, "-l 1460 -u"), 2000)
}
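+
+// Multi-worker variants of the iperf VCL tests, with a lower minimum-transfer threshold.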
+func KubeTcpIperfVclMWTest(s *KubeSuite) {
+ s.AssertIperfMinTransfer(kubeIperfVclTest(s, "-M 1460"), 200)
+}
+
+func KubeUdpIperfVclMWTest(s *KubeSuite) {
+ s.AssertIperfMinTransfer(kubeIperfVclTest(s, "-l 1460 -u"), 200)
+}
+
func NginxRpsTest(s *KubeSuite) {
ctx, cancel := context.WithTimeout(s.MainContext, time.Minute*3)
defer cancel()
CALICOVPP_BGP_LOG_LEVEL: ""
CALICOVPP_CONFIG_EXEC_TEMPLATE: ""
CALICOVPP_CONFIG_TEMPLATE: |-
- unix {
- nodaemon
- full-coredump
- log /var/run/vpp/vpp.log
- cli-listen /var/run/vpp/cli.sock
- pidfile /run/vpp/vpp.pid
- }
- buffers {
- buffers-per-numa 131072
- }
- socksvr { socket-name /var/run/vpp/vpp-api.sock }
- plugins {
- plugin default { enable }
- plugin calico_plugin.so { enable }
- plugin dpdk_plugin.so { disable }
- }
- ${ADDITIONAL_VPP_CONFIG}
+ unix {
+ nodaemon
+ full-coredump
+ log /var/run/vpp/vpp.log
+ cli-listen /var/run/vpp/cli.sock
+ pidfile /run/vpp/vpp.pid
+ }
+ socksvr {
+ socket-name /var/run/vpp/vpp-api.sock
+ }
+ buffers {
+ buffers-per-numa 16384
+ page-size 4K
+ }
+ plugins {
+ plugin default { enable }
+ plugin calico_plugin.so { enable }
+ plugin dpdk_plugin.so { disable }
+ }
+ ${ADDITIONAL_VPP_CONFIG}
CALICOVPP_DEBUG: |-
{
"servicesEnabled": true,
}
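+ # feature gates below also enable memif and Prometheus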
CALICOVPP_FEATURE_GATES: |-
{
- "memifEnabled": false,
- "vclEnabled": true,
- "multinetEnabled": false,
- "ipsecEnabled": false
- }
+ "prometheusEnabled": true,
+ "vclEnabled": true,
+ "memifEnabled": true
+ }
CALICOVPP_INIT_SCRIPT_TEMPLATE: ""
CALICOVPP_INITIAL_CONFIG: |-
{
}
CALICOVPP_INTERFACES: |-
{
- "uplinkInterfaces": [
- {
- "interfaceName": "eth0",
- "vppDriver": "af_packet"
- }
- ]
- }
+ "defaultPodIfSpec": {
+ "rx": 1,
+ "tx": 1,
+ "rxqsz": 128,
+ "txqsz": 128,
+ "isl3": true,
+ "rxMode": "interrupt"
+ },
+ "vppHostTapSpec": {
+ "rx": 1,
+ "tx": 1,
+ "rxqsz": 512,
+ "txqsz": 512,
+ "isl3": false,
+ "rxMode": "interrupt"
+ },
+ "uplinkInterfaces": [
+ {
+ "interfaceName": "eth0",
+ "vppDriver": "af_packet",
+ "rxMode": "interrupt"
+ }
+ ]
+ }
CALICOVPP_IPSEC: |-
{}
CALICOVPP_IPSEC_IKEV2_PSK: "keykeykey"
CALICOVPP_LOG_FORMAT: pretty
- CALICOVPP_LOG_LEVEL: ""
+ CALICOVPP_LOG_LEVEL: "debug"
CALICOVPP_SRV6: |-
{}
CALICOVPP_SWAP_DRIVER: ""
metadata:
name: default
spec:
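+ # serve Calico images from the local kind registry instead of docker.io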
+ registry: localhost:5000
calicoNetwork:
ipPools:
- cidr: 11.0.0.0/24
echo -e "\nTo shut down the cluster, use 'kind delete cluster'"
}
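+# Mirror the upstream Calico images into the local kind registry so the
+# operator (configured with "registry: localhost:5000") pulls them locally.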
+push_calico_to_registry() {
+ for component in pod2daemon-flexvol cni node typha apiserver csi kube-controllers node-driver-registrar; do
+ docker pull docker.io/calico/$component:$TIGERA_VERSION
+ docker image tag docker.io/calico/$component:$TIGERA_VERSION localhost:5000/calico/$component:$TIGERA_VERSION
+ docker push localhost:5000/calico/$component:$TIGERA_VERSION
+ done
+}
+
push_release_to_registry() {
- docker pull docker.io/calicovpp/vpp:$CALICOVPP_VERSION
- docker image tag docker.io/calicovpp/vpp:$CALICOVPP_VERSION localhost:5000/calicovpp/vpp:$CALICOVPP_VERSION
- docker push localhost:5000/calicovpp/vpp:$CALICOVPP_VERSION
- docker pull docker.io/calicovpp/agent:$CALICOVPP_VERSION
- docker image tag docker.io/calicovpp/agent:$CALICOVPP_VERSION localhost:5000/calicovpp/agent:$CALICOVPP_VERSION
- docker push localhost:5000/calicovpp/agent:$CALICOVPP_VERSION
- docker pull docker.io/calicovpp/multinet-monitor:$CALICOVPP_VERSION
- docker image tag docker.io/calicovpp/multinet-monitor:$CALICOVPP_VERSION localhost:5000/calicovpp/multinet-monitor:$CALICOVPP_VERSION
- docker push localhost:5000/calicovpp/multinet-monitor:$CALICOVPP_VERSION
+ for component in vpp agent multinet-monitor; do
+ docker pull docker.io/calicovpp/$component:$CALICOVPP_VERSION
+ docker image tag docker.io/calicovpp/$component:$CALICOVPP_VERSION localhost:5000/calicovpp/$component:$CALICOVPP_VERSION
+ docker push localhost:5000/calicovpp/$component:$CALICOVPP_VERSION
+ done
}
push_master_to_registry() {
- docker image tag calicovpp/vpp:latest localhost:5000/calicovpp/vpp:latest
- docker push localhost:5000/calicovpp/vpp:latest
- docker image tag calicovpp/agent:latest localhost:5000/calicovpp/agent:latest
- docker push localhost:5000/calicovpp/agent:latest
- docker image tag calicovpp/multinet-monitor:latest localhost:5000/calicovpp/multinet-monitor:latest
- docker push localhost:5000/calicovpp/multinet-monitor:latest
+ for component in vpp agent multinet-monitor; do
+ docker pull docker.io/calicovpp/$component:latest
+ docker image tag docker.io/calicovpp/$component:latest localhost:5000/calicovpp/$component:latest
+ docker push localhost:5000/calicovpp/$component:latest
+ done
}
cherry_pick() {
git clone https://github.com/projectcalico/vpp-dataplane.git $CALICOVPP_DIR
else
cd $CALICOVPP_DIR
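+ # drop any leftover local commits (e.g. earlier cherry-picks) before pulling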
+ git reset --hard origin/master
git pull
cd $VPP_DIR/test-c/kube-test
fi
kubectl apply -f kubernetes/registry.yaml
connect_registry
push_master_to_registry
+ push_calico_to_registry
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/$TIGERA_VERSION/manifests/tigera-operator.yaml
cherry_pick
kubectl apply -f kubernetes/registry.yaml
connect_registry
push_release_to_registry
+ push_calico_to_registry
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/$TIGERA_VERSION/manifests/tigera-operator.yaml
echo "Waiting for tigera-operator pod to start up."
echo "To delete the cluster, use 'kind delete cluster'"
}
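+# print a warning in red to stderr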
+red () { printf "\e[0;31m%s\e[0m\n" "$1" >&2 ; }
+
case "$COMMAND" in
master-cluster)
setup_master
help
;;
esac
+
+red "If ImagePullBackOff: add \"NO_PROXY=kind-registry\" and \"no_proxy=kind-registry\" to /etc/environment"