@echo " setup-cluster - setup KinD cluster for performance testing"
@echo " checkstyle-go - check style of .go source files"
@echo " fixstyle-go - format .go source files"
- @echo " cleanup-hst - stops and removes all docker contaiers and namespaces"
+ @echo " cleanup-hst - removes all docker containers and namespaces from last test run"
+ @echo " cleanup-perf - removes all kubernetes pods and namespaces from last test run"
@echo " list-tests - list all tests"
@echo
@echo "'make build' and 'make test' arguments:"
@echo "****************************"
@echo "Done."
@echo "****************************"
+
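+# cleanup-perf reads the PPID recorded in .last_hst_ppid and removes the pods and
+# the "namespace<ppid>" namespace left over from the last performance test run.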
+.PHONY: cleanup-perf
+cleanup-perf:
+ @if [ ! -f ".last_hst_ppid" ]; then \
+ echo "'.last_hst_ppid' file does not exist."; \
+ exit 1; \
+ fi
+ @echo "****************************"
+ @echo "Removing kubernetes pods:"
+ @kubectl delete pods --all --grace-period=0 -n namespace$$(cat .last_hst_ppid)
+ @echo "****************************"
+ @echo "Removing kubernetes namespace:"
+ @kubectl delete namespace namespace$$(cat .last_hst_ppid)
+ @echo "****************************"
+ @echo "Done."
+ @echo "****************************"
)
const (
- logDir string = "/tmp/hs-test/"
+ LogDir string = "/tmp/hs-test/"
volumeDir string = "/volumes"
)
}
if _, ok := yamlInput["volumes"]; ok {
- workingVolumeDir := logDir + suite.GetCurrentTestName() + volumeDir
+ workingVolumeDir := LogDir + suite.GetCurrentTestName() + volumeDir
workDirReplacer := strings.NewReplacer("$HST_DIR", workDir)
volDirReplacer := strings.NewReplacer("$HST_VOLUME_DIR", workingVolumeDir)
for _, volu := range yamlInput["volumes"].([]interface{}) {
func (c *Container) CreateConfigFromTemplate(targetConfigName string, templateName string, values any) {
template := template.Must(template.ParseFiles(templateName))
- f, err := os.CreateTemp(logDir, "hst-config")
+ f, err := os.CreateTemp(LogDir, "hst-config")
c.Suite.AssertNil(err, err)
defer os.Remove(f.Name())
}
func init() {
- cmd := exec.Command("mkdir", "-p", logDir)
+ cmd := exec.Command("mkdir", "-p", LogDir)
if err := cmd.Run(); err != nil {
panic(err)
}
return s.Label
}
-func getTestFilename() string {
+func GetTestFilename() string {
_, filename, _, _ := runtime.Caller(2)
return filepath.Base(filename)
}
func (s *HstSuite) getLogDirPath() string {
testId := s.GetTestId()
testName := s.GetCurrentTestName()
- logDirPath := logDir + testName + "/" + testId + "/"
+ logDirPath := LogDir + testName + "/" + testId + "/"
cmd := exec.Command("mkdir", "-p", logDirPath)
if err := cmd.Run(); err != nil {
func (s *HstSuite) SetupKindSuite() {
s.CreateLogger()
s.Log("[* SUITE SETUP]")
- s.newDockerClient()
RegisterFailHandler(func(message string, callerSkip ...int) {
s.HstFail()
Fail(message, callerSkip...)
for _, elem := range yamlTopo.Volumes {
volumeMap := elem["volume"].(VolumeConfig)
hostDir := volumeMap["host-dir"].(string)
- workingVolumeDir := logDir + s.GetCurrentTestName() + volumeDir
+ workingVolumeDir := LogDir + s.GetCurrentTestName() + volumeDir
volDirReplacer := strings.NewReplacer("$HST_VOLUME_DIR", workingVolumeDir)
hostDir = volDirReplacer.Replace(hostDir)
s.Volumes = append(s.Volumes, hostDir)
--- /dev/null
+package hst
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
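+// CreateNamespace creates the given namespace in the cluster and fails the suite if the request errors.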
+func (s *KindSuite) CreateNamespace(name string) {
+ namespace := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+ },
+ }
+
+ // Create the namespace in the cluster
+ _, err := s.ClientSet.CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
+ s.AssertNil(err)
+	s.Log("Namespace '%s' created", name)
+}
+
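+// DeployServerClient creates a privileged server pod pinned to the 'kind-worker' node
+// and a client pod on 'kind-worker2', waits up to ~10s for the server pod to report an
+// IP address, and records both pods so TeardownTest can delete them.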
+func (s *KindSuite) DeployServerClient(imageNameServer string, imageNameClient string, serverPod string, clientPod string) {
+ var err error
+ var counter uint8
+ var serverDetails *corev1.Pod
+ s.CurrentlyRunning = append(s.CurrentlyRunning, serverPod, clientPod)
+
+ server := &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: s.Namespace,
+ Name: serverPod,
+ Labels: map[string]string{
+ "app": serverPod,
+ },
+ Annotations: map[string]string{
+ "cni.projectcalico.org/vppVcl": "enable",
+ },
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "server",
+ Image: imageNameServer,
+ SecurityContext: &corev1.SecurityContext{
+ Privileged: boolPtr(true),
+ },
+ Command: []string{"tail", "-f", "/dev/null"},
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ Ports: []corev1.ContainerPort{
+ {
+ ContainerPort: 5201,
+ },
+ },
+ },
+ },
+ NodeName: "kind-worker",
+ },
+ }
+
+ // Create the Pod
+ _, err = s.ClientSet.CoreV1().Pods(s.Namespace).Create(context.TODO(), server, metav1.CreateOptions{})
+ s.AssertNil(err)
+ s.Log("Pod '%s' created", serverPod)
+
+ // Get IP
+ s.Log("Obtaining IP from '%s'", server.Name)
+ for s.ServerIp == "" {
+		serverDetails, err = s.ClientSet.CoreV1().Pods(s.Namespace).Get(context.TODO(), serverPod, metav1.GetOptions{})
+		// only read the pod IP if the Get call succeeded; serverDetails is nil on error
+		if err == nil {
+			s.ServerIp = serverDetails.Status.PodIP
+		}
+ time.Sleep(time.Second * 1)
+ counter++
+ if counter >= 10 {
+ Fail("Unable to get IP. Check if all pods are running. " + fmt.Sprint(err))
+ }
+ }
+
+ s.Log("IP: %s", s.ServerIp)
+
+ client := &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: s.Namespace,
+ Name: clientPod,
+ Annotations: map[string]string{
+ "cni.projectcalico.org/vppVcl": "enable",
+ },
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "client",
+ Image: imageNameClient,
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ Command: []string{"tail", "-f", "/dev/null"},
+ Ports: []corev1.ContainerPort{
+ {
+ ContainerPort: 5201,
+ },
+ },
+ SecurityContext: &corev1.SecurityContext{
+ Privileged: boolPtr(true),
+ },
+ },
+ },
+ NodeName: "kind-worker2",
+ },
+ }
+
+ _, err = s.ClientSet.CoreV1().Pods(s.Namespace).Create(context.TODO(), client, metav1.CreateOptions{})
+ s.AssertNil(err)
+ s.Log("Pod '%s' created", clientPod)
+
+ // let pods start properly
+ time.Sleep(time.Second * 5)
+}
--- /dev/null
+package hst
+
+import (
+ "context"
+ "errors"
+ "os"
+ "os/exec"
+ "reflect"
+ "runtime"
+ "strings"
+ "text/template"
+ "time"
+
+ . "fd.io/hs-test/infra"
+ . "github.com/onsi/ginkgo/v2"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+)
+
+type KindSuite struct {
+ HstSuite
+ ClientSet *kubernetes.Clientset
+ Config *rest.Config
+ ServerIp string
+ Namespace string
+ KubeconfigPath string
+ ImageNames
+ PodNames
+ ContainerNames
+}
+
+type ImageNames struct {
+ HstVpp string
+ Nginx string
+ Ab string
+}
+
+type PodNames struct {
+ ClientVpp string
+ ServerVpp string
+ Nginx string
+ Ab string
+ CurrentlyRunning []string
+}
+
+type ContainerNames struct {
+ Server string
+ Client string
+}
+
+var kindTests = map[string][]func(s *KindSuite){}
+
+func RegisterKindTests(tests ...func(s *KindSuite)) {
+ kindTests[GetTestFilename()] = tests
+}
+
+func deletePod(clientset *kubernetes.Clientset, namespace, podName string) error {
+ return clientset.CoreV1().Pods(namespace).Delete(context.TODO(), podName, metav1.DeleteOptions{GracePeriodSeconds: int64Ptr(0)})
+}
+
+func deleteNamespace(clientset *kubernetes.Clientset, namespace string) error {
+ return clientset.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
+}
+
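+// loadDockerImages iterates over the ImageNames struct fields and loads each image
+// into the KinD cluster with 'kind load docker-image'.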
+func (s *KindSuite) loadDockerImages() {
+ s.Log("This may take a while. If you encounter problems, " +
+ "try loading docker images manually: 'kind load docker-image [image]'")
+ value := reflect.ValueOf(s.ImageNames)
+ reflType := reflect.TypeOf(s.ImageNames)
+ var cmd *exec.Cmd
+ var out []byte
+ var err error
+
+ if reflType.Kind() == reflect.Struct {
+ for i := range value.NumField() {
+ if value.Field(i).Kind() == reflect.String {
+ fieldValue := value.Field(i).Interface().(string)
+ s.Log("loading docker image %s...", fieldValue)
+ cmd = exec.Command("kind", "load", "docker-image", fieldValue)
+ out, err = cmd.CombinedOutput()
+ s.Log(string(out))
+ s.AssertNil(err, string(out))
+ }
+ }
+ } else {
+		s.AssertNil(errors.New("ImageNames is not a struct"))
+ }
+}
+
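+// SetupSuite loads the test docker images into the cluster, builds a Kubernetes client
+// from the user's kubeconfig and creates the per-run "namespace<ppid>" namespace.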
+func (s *KindSuite) SetupSuite() {
+ s.SetupKindSuite()
+ s.ImageNames.Ab = "hs-test/ab:latest"
+ s.ImageNames.Nginx = "hs-test/nginx-ldp:latest"
+ s.ImageNames.HstVpp = "hs-test/vpp:latest"
+ s.PodNames.ServerVpp = "server" + s.Ppid
+ s.PodNames.ClientVpp = "client" + s.Ppid
+ s.PodNames.Nginx = "nginx-ldp" + s.Ppid
+ s.PodNames.Ab = "ab" + s.Ppid
+ s.Namespace = "namespace" + s.Ppid
+ s.ContainerNames.Client = "client"
+ s.ContainerNames.Server = "server"
+
+ s.loadDockerImages()
+
+ var err error
+ if *SudoUser == "root" {
+ s.KubeconfigPath = "/.kube/config"
+ } else {
+ s.KubeconfigPath = "/home/" + *SudoUser + "/.kube/config"
+ }
+
+ s.Config, err = clientcmd.BuildConfigFromFlags("", s.KubeconfigPath)
+ s.AssertNil(err)
+
+ s.ClientSet, err = kubernetes.NewForConfig(s.Config)
+ s.AssertNil(err)
+
+ s.CreateNamespace(s.Namespace)
+}
+
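+// TeardownTest deletes the pods created by the test, unless persistence was requested.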
+func (s *KindSuite) TeardownTest() {
+ if *IsPersistent {
+ return
+ }
+ s.Log("[TEST TEARDOWN]")
+ s.ServerIp = ""
+ if len(s.CurrentlyRunning) != 0 {
+ for _, pod := range s.CurrentlyRunning {
+ s.Log(" %s", pod)
+ deletePod(s.ClientSet, s.Namespace, pod)
+ }
+ }
+}
+
+func (s *KindSuite) TeardownSuite() {
+ if *IsPersistent {
+ return
+ }
+ s.Log("[SUITE TEARDOWN]")
+ s.Log(" %s", s.Namespace)
+ s.AssertNil(deleteNamespace(s.ClientSet, s.Namespace))
+}
+
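+// CreateConfigFromTemplate renders templateName with the given values into a temporary
+// file under LogDir and copies it into the nginx pod as targetConfigName.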
+func (s *KindSuite) CreateConfigFromTemplate(targetConfigName string, templateName string, values any) {
+ template := template.Must(template.ParseFiles(templateName))
+
+ f, err := os.CreateTemp(LogDir, "hst-config")
+ s.AssertNil(err, err)
+ defer os.Remove(f.Name())
+
+ err = template.Execute(f, values)
+ s.AssertNil(err, err)
+
+ err = f.Close()
+ s.AssertNil(err, err)
+
+ s.CopyToPod(s.PodNames.Nginx, s.Namespace, f.Name(), targetConfigName)
+}
+
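+// CreateNginxConfig renders the nginx template with a single worker and uploads it
+// to the nginx pod as /nginx.conf.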
+func (s *KindSuite) CreateNginxConfig() {
+ values := struct {
+ Workers uint8
+ }{
+ Workers: 1,
+ }
+ s.CreateConfigFromTemplate(
+ "/nginx.conf",
+ "./resources/nginx/nginx.conf",
+ values,
+ )
+}
+
+var _ = Describe("KindSuite", Ordered, ContinueOnFailure, Label("Perf"), func() {
+ var s KindSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ AfterEach(func() {
+ s.TeardownTest()
+ })
+ AfterAll(func() {
+ s.TeardownSuite()
+ })
+
+ for filename, tests := range kindTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(time.Minute*15))
+ }
+ }
+})
--- /dev/null
+package hst
+
+import (
+ "bytes"
+ "context"
+ "os/exec"
+ "time"
+
+ "k8s.io/client-go/tools/remotecommand"
+)
+
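+// CopyToPod copies a local file into the given pod using 'kubectl cp'.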
+func (s *KindSuite) CopyToPod(podName string, namespace string, src string, dst string) {
+ cmd := exec.Command("kubectl", "--kubeconfig="+s.KubeconfigPath, "cp", src, namespace+"/"+podName+":"+dst)
+ out, err := cmd.CombinedOutput()
+ s.AssertNil(err, string(out))
+}
+
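+// Exec runs a command in the given container through the Kubernetes exec API
+// and returns the combined stdout/stderr output.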
+func (s *KindSuite) Exec(podName string, containerName string, command []string) (string, error) {
+ var stdout, stderr bytes.Buffer
+
+ // Prepare the request
+ req := s.ClientSet.CoreV1().RESTClient().Post().
+ Resource("pods").
+ Name(podName).
+ Namespace(s.Namespace).
+ SubResource("exec").
+ Param("container", containerName).
+ Param("stdout", "true").
+ Param("stderr", "true").
+ Param("tty", "true")
+
+ for _, cmd := range command {
+ req = req.Param("command", cmd)
+ }
+ s.Log("%s: %s", podName, command)
+
+ executor, err := remotecommand.NewSPDYExecutor(s.Config, "POST", req.URL())
+ if err != nil {
+		s.Log("Error creating executor: %s", err.Error())
+		return "", err
+	}
+
+ ctx, cancel := context.WithTimeout(context.Background(), 500*time.Second)
+ defer cancel()
+
+ err = executor.StreamWithContext(ctx, remotecommand.StreamOptions{
+ Stdout: &stdout,
+ Stderr: &stderr,
+ Tty: true,
+ })
+
+ output := stdout.String() + stderr.String()
+
+ if err != nil {
+ return output, err
+ }
+
+ return output, nil
+}
+
+// ExecAlt is an alternative to Exec() that shells out to 'kubectl exec' instead of
+// using the Kubernetes API. Use it if the regular Exec() doesn't work.
+func (s *KindSuite) ExecAlt(podName string, containerName string, namespace string, command []string) (string, error) {
+ baseCmd := []string{
+ "kubectl",
+ "--kubeconfig=" + s.KubeconfigPath,
+ "-n", namespace,
+ "exec",
+ podName,
+ "--",
+ }
+ fullCmd := append(baseCmd, command...)
+ cmd := exec.Command(fullCmd[0], fullCmd[1:]...)
+ s.Log(cmd)
+ out, err := cmd.CombinedOutput()
+
+ return string(out), err
+}
+
+func boolPtr(b bool) *bool {
+ return &b
+}
+
+func int64Ptr(integer int64) *int64 {
+ return &integer
+}
}
func RegisterCpuPinningTests(tests ...func(s *CpuPinningSuite)) {
- cpuPinningTests[getTestFilename()] = tests
+ cpuPinningTests[GetTestFilename()] = tests
}
func RegisterCpuPinningSoloTests(tests ...func(s *CpuPinningSuite)) {
- cpuPinningSoloTests[getTestFilename()] = tests
+ cpuPinningSoloTests[GetTestFilename()] = tests
}
func (s *CpuPinningSuite) SetupSuite() {
var envoyProxySoloTests = map[string][]func(s *EnvoyProxySuite){}
func RegisterEnvoyProxyTests(tests ...func(s *EnvoyProxySuite)) {
- envoyProxyTests[getTestFilename()] = tests
+ envoyProxyTests[GetTestFilename()] = tests
}
func RegisterEnvoyProxySoloTests(tests ...func(s *EnvoyProxySuite)) {
- envoyProxySoloTests[getTestFilename()] = tests
+ envoyProxySoloTests[GetTestFilename()] = tests
}
func (s *EnvoyProxySuite) SetupSuite() {
}
func RegisterH2Tests(tests ...func(s *H2Suite)) {
- h2Tests[getTestFilename()] = tests
+ h2Tests[GetTestFilename()] = tests
}
func (s *H2Suite) SetupSuite() {
var iperfSoloTests = map[string][]func(s *IperfSuite){}
func RegisterIperfTests(tests ...func(s *IperfSuite)) {
- iperfTests[getTestFilename()] = tests
+ iperfTests[GetTestFilename()] = tests
}
func RegisterIperfSoloTests(tests ...func(s *IperfSuite)) {
- iperfSoloTests[getTestFilename()] = tests
+ iperfSoloTests[GetTestFilename()] = tests
}
func (s *IperfSuite) SetupSuite() {
+++ /dev/null
-package hst
-
-import (
- "bytes"
- "context"
- "fmt"
- "reflect"
- "runtime"
- "strings"
- "time"
-
- . "github.com/onsi/ginkgo/v2"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/clientcmd"
- "k8s.io/client-go/tools/remotecommand"
-)
-
-type KindSuite struct {
- HstSuite
- ClientSet *kubernetes.Clientset
- Config *rest.Config
- ClientName string
- ServerName string
- ServerIp string
- Namespace string
-}
-
-var kindTests = map[string][]func(s *KindSuite){}
-
-const imageName string = "hs-test/vpp:latest"
-
-func RegisterKindTests(tests ...func(s *KindSuite)) {
- kindTests[getTestFilename()] = tests
-}
-
-func boolPtr(b bool) *bool {
- return &b
-}
-
-func deletePod(clientset *kubernetes.Clientset, namespace, podName string) error {
- return clientset.CoreV1().Pods(namespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
-}
-
-func deleteNamespace(clientset *kubernetes.Clientset, namespace string) error {
- return clientset.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
-}
-
-func (s *KindSuite) SetupSuite() {
- s.SetupKindSuite()
-
- var err error
- var kubeconfig string
- if *SudoUser == "root" {
- kubeconfig = "/.kube/config"
- } else {
- kubeconfig = "/home/" + *SudoUser + "/.kube/config"
- }
- s.Log(kubeconfig)
- s.Config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
- s.AssertNil(err)
-
- s.ClientSet, err = kubernetes.NewForConfig(s.Config)
- s.AssertNil(err)
-
- s.Deploy()
-}
-
-// Deletes pods in a namespace. Lastly, deletes the namespace itself.
-func (s *KindSuite) Teardown(podNames ...string) {
- if *IsPersistent {
- return
- }
- s.Log("Teardown:")
- if len(podNames) != 0 {
- for _, pod := range podNames {
- s.Log(" %s", pod)
- deletePod(s.ClientSet, s.Namespace, pod)
- }
- }
-
- s.Log(" %s", s.Namespace)
- s.AssertNil(deleteNamespace(s.ClientSet, s.Namespace))
-}
-
-func (s *KindSuite) Exec(podName string, containerName string, namespace string, command []string) (string, error) {
- var stdout, stderr bytes.Buffer
-
- // Prepare the request
- req := s.ClientSet.CoreV1().RESTClient().Post().
- Resource("pods").
- Name(podName).
- Namespace(namespace).
- SubResource("exec").
- Param("container", containerName).
- Param("stdout", "true").
- Param("stderr", "true").
- Param("tty", "true")
-
- for _, cmd := range command {
- req = req.Param("command", cmd)
- }
- s.Log("%s: %s", podName, command)
-
- executor, err := remotecommand.NewSPDYExecutor(s.Config, "POST", req.URL())
- if err != nil {
- s.Log("Error creating executor: %s", err.Error())
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
- defer cancel()
-
- err = executor.StreamWithContext(ctx, remotecommand.StreamOptions{
- Stdout: &stdout,
- Stderr: &stderr,
- Tty: true,
- })
-
- output := stdout.String() + stderr.String()
-
- if err != nil {
- return output, err
- }
-
- return output, nil
-}
-
-func (s *KindSuite) Deploy() {
- var err error
- var counter uint8
- var serverDetails *corev1.Pod
- s.ServerName = "server"
- s.ClientName = "client"
- s.Namespace = "custom-namespace"
-
- namespace := &corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: s.Namespace,
- },
- }
-
- // Create the namespace in the cluster
- _, err = s.ClientSet.CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
- s.AssertNil(err)
- s.Log("Namespace '%s' created", s.Namespace)
-
- server := &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: s.Namespace,
- Name: s.ServerName,
- Labels: map[string]string{
- "app": s.ServerName,
- },
- Annotations: map[string]string{
- "cni.projectcalico.org/vppVcl": "enable",
- },
- },
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: s.ServerName,
- Image: imageName,
- SecurityContext: &corev1.SecurityContext{
- Privileged: boolPtr(true),
- },
- Command: []string{"tail", "-f", "/dev/null"},
- ImagePullPolicy: corev1.PullIfNotPresent,
- Ports: []corev1.ContainerPort{
- {
- ContainerPort: 5201,
- },
- },
- },
- },
- NodeName: "kind-worker",
- },
- }
-
- // Create the Pod
- _, err = s.ClientSet.CoreV1().Pods(s.Namespace).Create(context.TODO(), server, metav1.CreateOptions{})
- s.AssertNil(err)
- s.Log("Pod '%s' created", s.ServerName)
-
- // Get IP
- s.Log("Obtaining IP from '%s'", server.Name)
- for s.ServerIp == "" {
- serverDetails, err = s.ClientSet.CoreV1().Pods(s.Namespace).Get(context.TODO(), s.ServerName, metav1.GetOptions{})
- s.ServerIp = serverDetails.Status.PodIP
- time.Sleep(time.Second * 1)
- counter++
- if counter >= 10 {
- Fail("Unable to get IP. Check if all pods are running. " + fmt.Sprint(err))
- }
- }
-
- s.Log("IP: %s", s.ServerIp)
-
- client := &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: s.Namespace,
- Name: s.ClientName,
- Annotations: map[string]string{
- "cni.projectcalico.org/vppVcl": "enable",
- },
- },
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: s.ClientName,
- Image: imageName,
- ImagePullPolicy: corev1.PullIfNotPresent,
- Command: []string{"tail", "-f", "/dev/null"},
- Ports: []corev1.ContainerPort{
- {
- ContainerPort: 5201,
- },
- },
- SecurityContext: &corev1.SecurityContext{
- Privileged: boolPtr(true),
- },
- },
- },
- NodeName: "kind-worker2",
- },
- }
-
- _, err = s.ClientSet.CoreV1().Pods(s.Namespace).Create(context.TODO(), client, metav1.CreateOptions{})
- s.AssertNil(err)
- s.Log("Pod '%s' created", s.ClientName)
-
- // let pods start properly
- time.Sleep(time.Second * 5)
-}
-
-var _ = Describe("KindSuite", Ordered, ContinueOnFailure, Label("Perf"), func() {
- var s KindSuite
- BeforeAll(func() {
- s.SetupSuite()
- })
-
- AfterAll(func() {
- s.Teardown(s.ClientName, s.ServerName)
- })
-
- for filename, tests := range kindTests {
- for _, test := range tests {
- test := test
- pc := reflect.ValueOf(test).Pointer()
- funcValue := runtime.FuncForPC(pc)
- testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
- It(testName, func(ctx SpecContext) {
- s.Log(testName + ": BEGIN")
- test(&s)
- }, SpecTimeout(TestTimeout))
- }
- }
-})
}
func RegisterLdpTests(tests ...func(s *LdpSuite)) {
- ldpTests[getTestFilename()] = tests
+ ldpTests[GetTestFilename()] = tests
}
func RegisterSoloLdpTests(tests ...func(s *LdpSuite)) {
- ldpSoloTests[getTestFilename()] = tests
+ ldpSoloTests[GetTestFilename()] = tests
}
func (s *LdpSuite) SetupSuite() {
}
func RegisterNginxProxyTests(tests ...func(s *NginxProxySuite)) {
- nginxProxyTests[getTestFilename()] = tests
+ nginxProxyTests[GetTestFilename()] = tests
}
func RegisterNginxProxySoloTests(tests ...func(s *NginxProxySuite)) {
- nginxProxySoloTests[getTestFilename()] = tests
+ nginxProxySoloTests[GetTestFilename()] = tests
}
func (s *NginxProxySuite) SetupSuite() {
}
func RegisterNoTopoTests(tests ...func(s *NoTopoSuite)) {
- noTopoTests[getTestFilename()] = tests
+ noTopoTests[GetTestFilename()] = tests
}
func RegisterNoTopoSoloTests(tests ...func(s *NoTopoSuite)) {
- noTopoSoloTests[getTestFilename()] = tests
+ noTopoSoloTests[GetTestFilename()] = tests
}
func (s *NoTopoSuite) SetupSuite() {
}
func RegisterNoTopo6Tests(tests ...func(s *NoTopo6Suite)) {
- noTopo6Tests[getTestFilename()] = tests
+ noTopo6Tests[GetTestFilename()] = tests
}
func RegisterNoTopo6SoloTests(tests ...func(s *NoTopo6Suite)) {
- noTopo6SoloTests[getTestFilename()] = tests
+ noTopo6SoloTests[GetTestFilename()] = tests
}
func (s *NoTopo6Suite) SetupSuite() {
}
func RegisterVethTests(tests ...func(s *VethsSuite)) {
- vethTests[getTestFilename()] = tests
+ vethTests[GetTestFilename()] = tests
}
func RegisterSoloVethTests(tests ...func(s *VethsSuite)) {
- vethSoloTests[getTestFilename()] = tests
+ vethSoloTests[GetTestFilename()] = tests
}
func (s *VethsSuite) SetupSuite() {
}
func RegisterVeth6Tests(tests ...func(s *Veths6Suite)) {
- veth6Tests[getTestFilename()] = tests
+ veth6Tests[GetTestFilename()] = tests
}
func RegisterSoloVeth6Tests(tests ...func(s *Veths6Suite)) {
- veth6SoloTests[getTestFilename()] = tests
+ veth6SoloTests[GetTestFilename()] = tests
}
func (s *Veths6Suite) SetupSuite() {
var vppProxySoloTests = map[string][]func(s *VppProxySuite){}
func RegisterVppProxyTests(tests ...func(s *VppProxySuite)) {
- vppProxyTests[getTestFilename()] = tests
+ vppProxyTests[GetTestFilename()] = tests
}
func RegisterVppProxySoloTests(tests ...func(s *VppProxySuite)) {
- vppProxySoloTests[getTestFilename()] = tests
+ vppProxySoloTests[GetTestFilename()] = tests
}
func (s *VppProxySuite) SetupSuite() {
var vppUdpProxySoloTests = map[string][]func(s *VppUdpProxySuite){}
func RegisterVppUdpProxyTests(tests ...func(s *VppUdpProxySuite)) {
- vppUdpProxyTests[getTestFilename()] = tests
+ vppUdpProxyTests[GetTestFilename()] = tests
}
func RegisterVppUdpProxySoloTests(tests ...func(s *VppUdpProxySuite)) {
- vppUdpProxySoloTests[getTestFilename()] = tests
+ vppUdpProxySoloTests[GetTestFilename()] = tests
}
func (s *VppUdpProxySuite) SetupSuite() {
package main
import (
- . "fd.io/hs-test/infra"
+ "time"
+
+ . "fd.io/hs-test/infra/infra_kind"
+ . "github.com/onsi/ginkgo/v2"
)
func init() {
- RegisterKindTests(KindIperfVclTest)
+ RegisterKindTests(KindIperfVclTest, NginxRpsTest)
}
func KindIperfVclTest(s *KindSuite) {
+ s.DeployServerClient(s.ImageNames.HstVpp, s.ImageNames.HstVpp, s.PodNames.ServerVpp, s.PodNames.ClientVpp)
+
vclPath := "/vcl.conf"
ldpPath := "/usr/lib/libvcl_ldpreload.so"
"app-socket-api abstract:vpp/session\n" +
"}\" > /vcl.conf"
- s.Exec(s.ClientName, s.ClientName, s.Namespace, []string{"/bin/bash", "-c", symLink})
- s.Exec(s.ServerName, s.ServerName, s.Namespace, []string{"/bin/bash", "-c", symLink})
+ s.Exec(s.PodNames.ClientVpp, s.ContainerNames.Client, []string{"/bin/bash", "-c", symLink})
+ s.Exec(s.PodNames.ServerVpp, s.ContainerNames.Server, []string{"/bin/bash", "-c", symLink})
- _, err := s.Exec(s.ClientName, s.ClientName, s.Namespace, []string{"/bin/bash", "-c", vclConf})
+ _, err := s.Exec(s.PodNames.ClientVpp, s.ContainerNames.Client, []string{"/bin/bash", "-c", vclConf})
s.AssertNil(err)
- _, err = s.Exec(s.ServerName, s.ServerName, s.Namespace, []string{"/bin/bash", "-c", vclConf})
+ _, err = s.Exec(s.PodNames.ServerVpp, s.ContainerNames.Server, []string{"/bin/bash", "-c", vclConf})
s.AssertNil(err)
- _, err = s.Exec(s.ServerName, s.ServerName, s.Namespace, []string{"/bin/bash", "-c",
+ _, err = s.Exec(s.PodNames.ServerVpp, s.ContainerNames.Server, []string{"/bin/bash", "-c",
"VCL_CONFIG=" + vclPath + " LD_PRELOAD=" + ldpPath + " iperf3 -s -D -4"})
s.AssertNil(err)
- output, err := s.Exec(s.ClientName, s.ClientName, s.Namespace, []string{"/bin/bash", "-c",
+ output, err := s.Exec(s.PodNames.ClientVpp, s.ContainerNames.Client, []string{"/bin/bash", "-c",
"VCL_CONFIG=" + vclPath + " LD_PRELOAD=" + ldpPath + " iperf3 -c " + s.ServerIp})
s.Log(output)
s.AssertNil(err)
}
+
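+// NginxRpsTest deploys an nginx (VCL/LDP) server pod and an ab client pod, then
+// measures requests per second against the 64B.json response.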
+func NginxRpsTest(s *KindSuite) {
+ s.DeployServerClient(s.ImageNames.Nginx, s.ImageNames.Ab, s.PodNames.Nginx, s.PodNames.Ab)
+ s.CreateNginxConfig()
+ vcl := "VCL_CONFIG=/vcl.conf"
+ ldp := "LD_PRELOAD=/usr/lib/libvcl_ldpreload.so"
+
+ // temporary workaround
+ symLink := "for file in /usr/lib/*.so; do\n" +
+ "if [ -e \"$file\" ]; then\n" +
+ "base=$(basename \"$file\")\n" +
+ "newlink=\"/usr/lib/${base}.25.06\"\n" +
+ "ln -s \"$file\" \"$newlink\"\n" +
+ "fi\n" +
+ "done"
+
+ vclConf := "echo \"vcl {\n" +
+ "heapsize 64M\n" +
+ "rx-fifo-size 4000000\n" +
+ "tx-fifo-size 4000000\n" +
+ "segment-size 4000000000\n" +
+ "add-segment-size 4000000000\n" +
+ "event-queue-size 100000\n" +
+ "use-mq-eventfd\n" +
+ "app-socket-api abstract:vpp/session\n" +
+ "}\" > /vcl.conf"
+
+ out, err := s.Exec(s.PodNames.Nginx, s.ContainerNames.Server, []string{"/bin/bash", "-c", symLink})
+ s.AssertNil(err, out)
+
+ out, err = s.Exec(s.PodNames.Nginx, s.ContainerNames.Server, []string{"/bin/bash", "-c", vclConf})
+ s.AssertNil(err, out)
+
+ go func() {
+ defer GinkgoRecover()
+ out, err := s.Exec(s.PodNames.Nginx, s.ContainerNames.Server, []string{"/bin/bash", "-c", ldp + " " + vcl + " nginx -c /nginx.conf"})
+ s.AssertNil(err, out)
+ }()
+
+ // wait for nginx to start up
+ time.Sleep(time.Second * 2)
+ out, err = s.Exec(s.PodNames.Ab, s.ContainerNames.Client, []string{"ab", "-k", "-r", "-n", "1000000", "-c", "1000", "http://" + s.ServerIp + ":80/64B.json"})
+ s.Log(out)
+ s.AssertNil(err)
+}
#!/usr/bin/env bash
set -e
+if [ "$EUID" -eq 0 ]; then
+ echo "********"
+ echo "Do not run as root. Exiting."
+ echo "********"
+ exit 1
+fi
+
echo "********"
echo "Performance tests only work on Ubuntu 22.04 for now."
echo "Do not run as root (untested) and make sure you have KinD and Kubectl installed!"
kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/master/yaml/calico/installation-default.yaml
kubectl create -f kubernetes/calico-config.yaml
-echo "Loading hs-test/vpp image."
-kind load docker-image hs-test/vpp:latest
echo "Done. Please wait for the cluster to come fully online before running tests."
echo "Use 'watch kubectl get pods -A' to monitor cluster status."
echo "To delete the cluster, use 'kind delete cluster'"
\ No newline at end of file