CSIT-748 vnf-agent integration 04/8504/38
author Peter Mikus <pmikus@cisco.com>
Fri, 22 Sep 2017 12:59:40 +0000 (14:59 +0200)
committer Peter Mikus <pmikus@cisco.com>
Thu, 12 Oct 2017 08:39:16 +0000 (08:39 +0000)
CSIT-773 Implement RF keywords for k8s

- Implementation of Test Suite Setup for Ligato vnf-agent testing
- Implementation of KubernetesUtil for controlling kubectl
- Yaml templates for L2XC topology with 1cswitch and 1cnf
- Yaml templates for L2BD topology with 1cswitch and 1cnf
- ligato bootstrap script for creating vnf-agent image

Change-Id: Iebefde0eb984a27a0afcdf29fe549ca4edf8a61e
Signed-off-by: Peter Mikus <pmikus@cisco.com>
23 files changed:
LIGATO_REPO_URL [new file with mode: 0644]
LIGATO_STABLE_VER [new file with mode: 0644]
bootstrap-verify-perf-ligato.sh [new file with mode: 0644]
resources/libraries/bash/k8s_setup.sh [new file with mode: 0755]
resources/libraries/python/ContainerUtils.py
resources/libraries/python/InterfaceUtil.py
resources/libraries/python/KubernetesUtils.py [new file with mode: 0644]
resources/libraries/python/VppConfigGenerator.py
resources/libraries/python/constants.py
resources/libraries/robot/performance/performance_setup.robot
resources/libraries/robot/performance/performance_utils.robot
resources/libraries/robot/shared/default.robot
resources/templates/kubernetes/calico_v2.4.1.yaml [new file with mode: 0644]
resources/templates/kubernetes/csit.yaml [new file with mode: 0644]
resources/templates/kubernetes/etcd.yaml [new file with mode: 0644]
resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml [new file with mode: 0644]
resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml [new file with mode: 0644]
resources/templates/kubernetes/kafka.yaml [new file with mode: 0644]
resources/tools/scripts/topo_container_copy.py [new file with mode: 0644]
tests/ligato/perf/__init__.robot [new file with mode: 0644]
tests/ligato/perf/l2/10ge2p1x520-eth-l2bdbase-eth-2memif-1vnf-ndrpdrdisc.robot [new file with mode: 0644]
tests/ligato/perf/l2/10ge2p1x520-eth-l2xcbase-eth-2memif-1vnf-ndrpdrdisc.robot [new file with mode: 0644]
tests/vpp/perf/__init__.robot

diff --git a/LIGATO_REPO_URL b/LIGATO_REPO_URL
new file mode 100644 (file)
index 0000000..6b4ed1d
--- /dev/null
@@ -0,0 +1 @@
+https://github.com/ligato/
diff --git a/LIGATO_STABLE_VER b/LIGATO_STABLE_VER
new file mode 100644 (file)
index 0000000..ce15bd9
--- /dev/null
@@ -0,0 +1 @@
+8b61778490fa4a22df294b0682c13e39b8f51869
diff --git a/bootstrap-verify-perf-ligato.sh b/bootstrap-verify-perf-ligato.sh
new file mode 100644 (file)
index 0000000..7c4c6d3
--- /dev/null
@@ -0,0 +1,363 @@
+#!/bin/bash
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -xo pipefail
+
+# Space separated list of available testbeds, described by topology files
+TOPOLOGIES="topologies/available/lf_testbed1.yaml \
+            topologies/available/lf_testbed2.yaml \
+            topologies/available/lf_testbed3.yaml"
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# Reservation dir
+RESERVATION_DIR="/tmp/reservation_dir"
+INSTALLATION_DIR="/tmp/install_dir"
+
+PYBOT_ARGS="-W 150 -L TRACE"
+
+ARCHIVE_ARTIFACTS=(log.html output.xml report.html output_perf_data.xml)
+
+LIGATO_REPO_URL=$(cat ${SCRIPT_DIR}/LIGATO_REPO_URL)
+LIGATO_STABLE_VER=$(cat ${SCRIPT_DIR}/LIGATO_STABLE_VER)
+VPP_COMMIT=$1
+VPP_BUILD=$1
+DOCKER_DEB="docker-ce_17.06.2~ce-0~ubuntu_amd64.deb"
+
+# Clone & checkout stable vnf-agent
+cd .. && git clone ${LIGATO_REPO_URL}/vpp-agent
+# If the git clone fails, complain clearly and exit
+if [ $? != 0 ]; then
+    echo "Failed to run: git clone ${LIGATO_REPO_URL}/vpp-agent"
+    exit 1
+fi
+cd vpp-agent && git checkout ${LIGATO_STABLE_VER}
+# If the git checkout fails, complain clearly and exit
+if [ $? != 0 ]; then
+    echo "Failed to run: git checkout ${LIGATO_STABLE_VER}"
+    exit 1
+fi
+
+# Install Docker
+wget -q https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/${DOCKER_DEB}
+sudo dpkg -i ${DOCKER_DEB}
+# If installation fails, complain clearly and exit
+if [ $? != 0 ]; then
+    echo "Failed to install Docker"
+    exit 1
+fi
+
+# Compile vnf-agent docker image
+cd ${SCRIPT_DIR}/../vpp-agent/docker/dev_vpp_agent/ &&\
+    ./build.sh --agent ${LIGATO_STABLE_VER} --vpp ${VPP_COMMIT} &&\
+    ./shrink.sh
+cd ${SCRIPT_DIR}/../vpp-agent/docker/prod_vpp_agent/ &&\
+    ./build.sh &&\
+    ./shrink.sh
+# Export Docker image
+sudo docker save prod_vpp_agent_shrink | gzip > prod_vpp_agent_shrink.tar.gz
+# If image export fails, complain clearly and exit (NOTE: build steps above are unchecked)
+if [ $? != 0 ]; then
+    echo "Failed to build vpp-agent Docker image."
+    exit 1
+fi
+DOCKER_IMAGE="$( readlink -f prod_vpp_agent_shrink.tar.gz | tr '\n' ' ' )"
+
+cd ${SCRIPT_DIR}
+
+sudo apt-get -y update
+sudo apt-get -y install libpython2.7-dev python-virtualenv
+
+WORKING_TOPOLOGY=""
+export PYTHONPATH=${SCRIPT_DIR}
+
+virtualenv --system-site-packages env
+. env/bin/activate
+
+echo pip install
+pip install -r requirements.txt
+
+# We iterate over available topologies and wait until we reserve topology
+while :; do
+    for TOPOLOGY in ${TOPOLOGIES};
+    do
+        python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -t ${TOPOLOGY}
+        if [ $? -eq 0 ]; then
+            WORKING_TOPOLOGY=${TOPOLOGY}
+            echo "Reserved: ${WORKING_TOPOLOGY}"
+            break
+        fi
+    done
+
+    if [ ! -z "${WORKING_TOPOLOGY}" ]; then
+        # Exit the infinite while loop if we made a reservation
+        break
+    fi
+
+    # Wait ~3 minutes before next try
+    SLEEP_TIME=$[ ( $RANDOM % 20 ) + 180 ]s
+    echo "Sleeping ${SLEEP_TIME}"
+    sleep ${SLEEP_TIME}
+done
+
+function cancel_all {
+    python ${SCRIPT_DIR}/resources/tools/scripts/topo_container_copy.py -c -d ${INSTALLATION_DIR} -t $1
+    python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -c -t $1
+}
+
+# On script exit we cancel the reservation and installation and delete all vpp
+# packages
+trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
+
+python ${SCRIPT_DIR}/resources/tools/scripts/topo_container_copy.py\
+    -t ${WORKING_TOPOLOGY} -d ${INSTALLATION_DIR} -i ${DOCKER_IMAGE}
+if [ $? -eq 0 ]; then
+    echo "Docker image copied and loaded on hosts from: ${WORKING_TOPOLOGY}"
+else
+    echo "Failed to copy and load Docker image to DUTs"
+    exit 1
+fi
+
+case "$TEST_TAG" in
+    # run specific performance tests based on jenkins job type variable
+    PERFTEST_DAILY )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrdiscANDnic_intel-x520-da2AND1t1cORndrdiscANDnic_intel-x520-da2AND2t2c \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    PERFTEST_SEMI_WEEKLY )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrdiscANDnic_intel-x710AND1t1cORndrdiscANDnic_intel-x710AND2t2cORndrdiscANDnic_intel-xl710AND1t1cORndrdiscANDnic_intel-xl710AND2t2c \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VERIFY-PERF-NDRDISC )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrdiscAND1t1cORndrdiscAND2t2c \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VERIFY-PERF-PDRDISC )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include pdrdiscAND1t1cORpdrdiscAND2t2c \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VERIFY-PERF-NDRCHK )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrchkAND1t1cORndrchkAND2t2c \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    PERFTEST_NDRCHK_DAILY )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrchkAND1t1cORndrchkAND2t2c \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VERIFY-PERF-IP4 )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDip4baseORndrdiscANDnic_intel-x520-da2AND1t1cANDip4fwdANDfib_2m \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VERIFY-PERF-IP6 )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDip6baseORndrdiscANDnic_intel-x520-da2AND1t1cANDip6fwdANDfib_2m \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VERIFY-PERF-L2 )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDl2xcbaseORndrdiscANDnic_intel-x520-da2AND1t1cANDl2bdbase \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VERIFY-PERF-LISP )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDlisp \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VERIFY-PERF-VXLAN )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDvxlan \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VERIFY-PERF-VHOST )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDvhost \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VPP-VERIFY-PERF-IP4 )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include pdrchkANDnic_intel-x520-da2AND1t1cANDip4baseORpdrchkANDnic_intel-x520-da2AND1t1cANDip4fwdANDfib_2m \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VPP-VERIFY-PERF-IP6 )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include pdrchkANDnic_intel-x520-da2AND1t1cANDip6baseORpdrchkANDnic_intel-x520-da2AND1t1cANDip6fwdANDfib_2m \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VPP-VERIFY-PERF-L2 )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include pdrchkANDnic_intel-x520-da2AND1t1cANDl2xcbaseORpdrchkANDnic_intel-x520-da2AND1t1cANDl2bdbase \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VPP-VERIFY-PERF-LISP )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include pdrchkANDnic_intel-x520-da2AND1t1cANDlisp \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VPP-VERIFY-PERF-VXLAN )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include pdrchkANDnic_intel-x520-da2AND1t1cANDvxlan \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VPP-VERIFY-PERF-VHOST )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include pdrdiscANDnic_intel-x520-da2AND1t1cANDvhost \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    VPP-VERIFY-PERF-ACL )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --include pdrdiscANDnic_intel-x520-da2AND1t1cANDacl \
+              --include pdrdiscANDnic_intel-x520-da2AND2t2cANDacl \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    PERFTEST_LONG )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              --exclude SKIP_PATCH \
+              -i NDRPDRDISC \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    PERFTEST_SHORT )
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              -i NDRCHK \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    PERFTEST_NIGHTLY )
+        #run all available tests
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              tests/
+        RETURN_STATUS=$(echo $?)
+        ;;
+    * )
+        # run full performance test suite and exit on fail
+        pybot ${PYBOT_ARGS} \
+              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+              -v DPDK_TEST:True \
+              -s "tests.ligato.perf" \
+              tests/
+        RETURN_STATUS=$(echo $?)
+esac
+
+# Pybot output post-processing
+echo Post-processing test data...
+
+python ${SCRIPT_DIR}/resources/tools/scripts/robot_output_parser.py \
+       -i ${SCRIPT_DIR}/output.xml \
+       -o ${SCRIPT_DIR}/output_perf_data.xml \
+       -v ${VPP_BUILD}
+if [ ! $? -eq 0 ]; then
+    echo "Parsing ${SCRIPT_DIR}/output.xml failed"
+fi
+
+# Archive artifacts
+mkdir -p archive
+for i in ${ARCHIVE_ARTIFACTS[@]}; do
+    cp $( readlink -f ${i} | tr '\n' ' ' ) archive/
+done
+
+echo Post-processing finished.
+
+exit ${RETURN_STATUS}
diff --git a/resources/libraries/bash/k8s_setup.sh b/resources/libraries/bash/k8s_setup.sh
new file mode 100755 (executable)
index 0000000..0649c71
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -xo pipefail
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+K8S_CALICO="${SCRIPT_DIR}/../../templates/kubernetes/calico_v2.4.1.yaml"
+K8S_CSIT="${SCRIPT_DIR}/../../templates/kubernetes/csit.yaml"
+
+trap "sudo kubeadm reset && sudo rm -rf $HOME/.kube" ERR
+
+# Revert any changes made to this host by 'kubeadm init' or 'kubeadm join'
+sudo kubeadm reset && sudo rm -rf $HOME/.kube || \
+    { echo "Failed to reset kubeadm"; exit 1; }
+
+# Set up the Kubernetes master
+sudo -E kubeadm init --token-ttl 0 --pod-network-cidr=192.168.0.0/16 || \
+    { echo "Failed to init kubeadm"; exit 1; }
+
+# Make cgroup non-exclusive for CPU and MEM
+sudo cgset -r cpuset.cpu_exclusive=0 /kubepods
+sudo cgset -r cpuset.mem_exclusive=0 /kubepods
+
+rm -rf $HOME/.kube
+mkdir -p $HOME/.kube
+sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+# Apply resources
+kubectl apply -f ${K8S_CALICO}  || \
+    { echo "Failed to apply Calico resources"; exit 1; }
+kubectl apply -f ${K8S_CSIT}  || \
+    { echo "Failed to apply CSIT resource"; exit 1; }
+
+# Update the taints
+kubectl taint nodes --all node-role.kubernetes.io/master- || \
+    { echo "Failed to taint nodes"; exit 1; }
+
+# Dump Kubernetes objects ...
+kubectl get all --all-namespaces
+
+echo Kubernetes is ready
index fb2695f..b56fb0d 100644 (file)
@@ -12,7 +12,7 @@
 # limitations under the License.
 
 # Bug workaround in pylint for abstract classes.
-#pylint: disable=W0223
+# pylint: disable=W0223
 
 """Library to manipulate Containers."""
 
@@ -93,11 +93,12 @@ class ContainerManager(object):
 
     def construct_containers(self, **kwargs):
         """Construct 1..N container(s) on node with specified name.
+
         Ordinal number is automatically added to the name of container as
         suffix.
 
-        :param kwargs: Name of container.
-        :param kwargs: str
+        :param kwargs: Named parameters.
+        :param kwargs: dict
         """
         name = kwargs['name']
         for i in range(kwargs['count']):
@@ -311,7 +312,6 @@ class ContainerEngine(object):
         # Create config instance
         vpp_config = VppConfigGenerator()
         vpp_config.set_node(self.container.node)
-        vpp_config.set_config_filename(config_filename)
         vpp_config.add_unix_cli_listen()
         vpp_config.add_unix_nodaemon()
         vpp_config.add_unix_exec('/tmp/running.exec')
@@ -326,15 +326,15 @@ class ContainerEngine(object):
         self.execute('mkdir -p /etc/vpp/')
         self.execute('echo "{c}" | tee {f}'
                      .format(c=vpp_config.get_config_str(),
-                             f=vpp_config.get_config_filename()))
+                             f=config_filename))
 
-    def create_vpp_exec_config(self, vat_template_file, **args):
+    def create_vpp_exec_config(self, vat_template_file, **kwargs):
         """Create VPP exec configuration on container.
 
         :param vat_template_file: File name of a VAT template script.
-        :param args: Parameters for VAT script.
+        :param kwargs: Parameters for VAT script.
         :type vat_template_file: str
-        :type args: dict
+        :type kwargs: dict
         """
         vat_file_path = '{p}/{f}'.format(p=Constants.RESOURCES_TPL_VAT,
                                          f=vat_template_file)
@@ -342,7 +342,7 @@ class ContainerEngine(object):
         with open(vat_file_path, 'r') as template_file:
             cmd_template = template_file.readlines()
             for line_tmpl in cmd_template:
-                vat_cmd = line_tmpl.format(**args)
+                vat_cmd = line_tmpl.format(**kwargs)
                 self.execute('echo "{c}" >> /tmp/running.exec'
                              .format(c=vat_cmd.replace('\n', '')))
 
@@ -354,6 +354,28 @@ class ContainerEngine(object):
         """Check if container is present."""
         raise NotImplementedError
 
+    def _configure_cgroup(self, name):
+        """Configure the control group associated with a container.
+
+        :param name: Name of cgroup.
+        :type name: str
+        :raises RuntimeError: If applying cgroup settings via cgset failed.
+        """
+        ret, _, _ = self.container.ssh.exec_command_sudo(
+            'cgcreate -g cpuset:/{name}'.format(name=name))
+        if int(ret) != 0:
+            raise RuntimeError('Failed to copy cgroup settings from root.')
+
+        ret, _, _ = self.container.ssh.exec_command_sudo(
+            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
+        if int(ret) != 0:
+            raise RuntimeError('Failed to apply cgroup settings.')
+
+        ret, _, _ = self.container.ssh.exec_command_sudo(
+            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
+        if int(ret) != 0:
+            raise RuntimeError('Failed to apply cgroup settings.')
+
 
 class LXC(ContainerEngine):
     """LXC implementation."""
@@ -363,8 +385,7 @@ class LXC(ContainerEngine):
         super(LXC, self).__init__()
 
     def acquire(self, force=True):
-        """Acquire a privileged system object where configuration is stored and
-        where user information can be stored.
+        """Acquire a privileged system object where configuration is stored.
 
         :param force: If a container exists, destroy it and create a new
         container.
@@ -398,6 +419,7 @@ class LXC(ContainerEngine):
             if int(ret) != 0:
                 raise RuntimeError('Failed to write {c.name} config.'
                                    .format(c=self.container))
+        self._configure_cgroup('lxc')
 
     def create(self):
         """Create/deploy an application inside a container on system.
@@ -415,13 +437,25 @@ class LXC(ContainerEngine):
             raise RuntimeError('Failed to start container {c.name}.'
                                .format(c=self.container))
         self._lxc_wait('RUNNING')
-        self._lxc_cgroup(state_object='cpuset.cpus',
-                         value=cpuset_cpus)
+
+        # Workaround for LXC to be able to allocate all cpus including isolated.
+        cmd = 'cgset --copy-from / lxc/'
+        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+        if int(ret) != 0:
+            raise RuntimeError('Failed to copy cgroup to LXC')
+
+        cmd = 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'\
+            .format(c=self.container, cpus=cpuset_cpus)
+        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+        if int(ret) != 0:
+            raise RuntimeError('Failed to set cpuset.cpus to container '
+                               '{c.name}.'.format(c=self.container))
 
     def execute(self, command):
-        """Start a process inside a running container. Runs the specified
-        command inside the container specified by name. The container has to
-        be running already.
+        """Start a process inside a running container.
+
+        Runs the specified command inside the container specified by name. The
+        container has to be running already.
 
         :param command: Command to run inside container.
         :type command: str
@@ -530,33 +564,6 @@ class LXC(ContainerEngine):
             raise RuntimeError('Failed to wait for state "{s}" of container '
                                '{c.name}.'.format(s=state, c=self.container))
 
-    def _lxc_cgroup(self, state_object, value=''):
-        """Manage the control group associated with a container.
-
-        :param state_object: Specify the state object name.
-        :param value: Specify the value to assign to the state object. If empty,
-        then action is GET, otherwise is action SET.
-        :type state_object: str
-        :type value: str
-        :raises RuntimeError: If getting/setting state of a container failed.
-        """
-        cmd = 'lxc-cgroup --name {c.name} {s} {v}'\
-            .format(c=self.container, s=state_object, v=value)
-
-        ret, _, _ = self.container.ssh.exec_command_sudo(
-            'cgset --copy-from / lxc')
-        if int(ret) != 0:
-            raise RuntimeError('Failed to copy cgroup settings from root.')
-
-        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
-        if int(ret) != 0:
-            if value:
-                raise RuntimeError('Failed to set {s} of container {c.name}.'
-                                   .format(s=state_object, c=self.container))
-            else:
-                raise RuntimeError('Failed to get {s} of container {c.name}.'
-                                   .format(s=state_object, c=self.container))
-
 
 class Docker(ContainerEngine):
     """Docker implementation."""
@@ -584,6 +591,7 @@ class Docker(ContainerEngine):
         if int(ret) != 0:
             raise RuntimeError('Failed to create container {c.name}.'
                                .format(c=self.container))
+        self._configure_cgroup('docker')
 
     def create(self):
         """Create/deploy container.
@@ -613,7 +621,7 @@ class Docker(ContainerEngine):
 
         cmd = 'docker run '\
             '--privileged --detach --interactive --tty --rm '\
-            '--cgroup-parent lxc {cpuset_cpus} {cpuset_mems} {publish} '\
+            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
             '{env} {volume} --name {container.name} {container.image} '\
             '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                                container=self.container, command=command,
@@ -627,9 +635,10 @@ class Docker(ContainerEngine):
         self.info()
 
     def execute(self, command):
-        """Start a process inside a running container. Runs the specified
-        command inside the container specified by name. The container has to
-        be running already.
+        """Start a process inside a running container.
+
+        Runs the specified command inside the container specified by name. The
+        container has to be running already.
 
         :param command: Command to run inside container.
         :type command: str
@@ -731,12 +740,26 @@ class Container(object):
         pass
 
     def __getattr__(self, attr):
+        """Get attribute custom implementation.
+
+        :param attr: Attribute to get.
+        :type attr: str
+        :returns: Attribute value or None.
+        :rtype: any
+        """
         try:
             return self.__dict__[attr]
         except KeyError:
             return None
 
     def __setattr__(self, attr, value):
+        """Set attribute custom implementation.
+
+        :param attr: Attribute to set.
+        :param value: Value to set.
+        :type attr: str
+        :type value: any
+        """
         try:
             # Check if attribute exists
             self.__dict__[attr]
index 71d36c1..795bb52 100644 (file)
@@ -449,6 +449,47 @@ class InterfaceUtil(object):
         VatJsonUtil.update_vpp_interface_data_from_json(node,
                                                         interface_dump_json)
 
+    @staticmethod
+    def update_nic_interface_names(node):
+        """Update interface names based on nic type and PCI address.
+
+        This method updates interface names in the same format as VPP does.
+
+        :param node: Node dictionary.
+        :type node: dict
+        """
+        for ifc in node['interfaces'].values():
+            if_pci = ifc['pci_address'].replace('.', ':').split(':')
+            bus = '{:x}'.format(int(if_pci[1], 16))
+            dev = '{:x}'.format(int(if_pci[2], 16))
+            fun = '{:x}'.format(int(if_pci[3], 16))
+            loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun)
+            if ifc['model'] == 'Intel-XL710':
+                ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
+            elif ifc['model'] == 'Intel-X710':
+                ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+            elif ifc['model'] == 'Intel-X520-DA2':
+                ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+            elif ifc['model'] == 'Cisco-VIC-1385':
+                ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
+            elif ifc['model'] == 'Cisco-VIC-1227':
+                ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+            else:
+                ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc)
+
+    @staticmethod
+    def update_nic_interface_names_on_all_duts(nodes):
+        """Update interface names based on nic type and PCI address on all DUTs.
+
+        This method updates interface names in the same format as VPP does.
+
+        :param nodes: Topology nodes.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                InterfaceUtil.update_nic_interface_names(node)
+
     @staticmethod
     def update_tg_interface_data_on_node(node):
         """Update interface name for TG/linux node in DICT__nodes.
diff --git a/resources/libraries/python/KubernetesUtils.py b/resources/libraries/python/KubernetesUtils.py
new file mode 100644 (file)
index 0000000..5faa056
--- /dev/null
@@ -0,0 +1,372 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Library to control Kubernetes kubectl."""
+
+import time
+import yaml
+
+from resources.libraries.python.constants import Constants
+from resources.libraries.python.topology import NodeType
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
+
+__all__ = ["KubernetesUtils"]
+
+
+class KubernetesUtils(object):
+    """Kubernetes utilities class."""
+
+    def __init__(self):
+        """Initialize KubernetesUtils class."""
+        pass
+
+    @staticmethod
+    def setup_kubernetes_on_node(node):
+        """Set up Kubernetes on node.
+
+        :param node: DUT node.
+        :type node: dict
+        :raises RuntimeError: If Kubernetes setup failed on node.
+        """
+        ssh = SSH()
+        ssh.connect(node)
+
+        cmd = '{dir}/{lib}/k8s_setup.sh '.format(dir=Constants.REMOTE_FW_DIR,
+                                                 lib=Constants.RESOURCES_LIB_SH)
+        (ret_code, _, _) = ssh.exec_command(cmd, timeout=120)
+        if int(ret_code) != 0:
+            raise RuntimeError('Failed to setup Kubernetes on {node}.'
+                               .format(node=node['host']))
+
+    @staticmethod
+    def setup_kubernetes_on_all_duts(nodes):
+        """Set up Kubernetes on all DUTs.
+
+        :param nodes: Topology nodes.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                KubernetesUtils.setup_kubernetes_on_node(node)
+
+    @staticmethod
+    def apply_kubernetes_resource_on_node(node, yaml_file, **kwargs):
+        """Apply Kubernetes resource on node.
+
+        :param node: DUT node.
+        :param yaml_file: YAML configuration file.
+        :param kwargs: Key-value pairs to replace in YAML template.
+        :type node: dict
+        :type yaml_file: str
+        :type kwargs: dict
+        :raises RuntimeError: If applying Kubernetes template failed.
+        """
+        ssh = SSH()
+        ssh.connect(node)
+
+        stream = file('{tpl}/{yaml}'.format(tpl=Constants.RESOURCES_TPL_K8S,
+                                            yaml=yaml_file), 'r')
+
+        for data in yaml.load_all(stream):
+            data = reduce(lambda a, kv: a.replace(*kv), kwargs.iteritems(),
+                          yaml.dump(data, default_flow_style=False))
+            # Workaround to avoid using RAW string anotated with | in YAML as
+            # library + bash is misinterpreting spaces.
+            data = data.replace('.conf:\n', '.conf: |\n')
+            cmd = 'cat <<EOF | kubectl apply -f - \n{data}\nEOF'.format(
+                data=data)
+            (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+            if int(ret_code) != 0:
+                raise RuntimeError('Failed to apply Kubernetes template {yaml} '
+                                   'on {node}.'.format(yaml=yaml_file,
+                                                       node=node['host']))
+
+    @staticmethod
+    def apply_kubernetes_resource_on_all_duts(nodes, yaml_file, **kwargs):
+        """Apply Kubernetes resource on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param yaml_file: YAML configuration file.
+        :param kwargs: Key-value pairs to replace in YAML template.
+        :type nodes: dict
+        :type yaml_file: str
+        :type kwargs: dict
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                KubernetesUtils.apply_kubernetes_resource_on_node(node,
+                                                                  yaml_file,
+                                                                  **kwargs)
+
+    @staticmethod
+    def create_kubernetes_cm_from_file_on_node(node, name, key, src_file):
+        """Create Kubernetes ConfigMap from file on node.
+
+        :param node: DUT node.
+        :param name: ConfigMap name.
+        :param key: Key (destination file).
+        :param src_file: Source file.
+        :type node: dict
+        :type name: str
+        :type key: str
+        :type src_file: str
+        :raises RuntimeError: If creating Kubernetes ConfigMap failed.
+        """
+        ssh = SSH()
+        ssh.connect(node)
+
+        cmd = 'kubectl create -n csit configmap {name} --from-file={key}='\
+            '{src_file}'.format(name=name, key=key, src_file=src_file)
+        (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+        if int(ret_code) != 0:
+            raise RuntimeError('Failed to create Kubernetes ConfigMap {name} '
+                               'on {node}.'.format(name=name,
+                                                   node=node['host']))
+
+    @staticmethod
+    def create_kubernetes_cm_from_file_on_all_duts(nodes, name, key, src_file):
+        """Create Kubernetes ConfigMap from file on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param name: ConfigMap name.
+        :param key: Key (destination file).
+        :param src_file: Source file.
+        :type nodes: dict
+        :type name: str
+        :type key: str
+        :type src_file: str
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                KubernetesUtils.create_kubernetes_cm_from_file_on_node(node,
+                                                                       name,
+                                                                       key,
+                                                                       src_file)
+
+    @staticmethod
+    def delete_kubernetes_resource_on_node(node, rtype='po,cm', name=None):
+        """Delete Kubernetes resource on node.
+
+        :param node: DUT node.
+        :param rtype: Kubernetes resource type.
+        :param name: Name of resource.
+        :type node: dict
+        :type rtype: str
+        :type name: str
+        :raises RuntimeError: If deleting Kubernetes resource failed.
+        """
+        ssh = SSH()
+        ssh.connect(node)
+
+        name = '{name}'.format(name=name) if name else '--all'
+
+        cmd = 'kubectl delete -n csit {rtype} {name}'\
+            .format(rtype=rtype, name=name)
+        (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+        if int(ret_code) != 0:
+            raise RuntimeError('Failed to delete Kubernetes resources in CSIT '
+                               'namespace on {node}.'.format(node=node['host']))
+
+        cmd = 'kubectl get -n csit pods --no-headers'
+        for _ in range(24):
+            (ret_code, stdout, _) = ssh.exec_command_sudo(cmd, timeout=120)
+            if int(ret_code) == 0:
+                ready = True
+                for line in stdout.splitlines():
+                    if 'No resources found.' not in line:
+                        ready = False
+                if ready:
+                    break
+            time.sleep(5)
+        else:
+            raise RuntimeError('Failed to delete Kubernetes resources in CSIT '
+                               'namespace on {node}.'.format(node=node['host']))
+
+    @staticmethod
+    def delete_kubernetes_resource_on_all_duts(nodes, rtype='po,cm', name=None):
+        """Delete all Kubernetes resource on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param rtype: Kubernetes resource type.
+        :param name: Name of resource.
+        :type nodes: dict
+        :type rtype: str
+        :type name: str
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                KubernetesUtils.delete_kubernetes_resource_on_node(node, rtype,
+                                                                   name)
+
+    @staticmethod
+    def describe_kubernetes_resource_on_node(node, rtype='po,cm'):
+        """Describe Kubernetes resource on node.
+
+        :param node: DUT node.
+        :param rtype: Kubernetes resource type.
+        :type node: dict
+        :type rtype: str
+        :raises RuntimeError: If describing Kubernetes resource failed.
+        """
+        ssh = SSH()
+        ssh.connect(node)
+
+        cmd = 'kubectl describe -n csit {rtype}'.format(rtype=rtype)
+        (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+        if int(ret_code) != 0:
+            raise RuntimeError('Failed to describe Kubernetes resource on '
+                               '{node}.'.format(node=node['host']))
+
+    @staticmethod
+    def describe_kubernetes_resource_on_all_duts(nodes, rtype='po,cm'):
+        """Describe Kubernetes resource on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param rtype: Kubernetes resource type.
+        :type nodes: dict
+        :type rtype: str
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                KubernetesUtils.describe_kubernetes_resource_on_node(node,
+                                                                     rtype)
+
+    @staticmethod
+    def reset_kubernetes_on_node(node):
+        """Reset Kubernetes on node.
+
+        :param node: DUT node.
+        :type node: dict
+        :raises RuntimeError: If resetting Kubernetes failed.
+        """
+        ssh = SSH()
+        ssh.connect(node)
+
+        cmd = 'kubeadm reset && rm -rf $HOME/.kube'
+        (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+        if int(ret_code) != 0:
+            raise RuntimeError('Failed to reset Kubernetes on {node}.'
+                               .format(node=node['host']))
+
+    @staticmethod
+    def reset_kubernetes_on_all_duts(nodes):
+        """Reset Kubernetes on all DUTs.
+
+        :param nodes: Topology nodes.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                KubernetesUtils.reset_kubernetes_on_node(node)
+
+    @staticmethod
+    def wait_for_kubernetes_pods_on_node(node):
+        """Wait for Kubernetes PODs to become in 'Running' state on node.
+
+        :param node: DUT node.
+        :type node: dict
+        :raises RuntimeError: If Kubernetes PODs are not ready.
+        """
+        ssh = SSH()
+        ssh.connect(node)
+
+        cmd = 'kubectl get -n csit pods --no-headers'
+        for _ in range(48):
+            (ret_code, stdout, _) = ssh.exec_command_sudo(cmd, timeout=120)
+            if int(ret_code) == 0:
+                ready = True
+                for line in stdout.splitlines():
+                    if 'Running' not in line:
+                        ready = False
+                if ready:
+                    break
+            time.sleep(5)
+        else:
+            raise RuntimeError('Kubernetes PODs are not ready on {node}.'
+                               .format(node=node['host']))
+
+    @staticmethod
+    def wait_for_kubernetes_pods_on_all_duts(nodes):
+        """Wait for Kubernetes PODs to become in Running state on all DUTs.
+
+        :param nodes: Topology nodes.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                KubernetesUtils.wait_for_kubernetes_pods_on_node(node)
+
+    @staticmethod
+    def create_kubernetes_vswitch_startup_config(**kwargs):
+        """Create Kubernetes VSWITCH startup configuration.
+
+        :param kwargs: Key-value pairs used to create configuration.
+        :param kwargs: dict
+        """
+        cpuset_cpus = \
+            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
+                                                cpu_node=kwargs['cpu_node'],
+                                                skip_cnt=kwargs['cpu_skip'],
+                                                cpu_cnt=kwargs['cpu_cnt'],
+                                                smt_used=kwargs['smt_used'])
+
+        # Create config instance
+        vpp_config = VppConfigGenerator()
+        vpp_config.set_node(kwargs['node'])
+        vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
+        vpp_config.add_unix_nodaemon()
+        vpp_config.add_dpdk_socketmem('1024,1024')
+        vpp_config.add_heapsize('3G')
+        vpp_config.add_ip6_hash_buckets('2000000')
+        vpp_config.add_ip6_heap_size('3G')
+        if kwargs['framesize'] < 1522:
+            vpp_config.add_dpdk_no_multi_seg()
+        vpp_config.add_dpdk_dev_default_rxq(kwargs['rxq'])
+        vpp_config.add_dpdk_dev(kwargs['if1'], kwargs['if2'])
+        # We will pop first core from list to be main core
+        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
+        # if this is not only core in list, the rest will be used as workers.
+        if cpuset_cpus:
+            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+            vpp_config.add_cpu_corelist_workers(corelist_workers)
+        vpp_config.apply_config(filename=kwargs['filename'], restart_vpp=False)
+
+    @staticmethod
+    def create_kubernetes_vnf_startup_config(**kwargs):
+        """Create Kubernetes VNF startup configuration.
+
+        :param kwargs: Key-value pairs used to create configuration.
+        :param kwargs: dict
+        """
+        cpuset_cpus = \
+            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
+                                                cpu_node=kwargs['cpu_node'],
+                                                skip_cnt=kwargs['cpu_skip'],
+                                                cpu_cnt=kwargs['cpu_cnt'],
+                                                smt_used=kwargs['smt_used'])
+
+        # Create config instance
+        vpp_config = VppConfigGenerator()
+        vpp_config.set_node(kwargs['node'])
+        vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
+        vpp_config.add_unix_nodaemon()
+        # We will pop first core from list to be main core
+        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
+        # if this is not only core in list, the rest will be used as workers.
+        if cpuset_cpus:
+            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+            vpp_config.add_cpu_corelist_workers(corelist_workers)
+        vpp_config.add_plugin_disable('dpdk_plugin.so')
+        vpp_config.apply_config(filename=kwargs['filename'], restart_vpp=False)
index 7bd0175..6909649 100644 (file)
@@ -22,10 +22,12 @@ from resources.libraries.python.topology import Topology
 
 __all__ = ['VppConfigGenerator']
 
+
 class VppConfigGenerator(object):
     """VPP Configuration File Generator."""
 
     def __init__(self):
+        """Initialize library."""
         # VPP Node to apply configuration on
         self._node = ''
         # VPP Hostname
@@ -36,8 +38,6 @@ class VppConfigGenerator(object):
         self._vpp_config = ''
         # VPP Service name
         self._vpp_service_name = 'vpp'
-        # VPP Configuration file path
-        self._vpp_config_filename = '/etc/vpp/startup.conf'
 
     def set_node(self, node):
         """Set DUT node.
@@ -46,29 +46,12 @@ class VppConfigGenerator(object):
         :type node: dict
         :raises RuntimeError: If Node type is not DUT.
         """
-
         if node['type'] != NodeType.DUT:
             raise RuntimeError('Startup config can only be applied to DUT'
                                'node.')
         self._node = node
         self._hostname = Topology.get_node_hostname(node)
 
-    def set_config_filename(self, filename):
-        """Set startup configuration filename.
-
-        :param filename: Startup configuration filename.
-        :type filename: str
-        """
-        self._vpp_config_filename = filename
-
-    def get_config_filename(self):
-        """Get startup configuration filename.
-
-        :returns: Startup configuration filename.
-        :rtype: str
-        """
-        return self._vpp_config_filename
-
     def get_config_str(self):
         """Get dumped startup configuration in VPP config format.
 
@@ -88,11 +71,10 @@ class VppConfigGenerator(object):
         :type value: str
         :type path: list
         """
-
         if len(path) == 1:
             config[path[0]] = value
             return
-        if not config.has_key(path[0]):
+        if path[0] not in config:
             config[path[0]] = {}
         self.add_config_item(config[path[0]], value, path[1:])
 
@@ -100,9 +82,9 @@ class VppConfigGenerator(object):
         """Dump the startup configuration in VPP config format.
 
         :param obj: Python Object to print.
-        :param nested_level: Nested level for indentation.
+        :param level: Nested level for indentation.
         :type obj: Obj
-        :type nested_level: int
+        :type level: int
         :returns: nothing
         """
         indent = '  '
@@ -158,7 +140,6 @@ class VppConfigGenerator(object):
         :type devices: tuple
         :raises ValueError: If PCI address format is not valid.
         """
-
         pattern = re.compile("^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:"
                              "[0-9A-Fa-f]{2}\\.[0-9A-Fa-f]$")
         for device in devices:
@@ -219,7 +200,6 @@ class VppConfigGenerator(object):
         path = ['dpdk', 'dev default', 'num-tx-desc']
         self.add_config_item(self._nodeconfig, value, path)
 
-
     def add_dpdk_socketmem(self, value):
         """Add DPDK socket memory configuration.
 
@@ -312,16 +292,21 @@ class VppConfigGenerator(object):
         path = ['nat']
         self.add_config_item(self._nodeconfig, value, path)
 
-    def apply_config(self, waittime=5, retries=12):
+    def apply_config(self, filename='/etc/vpp/startup.conf', waittime=5,
+                     retries=12, restart_vpp=True):
         """Generate and apply VPP configuration for node.
 
         Use data from calls to this class to form a startup.conf file and
         replace /etc/vpp/startup.conf with it on node.
 
+        :param filename: Startup configuration file name.
         :param waittime: Time to wait for VPP to restart (default 5 seconds).
         :param retries: Number of times (default 12) to re-try waiting.
+        :param restart_vpp: Whether to restart VPP.
+        :type filename: str
         :type waittime: int
         :type retries: int
+        :type restart_vpp: bool
         :raises RuntimeError: If writing config file failed, or restarting of
         VPP failed.
         """
@@ -330,39 +315,37 @@ class VppConfigGenerator(object):
         ssh = SSH()
         ssh.connect(self._node)
 
-        # We're using this "| sudo tee" construct because redirecting
-        # a sudo's output ("sudo echo xxx > /path/to/file") does not
-        # work on most platforms...
         (ret, _, _) = \
-            ssh.exec_command('echo "{0}" | sudo tee {1}'.
-                             format(self._vpp_config,
-                                    self._vpp_config_filename))
+            ssh.exec_command('echo "{config}" | sudo tee {filename}'.
+                             format(config=self._vpp_config,
+                                    filename=filename))
 
         if ret != 0:
             raise RuntimeError('Writing config file failed to node {}'.
                                format(self._hostname))
 
-        # Instead of restarting, we'll do separate start and stop
-        # actions. This way we don't care whether VPP was running
-        # to begin with.
-        ssh.exec_command('sudo service {} stop'
-                         .format(self._vpp_service_name))
-        (ret, _, _) = \
-            ssh.exec_command('sudo service {} start'
+        if restart_vpp:
+            # Instead of restarting, we'll do separate start and stop
+            # actions. This way we don't care whether VPP was running
+            # to begin with.
+            ssh.exec_command('sudo service {} stop'
                              .format(self._vpp_service_name))
-        if ret != 0:
-            raise RuntimeError('Restarting VPP failed on node {}'.
-                               format(self._hostname))
-
-        # Sleep <waittime> seconds, up to <retry> times,
-        # and verify if VPP is running.
-        for _ in range(retries):
-            time.sleep(waittime)
             (ret, _, _) = \
-                ssh.exec_command('echo show hardware-interfaces | '
-                                 'nc 0 5002 || echo "VPP not yet running"')
-            if ret == 0:
-                break
-        else:
-            raise RuntimeError('VPP failed to restart on node {}'.
-                               format(self._hostname))
+                ssh.exec_command('sudo service {} start'
+                                 .format(self._vpp_service_name))
+            if ret != 0:
+                raise RuntimeError('Restarting VPP failed on node {}'.
+                                   format(self._hostname))
+
+            # Sleep <waittime> seconds, up to <retry> times,
+            # and verify if VPP is running.
+            for _ in range(retries):
+                time.sleep(waittime)
+                (ret, _, _) = \
+                    ssh.exec_command('echo show hardware-interfaces | '
+                                     'nc 0 5002 || echo "VPP not yet running"')
+                if ret == 0:
+                    break
+            else:
+                raise RuntimeError('VPP failed to restart on node {}'.
+                                   format(self._hostname))
index a8d40a2..01a96a8 100644 (file)
@@ -35,6 +35,9 @@ class Constants(object):
     # QEMU install directory
     QEMU_INSTALL_DIR = '/opt/qemu-2.5.0'
 
+    # Kubernetes templates location
+    RESOURCES_TPL_K8S = 'resources/templates/kubernetes'
+
     # Honeycomb directory location at topology nodes:
     REMOTE_HC_DIR = '/opt/honeycomb'
 
index 0dc7f78..9713c22 100644 (file)
@@ -13,7 +13,6 @@
 
 *** Settings ***
 | Library | resources.libraries.python.DUTSetup
-| Library | resources.libraries.python.VhostUser
 | Resource | resources/libraries/robot/performance/performance_configuration.robot
 | Resource | resources/libraries/robot/performance/performance_utils.robot
 | Documentation | Performance suite keywords - Suite and test setups and
 | | ...
 | | [Arguments] | ${topology_type} | ${nic_model}
 | | ...
-| | Show vpp version on all DUTs | ${nodes}
 | | Set variables in 2-node circular topology with DUT interface model
 | | ... | ${nic_model}
 | | Initialize traffic generator | ${tg} | ${tg_if1} | ${tg_if2}
 | | [Arguments] | ${topology_type} | ${nic_model} | ${tg_if1_dest_mac}
 | | ... | ${tg_if2_dest_mac}
 | | ...
-| | Show vpp version on all DUTs | ${nodes}
 | | Set variables in 2-node circular topology with DUT interface model
 | | ... | ${nic_model}
 | | Initialize traffic generator | ${tg} | ${tg_if1} | ${tg_if2}
 | | ...
 | | [Arguments] | ${topology_type} | ${nic_model}
 | | ...
-| | Show vpp version on all DUTs | ${nodes}
 | | Set variables in 3-node circular topology with DUT interface model
 | | ... | ${nic_model}
 | | Initialize traffic generator | ${tg} | ${tg_if1} | ${tg_if2}
index 884bc48..6d6413d 100644 (file)
@@ -17,6 +17,7 @@
 | Library | resources.libraries.python.NodePath
 | Library | resources.libraries.python.DpdkUtil
 | Library | resources.libraries.python.InterfaceUtil
+| Library | resources.libraries.python.KubernetesUtils
 | Library | resources.libraries.python.VhostUser
 | Library | resources.libraries.python.TrafficGenerator
 | Library | resources.libraries.python.TrafficGenerator.TGDropRateSearchImpl
index 250380d..fa291bf 100644 (file)
 | | ... | \| Start VPP Service on DUT \| ${nodes['DUT1']} \|
 | | ...
 | | [Arguments] | ${node}
-| | Start VPP Service | ${node}
\ No newline at end of file
+| | Start VPP Service | ${node}
diff --git a/resources/templates/kubernetes/calico_v2.4.1.yaml b/resources/templates/kubernetes/calico_v2.4.1.yaml
new file mode 100644 (file)
index 0000000..921e692
--- /dev/null
@@ -0,0 +1,387 @@
+# Calico Version v2.4.1
+# https://docs.projectcalico.org/v2.4/releases#v2.4.1
+# This manifest includes the following component versions:
+#   calico/node:v2.4.1
+#   calico/cni:v1.10.0
+#   calico/kube-policy-controller:v0.7.0
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: calico-config
+  namespace: kube-system
+data:
+  # The location of your etcd cluster.  This uses the Service clusterIP
+  # defined below.
+  etcd_endpoints: "http://10.96.232.136:6666"
+
+  # Configure the Calico backend to use.
+  calico_backend: "bird"
+
+  # The CNI network configuration to install on each node.
+  cni_network_config: |-
+    {
+        "name": "k8s-pod-network",
+        "cniVersion": "0.1.0",
+        "type": "calico",
+        "etcd_endpoints": "__ETCD_ENDPOINTS__",
+        "log_level": "info",
+        "mtu": 1500,
+        "ipam": {
+            "type": "calico-ipam"
+        },
+        "policy": {
+            "type": "k8s",
+             "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
+             "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
+        },
+        "kubernetes": {
+            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
+        }
+    }
+
+---
+
+# This manifest installs the Calico etcd on the kubeadm master.  This uses a DaemonSet
+# to force it to run on the master even when the master isn't schedulable, and uses
+# nodeSelector to ensure it only runs on the master.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: calico-etcd
+  namespace: kube-system
+  labels:
+    k8s-app: calico-etcd
+spec:
+  template:
+    metadata:
+      labels:
+        k8s-app: calico-etcd
+      annotations:
+        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+        # reserves resources for critical add-on pods so that they can be rescheduled after
+        # a failure.  This annotation works in tandem with the toleration below.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Only run this pod on the master.
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+      # This, along with the annotation above marks this pod as a critical add-on.
+      - key: CriticalAddonsOnly
+        operator: Exists
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+      hostNetwork: true
+      containers:
+        - name: calico-etcd
+          image: quay.io/coreos/etcd:v3.1.10
+          env:
+            - name: CALICO_ETCD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+          command: ["/bin/sh","-c"]
+          args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
+          volumeMounts:
+            - name: var-etcd
+              mountPath: /var/etcd
+      volumes:
+        - name: var-etcd
+          hostPath:
+            path: /var/etcd
+
+---
+
+# This manifest installs the Service which gets traffic to the Calico
+# etcd.
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: calico-etcd
+  name: calico-etcd
+  namespace: kube-system
+spec:
+  # Select the calico-etcd pod running on the master.
+  selector:
+    k8s-app: calico-etcd
+  # This ClusterIP needs to be known in advance, since we cannot rely
+  # on DNS to get access to etcd.
+  clusterIP: 10.96.232.136
+  ports:
+    - port: 6666
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: calico-node
+  namespace: kube-system
+  labels:
+    k8s-app: calico-node
+spec:
+  selector:
+    matchLabels:
+      k8s-app: calico-node
+  template:
+    metadata:
+      labels:
+        k8s-app: calico-node
+      annotations:
+        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+        # reserves resources for critical add-on pods so that they can be rescheduled after
+        # a failure.  This annotation works in tandem with the toleration below.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      hostNetwork: true
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+      # This, along with the annotation above marks this pod as a critical add-on.
+      - key: CriticalAddonsOnly
+        operator: Exists
+      serviceAccountName: calico-cni-plugin
+      containers:
+        # Runs calico/node container on each Kubernetes node.  This
+        # container programs network policy and routes on each
+        # host.
+        - name: calico-node
+          image: quay.io/calico/node:v2.4.1
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # Enable BGP.  Disable to enforce policy only.
+            - name: CALICO_NETWORKING_BACKEND
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: calico_backend
+            # Cluster type to identify the deployment type
+            - name: CLUSTER_TYPE
+              value: "kubeadm,bgp"
+            # Disable file logging so `kubectl logs` works.
+            - name: CALICO_DISABLE_FILE_LOGGING
+              value: "true"
+            # Set Felix endpoint to host default action to ACCEPT.
+            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+              value: "ACCEPT"
+            # Configure the IP Pool from which Pod IPs will be chosen.
+            - name: CALICO_IPV4POOL_CIDR
+              value: "192.168.0.0/16"
+            - name: CALICO_IPV4POOL_IPIP
+              value: "always"
+            # Disable IPv6 on Kubernetes.
+            - name: FELIX_IPV6SUPPORT
+              value: "false"
+            # Set MTU for tunnel device used if ipip is enabled
+            - name: FELIX_IPINIPMTU
+              value: "1440"
+            # Set Felix logging to "info"
+            - name: FELIX_LOGSEVERITYSCREEN
+              value: "info"
+            - name: FELIX_HEALTHENABLED
+              value: "true"
+            # Auto-detect the BGP IP address.
+            - name: IP
+              value: ""
+          securityContext:
+            privileged: true
+          resources:
+            requests:
+              cpu: 250m
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9099
+            periodSeconds: 10
+            initialDelaySeconds: 10
+            failureThreshold: 6
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9099
+            periodSeconds: 10
+          volumeMounts:
+            - mountPath: /lib/modules
+              name: lib-modules
+              readOnly: true
+            - mountPath: /var/run/calico
+              name: var-run-calico
+              readOnly: false
+        # This container installs the Calico CNI binaries
+        # and CNI network config file on each node.
+        - name: install-cni
+          image: quay.io/calico/cni:v1.10.0
+          command: ["/install-cni.sh"]
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # The CNI network config to install on each node.
+            - name: CNI_NETWORK_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: cni_network_config
+          volumeMounts:
+            - mountPath: /host/opt/cni/bin
+              name: cni-bin-dir
+            - mountPath: /host/etc/cni/net.d
+              name: cni-net-dir
+      volumes:
+        # Used by calico/node.
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: var-run-calico
+          hostPath:
+            path: /var/run/calico
+        # Used to install CNI.
+        - name: cni-bin-dir
+          hostPath:
+            path: /opt/cni/bin
+        - name: cni-net-dir
+          hostPath:
+            path: /etc/cni/net.d
+
+---
+
+# This manifest deploys the Calico policy controller on Kubernetes.
+# See https://github.com/projectcalico/k8s-policy
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: calico-policy-controller
+  namespace: kube-system
+  labels:
+    k8s-app: calico-policy
+spec:
+  # The policy controller can only have a single active instance.
+  replicas: 1
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      name: calico-policy-controller
+      namespace: kube-system
+      labels:
+        k8s-app: calico-policy-controller
+      annotations:
+        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+        # reserves resources for critical add-on pods so that they can be rescheduled after
+        # a failure.  This annotation works in tandem with the toleration below.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # The policy controller must run in the host network namespace so that
+      # it isn't governed by policy that would prevent it from working.
+      hostNetwork: true
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+      # This, along with the annotation above marks this pod as a critical add-on.
+      - key: CriticalAddonsOnly
+        operator: Exists
+      serviceAccountName: calico-policy-controller
+      containers:
+        - name: calico-policy-controller
+          image: quay.io/calico/kube-policy-controller:v0.7.0
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # The location of the Kubernetes API.  Use the default Kubernetes
+            # service for API access.
+            - name: K8S_API
+              value: "https://kubernetes.default:443"
+            # Since we're running in the host namespace and might not have KubeDNS
+            # access, configure the container's /etc/hosts to resolve
+            # kubernetes.default to the correct service clusterIP.
+            - name: CONFIGURE_ETC_HOSTS
+              value: "true"
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-cni-plugin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-cni-plugin
+subjects:
+- kind: ServiceAccount
+  name: calico-cni-plugin
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-cni-plugin
+  namespace: kube-system
+rules:
+  - apiGroups: [""]
+    resources:
+      - pods
+      - nodes
+    verbs:
+      - get
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-cni-plugin
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-policy-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-policy-controller
+subjects:
+- kind: ServiceAccount
+  name: calico-policy-controller
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-policy-controller
+  namespace: kube-system
+rules:
+  - apiGroups:
+    - ""
+    - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+    verbs:
+      - watch
+      - list
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-policy-controller
+  namespace: kube-system
diff --git a/resources/templates/kubernetes/csit.yaml b/resources/templates/kubernetes/csit.yaml
new file mode 100644 (file)
index 0000000..4ae7206
--- /dev/null
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: csit
diff --git a/resources/templates/kubernetes/etcd.yaml b/resources/templates/kubernetes/etcd.yaml
new file mode 100644 (file)
index 0000000..66c1a57
--- /dev/null
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: etcdv3-server
+  namespace: csit
+spec:
+  hostNetwork: true
+  containers:
+    - image: quay.io/coreos/etcd:v3.0.16
+      name: etcdv3
+      command:
+        - /usr/local/bin/etcd
+        - --advertise-client-urls
+        - http://0.0.0.0:22379
+        - --listen-client-urls
+        - http://0.0.0.0:22379
+        - --listen-peer-urls
+        - http://0.0.0.0:22380
+      ports:
+        - containerPort: 22379
+          hostPort: 22379
+          name: serverport
+      env:
+        - name: ETCDCTL_API
+          value: "3"
diff --git a/resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml b/resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml
new file mode 100644 (file)
index 0000000..726e973
--- /dev/null
@@ -0,0 +1,206 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sfc-controller-cfg
+  namespace: csit
+data:
+  etcd.conf:
+    insecure-transport: true
+    dial-timeout: 1000000000
+    endpoints:
+      - "172.17.0.1:22379"
+
+  sfc.conf:
+    sfc_controller_config_version: 1
+    description: $$TEST_NAME$$
+    host_entities:
+      - name: vswitch
+    sfc_entities:
+      - name: vswitch-vnf1
+        description: vswitch to VNF1 - memif
+        type: 3
+        elements:
+          - container: vswitch
+            port_label: $$VSWITCH_IF1$$
+            etcd_vpp_switch_key: vswitch
+            type: 5
+          - container: vnf1
+            port_label: port1
+            etcd_vpp_switch_key: vswitch
+            type: 2
+      - name: vnf1-vswitch
+        description: VNF1 to vswitch - memif
+        type: 3
+        elements:
+          - container: vswitch
+            port_label: $$VSWITCH_IF2$$
+            etcd_vpp_switch_key: vswitch
+            type: 5
+          - container: vnf1
+            port_label: port2
+            etcd_vpp_switch_key: vswitch
+            type: 2
+
+  vnf.conf:
+    vnf_plugin_config_version: 1
+    description: VNF config
+    vnf_entities:
+      - name: vnf1
+        container: vnf1
+        l2xconnects:
+          - port_labels:
+            - port1
+            - port2
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sfc-controller
+  namespace: csit
+spec:
+  containers:
+    - name: "sfc-controller"
+      image: prod_sfc_controller
+      imagePullPolicy: IfNotPresent
+      command:
+        - /bin/sfc-controller
+        - -etcdv3-config=/opt/sfc-controller/dev/etcd.conf
+        - -sfc-config=/opt/sfc-controller/dev/sfc.conf
+        - -vnf-config=/opt/sfc-controller/dev/vnf.conf
+      volumeMounts:
+        - name: controller-config
+          mountPath: /opt/sfc-controller/dev
+  volumes:
+    - name: controller-config
+      configMap:
+        name: sfc-controller-cfg
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vswitch-agent-cfg
+  namespace: csit
+data:
+  etcd.conf:
+    insecure-transport: true
+    dial-timeout: 1000000000
+    endpoints:
+      - "172.17.0.1:22379"
+
+  kafka.conf:
+    addrs:
+      - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: vswitch-vpp
+  namespace: csit
+spec:
+  hostNetwork: true
+  containers:
+    - name: "vswitch"
+      image: prod_vpp_agent_shrink
+      imagePullPolicy: IfNotPresent
+      securityContext:
+        privileged: true
+      ports:
+        - containerPort: 5002
+        - containerPort: 9191
+      readinessProbe:
+        httpGet:
+          path: /readiness
+          port: 9191
+        periodSeconds: 1
+      livenessProbe:
+        httpGet:
+          path: /liveness
+          port: 9191
+        periodSeconds: 1
+        initialDelaySeconds: 15
+      env:
+        - name: MICROSERVICE_LABEL
+          value: vswitch
+      volumeMounts:
+        - name: vpp-config
+          mountPath: /etc/vpp
+        - name: agent-config
+          mountPath: /opt/vpp-agent/dev
+        - name: memif-sockets
+          mountPath: /tmp
+  volumes:
+    - name: vpp-config
+      configMap:
+        name: vswitch-vpp-cfg
+    - name: agent-config
+      configMap:
+        name: vswitch-agent-cfg
+    - name: memif-sockets
+      hostPath:
+        path: /tmp
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vnf-agent-cfg
+  namespace: csit
+data:
+  etcd.conf:
+    insecure-transport: true
+    dial-timeout: 1000000000
+    endpoints:
+      - "172.17.0.1:22379"
+
+  kafka.conf:
+    addrs:
+      - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: vnf-vpp
+  namespace: csit
+spec:
+  containers:
+    - name: "vnf"
+      image: prod_vpp_agent_shrink
+      imagePullPolicy: IfNotPresent
+      securityContext:
+        privileged: true
+      ports:
+        - containerPort: 5002
+        - containerPort: 9191
+      readinessProbe:
+        httpGet:
+          path: /readiness
+          port: 9191
+        periodSeconds: 1
+      livenessProbe:
+        httpGet:
+          path: /liveness
+          port: 9191
+        initialDelaySeconds: 15
+      env:
+        - name: MICROSERVICE_LABEL
+          value: vnf1
+      volumeMounts:
+        - name: vpp-config
+          mountPath: /etc/vpp
+        - name: agent-config
+          mountPath: /opt/vpp-agent/dev
+        - name: memif-sockets
+          mountPath: /tmp
+  volumes:
+  - name: vpp-config
+    configMap:
+      name: vnf-vpp-cfg
+  - name: agent-config
+    configMap:
+      name: vnf-agent-cfg
+  - name: memif-sockets
+    hostPath:
+      path: /tmp
diff --git a/resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml b/resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml
new file mode 100644 (file)
index 0000000..7514eeb
--- /dev/null
@@ -0,0 +1,206 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sfc-controller-cfg
+  namespace: csit
+data:
+  etcd.conf:
+    insecure-transport: true
+    dial-timeout: 1000000000
+    endpoints:
+      - "172.17.0.1:22379"
+
+  sfc.conf:
+    sfc_controller_config_version: 1
+    description: $$TEST_NAME$$
+    host_entities:
+      - name: vswitch
+    sfc_entities:
+      - name: vswitch-vnf1
+        description: vswitch to VNF1 - memif
+        type: 4
+        elements:
+          - container: vswitch
+            port_label: $$VSWITCH_IF1$$
+            etcd_vpp_switch_key: vswitch
+            type: 5
+          - container: vnf1
+            port_label: port1
+            etcd_vpp_switch_key: vswitch
+            type: 2
+      - name: vnf1-vswitch
+        description: VNF1 to vswitch - memif
+        type: 4
+        elements:
+          - container: vswitch
+            port_label: $$VSWITCH_IF2$$
+            etcd_vpp_switch_key: vswitch
+            type: 5
+          - container: vnf1
+            port_label: port2
+            etcd_vpp_switch_key: vswitch
+            type: 2
+
+  vnf.conf:
+    vnf_plugin_config_version: 1
+    description: VNF config
+    vnf_entities:
+      - name: vnf1
+        container: vnf1
+        l2xconnects:
+          - port_labels:
+            - port1
+            - port2
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sfc-controller
+  namespace: csit
+spec:
+  containers:
+    - name: "sfc-controller"
+      image: prod_sfc_controller
+      imagePullPolicy: IfNotPresent
+      command:
+        - /bin/sfc-controller
+        - -etcdv3-config=/opt/sfc-controller/dev/etcd.conf
+        - -sfc-config=/opt/sfc-controller/dev/sfc.conf
+        - -vnf-config=/opt/sfc-controller/dev/vnf.conf
+      volumeMounts:
+        - name: controller-config
+          mountPath: /opt/sfc-controller/dev
+  volumes:
+    - name: controller-config
+      configMap:
+        name: sfc-controller-cfg
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vswitch-agent-cfg
+  namespace: csit
+data:
+  etcd.conf:
+    insecure-transport: true
+    dial-timeout: 1000000000
+    endpoints:
+      - "172.17.0.1:22379"
+
+  kafka.conf:
+    addrs:
+      - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: vswitch-vpp
+  namespace: csit
+spec:
+  hostNetwork: true
+  containers:
+    - name: "vswitch"
+      image: prod_vpp_agent_shrink
+      imagePullPolicy: IfNotPresent
+      securityContext:
+        privileged: true
+      ports:
+        - containerPort: 5002
+        - containerPort: 9191
+      readinessProbe:
+        httpGet:
+          path: /readiness
+          port: 9191
+        periodSeconds: 1
+      livenessProbe:
+        httpGet:
+          path: /liveness
+          port: 9191
+        periodSeconds: 1
+        initialDelaySeconds: 15
+      env:
+        - name: MICROSERVICE_LABEL
+          value: vswitch
+      volumeMounts:
+        - name: vpp-config
+          mountPath: /etc/vpp
+        - name: agent-config
+          mountPath: /opt/vpp-agent/dev
+        - name: memif-sockets
+          mountPath: /tmp
+  volumes:
+    - name: vpp-config
+      configMap:
+        name: vswitch-vpp-cfg
+    - name: agent-config
+      configMap:
+        name: vswitch-agent-cfg
+    - name: memif-sockets
+      hostPath:
+        path: /tmp
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vnf-agent-cfg
+  namespace: csit
+data:
+  etcd.conf:
+    insecure-transport: true
+    dial-timeout: 1000000000
+    endpoints:
+      - "172.17.0.1:22379"
+
+  kafka.conf:
+    addrs:
+      - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: vnf-vpp
+  namespace: csit
+spec:
+  containers:
+    - name: "vnf"
+      image: prod_vpp_agent_shrink
+      imagePullPolicy: IfNotPresent
+      securityContext:
+        privileged: true
+      ports:
+        - containerPort: 5002
+        - containerPort: 9191
+      readinessProbe:
+        httpGet:
+          path: /readiness
+          port: 9191
+        periodSeconds: 1
+      livenessProbe:
+        httpGet:
+          path: /liveness
+          port: 9191
+        initialDelaySeconds: 15
+      env:
+        - name: MICROSERVICE_LABEL
+          value: vnf1
+      volumeMounts:
+        - name: vpp-config
+          mountPath: /etc/vpp
+        - name: agent-config
+          mountPath: /opt/vpp-agent/dev
+        - name: memif-sockets
+          mountPath: /tmp
+  volumes:
+  - name: vpp-config
+    configMap:
+      name: vnf-vpp-cfg
+  - name: agent-config
+    configMap:
+      name: vnf-agent-cfg
+  - name: memif-sockets
+    hostPath:
+      path: /tmp
diff --git a/resources/templates/kubernetes/kafka.yaml b/resources/templates/kubernetes/kafka.yaml
new file mode 100644 (file)
index 0000000..55d165f
--- /dev/null
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kafka-server
+  namespace: csit
+spec:
+  hostNetwork: true
+  containers:
+    - image: spotify/kafka
+      name: kafka
+      ports:
+        - containerPort: 2181
+          hostPort: 2181
+          name: zookeeper
+        - containerPort: 9092
+          hostPort: 9092
+          name: kafka
+      env:
+        - name: ADVERTISED_HOST
+          value: "172.17.0.1"
+        - name: ADVERTISED_PORT
+          value: "9092"
diff --git a/resources/tools/scripts/topo_container_copy.py b/resources/tools/scripts/topo_container_copy.py
new file mode 100644 (file)
index 0000000..d243182
--- /dev/null
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This script provides copy and load of Docker container images.
+   All DUT nodes from the topology file are used as destinations."""
+
+import sys
+import argparse
+from yaml import load
+
+from resources.libraries.python.ssh import SSH
+
+
+def ssh_no_error(ssh, cmd, sudo=False):
+    """Execute a command over ssh channel, and log and exit if the command
+    fails.
+
+    :param ssh: SSH() object connected to a node.
+    :param cmd: Command line to execute on remote node.
+    :param sudo: Run command with sudo privileges.
+    :type ssh: SSH() object
+    :type cmd: str
+    :type sudo: bool
+    :returns: stdout from the SSH command.
+    :rtype: str
+    :raises RuntimeError: In case of unexpected ssh command failure
+    """
+    if sudo:
+        ret, stdo, stde = ssh.exec_command_sudo(cmd, timeout=60)
+    else:
+        ret, stdo, stde = ssh.exec_command(cmd, timeout=60)
+
+    if ret != 0:
+        print('Command execution failed: "{}"'.format(cmd))
+        print('stdout: {0}'.format(stdo))
+        print('stderr: {0}'.format(stde))
+        raise RuntimeError('Unexpected ssh command failure')
+
+    return stdo
+
+
+def ssh_ignore_error(ssh, cmd, sudo=False):
+    """Execute a command over ssh channel, ignore errors.
+
+    :param ssh: SSH() object connected to a node.
+    :param cmd: Command line to execute on remote node.
+    :param sudo: Run command with sudo privileges.
+    :type ssh: SSH() object
+    :type cmd: str
+    :type sudo: bool
+    :returns: stdout from the SSH command.
+    :rtype: str
+    """
+    if sudo:
+        ret, stdo, stde = ssh.exec_command_sudo(cmd)
+    else:
+        ret, stdo, stde = ssh.exec_command(cmd)
+
+    if ret != 0:
+        print('Command execution failed: "{}"'.format(cmd))
+        print('stdout: {0}'.format(stdo))
+        print('stderr: {0}'.format(stde))
+
+    return stdo
+
+
+def main():
+    """Copy and load of Docker image."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-t", "--topo", required=True,
+                        help="Topology file")
+    parser.add_argument("-d", "--directory", required=True,
+                        help="Destination directory")
+    parser.add_argument("-i", "--images", required=False, nargs='+',
+                        help="Images paths to copy")
+    parser.add_argument("-c", "--cancel", help="Cancel all",
+                        action="store_true")
+
+    args = parser.parse_args()
+    topology_file = args.topo
+    images = args.images
+    directory = args.directory
+    cancel_all = args.cancel
+
+    work_file = open(topology_file)
+    topology = load(work_file.read())['nodes']
+
+    ssh = SSH()
+    for node in topology:
+        if topology[node]['type'] == "DUT":
+            print("###TI host: {host}".format(host=topology[node]['host']))
+            ssh.connect(topology[node])
+
+            if cancel_all:
+                # Remove destination directory on DUT
+                cmd = "rm -r {directory}".format(directory=directory)
+                stdout = ssh_ignore_error(ssh, cmd)
+                print("###TI {stdout}".format(stdout=stdout))
+
+            else:
+                # Create installation directory on DUT
+                cmd = "rm -r {directory}; mkdir {directory}"\
+                    .format(directory=directory)
+                stdout = ssh_no_error(ssh, cmd)
+                print("###TI {stdout}".format(stdout=stdout))
+
+                # Copy images from local path to destination dir
+                for image in images:
+                    print("###TI scp: {}".format(image))
+                    ssh.scp(local_path=image, remote_path=directory)
+
+                # Load image to Docker.
+                cmd = "for f in {directory}/*.tar.gz; do zcat $f | "\
+                    "sudo docker load; done".format(directory=directory)
+                stdout = ssh_no_error(ssh, cmd)
+                print("###TI {}".format(stdout))
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/tests/ligato/perf/__init__.robot b/tests/ligato/perf/__init__.robot
new file mode 100644 (file)
index 0000000..b4b6557
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Variables | resources/libraries/python/topology.py
+| Library | resources.libraries.python.CpuUtils
+| Library | resources.libraries.python.DUTSetup
+| Library | resources.libraries.python.InterfaceUtil
+| Library | resources.libraries.python.KubernetesUtils
+| Library | resources.libraries.python.NodePath
+| Library | resources.libraries.python.SchedUtils
+| Library | resources.libraries.python.SetupFramework
+| Library | resources.libraries.python.topology.Topology
+| Library | Collections
+| Suite Setup | Run Keywords | Setup performance global Variables
+| ...         | AND          | Setup Framework | ${nodes}
+| ...         | AND          | Setup Kubernetes on all duts | ${nodes}
+| ...         | AND          | Get CPU Layout from all nodes | ${nodes}
+| ...         | AND          | Update all numa nodes | ${nodes}
+| ...                        | skip_tg=${True}
+| ...         | AND          | Update NIC interface names on all duts | ${nodes}
+| ...
+| Suite Teardown | Reset Kubernetes on all duts | ${nodes}
+| ...
+*** Keywords ***
+
+| Setup performance global Variables
+| | [Documentation]
+| | ... | Setup suite Variables. Variables are used across performance testing.
+| | ...
+| | ... | _NOTE:_ This KW sets following suite variables:
+| | ... | - perf_trial_duration - Duration of traffic run [s]
+| | ... | - perf_pdr_loss_acceptance - Loss acceptance threshold
+| | ... | - perf_pdr_loss_acceptance_type - Loss acceptance threshold type
+| | ...
+| | Set Global Variable | ${perf_trial_duration} | 10
+| | Set Global Variable | ${perf_pdr_loss_acceptance} | 0.5
+| | Set Global Variable | ${perf_pdr_loss_acceptance_type} | percentage
diff --git a/tests/ligato/perf/l2/10ge2p1x520-eth-l2bdbase-eth-2memif-1vnf-ndrpdrdisc.robot b/tests/ligato/perf/l2/10ge2p1x520-eth-l2bdbase-eth-2memif-1vnf-ndrpdrdisc.robot
new file mode 100644 (file)
index 0000000..277c3ab
--- /dev/null
@@ -0,0 +1,299 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Resource | resources/libraries/robot/performance/performance_setup.robot
+| ...
+| Force Tags | 3_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | NDRPDRDISC
+| ... | NIC_Intel-X520-DA2 | ETH | L2BDMACLRN | BASE | MEMIF
+| ... | KUBERNETES | 1VSWITCH | 1VNF | VPP_AGENT | SFC_CONTROLLER
+| ...
+| Suite Setup | Set up 3-node performance topology with DUT's NIC model
+| ... | L2 | Intel-X520-DA2
+| ...
+| Test Setup | Run Keywords
+| ... | Apply Kubernetes resource on all duts | ${nodes} | kafka.yaml
+| ... | AND | Apply Kubernetes resource on all duts | ${nodes} | etcd.yaml
+| ...
+| Suite Teardown | Tear down 3-node performance topology
+| ...
+| Test Teardown | Run Keywords
+| ... | Describe Kubernetes resource on all DUTs | ${nodes} | AND
+| ... | Delete Kubernetes resource on all duts | ${nodes}
+| ...
+| Documentation | *RFC2544: Pkt throughput L2BD test cases*
+| ...
+| ... | *[Top] Network Topologies:* TG-DUT1-DUT2-TG 3-node circular topology
+| ... | with single links between nodes.
+| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 for L2 bridge domain.
+| ... | *[Cfg] DUT configuration:* DUT1 and DUT2 are configured with two L2
+| ... | bridge domains and MAC learning enabled. DUT1 and DUT2 tested with
+| ... | 2p10GE NIC X520 Niantic by Intel.
+| ... | VNF Container is connected to VSWITCH container via Memif interface. All
+| ... | containers are running the same VPP version. Containers are deployed with
+| ... | Kubernetes. Configuration is applied by vnf-agent.
+| ... | *[Ver] TG verification:* TG finds and reports throughput NDR (Non Drop
+| ... | Rate) with zero packet loss tolerance or throughput PDR (Partial Drop
+| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage
+| ... | of packets transmitted. NDR and PDR are discovered for different
+| ... | Ethernet L2 frame sizes using either binary search or linear search
+| ... | algorithms with configured starting rate and final step that determines
+| ... | throughput measurement resolution. Test packets are generated by TG on
+| ... | links to DUTs. TG traffic profile contains two L3 flow-groups
+| ... | (flow-group per direction, 253 flows per flow-group) with all packets
+| ... | containing Ethernet header, IPv4 header with IP protocol=61 and static
+| ... | payload. MAC addresses are matching MAC addresses of the TG node
+| ... | interfaces.
+| ... | *[Ref] Applicable standard specifications:* RFC2544.
+
+*** Variables ***
+| ${avg_imix_framesize}= | ${357.833}
+# X520-DA2 bandwidth limit
+| ${s_limit} | ${10000000000}
+# Kubernetes profile
+| ${kubernetes_profile} | eth-l2bdbase-eth-2memif-1vnf
+# Traffic profile:
+| ${traffic_profile} | trex-sl-3n-ethip4-ip4src254
+# CPU settings
+| ${system_cpus}= | ${1}
+| ${vswitch_cpus}= | ${5}
+| ${vnf_cpus}= | ${2}
+
+*** Keywords ***
+| Create Kubernetes VSWITCH startup config on all DUTs
+| | [Documentation] | Create base startup configuration of VSWITCH in Kubernetes
+| | ... | deploy to all DUTs.
+| | ...
+| | ... | *Arguments:*
+| | ... | - ${framesize} - L2 framesize. Type: integer
+| | ... | - ${wt} - Worker threads. Type: integer
+| | ... | - ${rxq} - RX queues. Type: integer
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create Kubernetes VSWITCH startup config on all DUTs \| ${64} \
+| | ... | \| ${1} \| ${1}
+| | ...
+| | [Arguments] | ${framesize} | ${wt} | ${rxq}
+| | ${dut1_numa}= | Get interfaces numa node | ${dut1}
+| | ... | ${dut1_if1} | ${dut1_if2}
+| | ${dut2_numa}= | Get interfaces numa node | ${dut2}
+| | ... | ${dut2_if1} | ${dut2_if2}
+| | ${dut1_if1_pci}= | Get Interface PCI Addr | ${dut1} | ${dut1_if1}
+| | ${dut1_if2_pci}= | Get Interface PCI Addr | ${dut1} | ${dut1_if2}
+| | ${dut2_if1_pci}= | Get Interface PCI Addr | ${dut2} | ${dut2_if1}
+| | ${dut2_if2_pci}= | Get Interface PCI Addr | ${dut2} | ${dut2_if2}
+| | ${cpu_cnt}= | Evaluate | ${wt}+1
+| | ${config}= | Run keyword | Create Kubernetes VSWITCH startup config
+| | ... | node=${dut1} | cpu_cnt=${cpu_cnt} | cpu_node=${dut1_numa}
+| | ... | cpu_skip=${system_cpus} | smt_used=${False}
+| | ... | filename=/tmp/vswitch.conf | framesize=${framesize} | rxq=${rxq}
+| | ... | if1=${dut1_if1_pci} | if2=${dut1_if2_pci}
+| | ${config}= | Run keyword | Create Kubernetes VSWITCH startup config
+| | ... | node=${dut2} | cpu_cnt=${cpu_cnt} | cpu_node=${dut2_numa}
+| | ... | cpu_skip=${system_cpus} | smt_used=${False}
+| | ... | filename=/tmp/vswitch.conf | framesize=${framesize} | rxq=${rxq}
+| | ... | if1=${dut2_if1_pci} | if2=${dut2_if2_pci}
+
+| Create Kubernetes VNF startup config on all DUTs
+| | [Documentation] | Create base startup configuration of VNF in Kubernetes
+| | ... | deploy to all DUTs.
+| | ...
+| | ${cpu_skip}= | Evaluate | ${vswitch_cpus}+${system_cpus}
+| | ${dut1_numa}= | Get interfaces numa node | ${dut1}
+| | ... | ${dut1_if1} | ${dut1_if2}
+| | ${dut2_numa}= | Get interfaces numa node | ${dut2}
+| | ... | ${dut2_if1} | ${dut2_if2}
+| | ${config}= | Run keyword | Create Kubernetes VNF startup config
+| | ... | node=${dut1} | cpu_cnt=${vnf_cpus} | cpu_node=${dut1_numa}
+| | ... | cpu_skip=${cpu_skip} | smt_used=${False} | filename=/tmp/vnf.conf
+| | ${config}= | Run keyword | Create Kubernetes VNF startup config
+| | ... | node=${dut2} | cpu_cnt=${vnf_cpus} | cpu_node=${dut2_numa}
+| | ... | cpu_skip=${cpu_skip} | smt_used=${False} | filename=/tmp/vnf.conf
+
+| L2 Bridge Domain Binary Search
+| | [Arguments] | ${framesize} | ${min_rate} | ${wt} | ${rxq} | ${search_type}
+| | Set Test Variable | ${framesize}
+| | Set Test Variable | ${min_rate}
+| | ${get_framesize}= | Set Variable If
+| | ... | "${framesize}" == "IMIX_v4_1" | ${avg_imix_framesize} | ${framesize}
+| | ${max_rate}= | Calculate pps | ${s_limit} | ${get_framesize}
+| | ${binary_min}= | Set Variable | ${min_rate}
+| | ${binary_max}= | Set Variable | ${max_rate}
+| | ${threshold}= | Set Variable | ${min_rate}
+| | ${dut1_if1_name}= | Get interface name | ${dut1} | ${dut1_if1}
+| | ${dut1_if2_name}= | Get interface name | ${dut1} | ${dut1_if2}
+| | ${dut2_if1_name}= | Get interface name | ${dut2} | ${dut2_if1}
+| | ${dut2_if2_name}= | Get interface name | ${dut2} | ${dut2_if2}
+| | Create Kubernetes VSWITCH startup config on all DUTs | ${get_framesize}
+| | ... | ${wt} | ${rxq}
+| | Create Kubernetes VNF startup config on all DUTs
+| | Create Kubernetes CM from file on all DUTs | ${nodes} | name=vswitch-vpp-cfg
+| | ... | key=vpp.conf | src_file=/tmp/vswitch.conf
+| | Create Kubernetes CM from file on all DUTs | ${nodes} | name=vnf-vpp-cfg
+| | ... | key=vpp.conf | src_file=/tmp/vnf.conf
+| | Apply Kubernetes resource on node | ${dut1}
+| | ... | ${kubernetes_profile}.yaml | $$TEST_NAME$$=${TEST NAME}
+| | ... | $$VSWITCH_IF1$$=${dut1_if1_name}
+| | ... | $$VSWITCH_IF2$$=${dut1_if2_name}
+| | Apply Kubernetes resource on node | ${dut2}
+| | ... | ${kubernetes_profile}.yaml | $$TEST_NAME$$=${TEST NAME}
+| | ... | $$VSWITCH_IF1$$=${dut2_if1_name}
+| | ... | $$VSWITCH_IF2$$=${dut2_if2_name}
+| | Wait for Kubernetes PODs on all DUTs | ${nodes}
+| | Run Keyword If | '${search_type}' == 'NDR'
+| | ... | Find NDR using binary search and pps
+| | ... | ${framesize} | ${binary_min} | ${binary_max} | ${traffic_profile}
+| | ... | ${min_rate} | ${max_rate} | ${threshold}
+| | ... | ELSE IF | '${search_type}' == 'PDR'
+| | ... | Find PDR using binary search and pps
+| | ... | ${framesize} | ${binary_min} | ${binary_max} | ${traffic_profile}
+| | ... | ${min_rate} | ${max_rate} | ${threshold}
+| | ... | ${perf_pdr_loss_acceptance} | ${perf_pdr_loss_acceptance_type}
+
+*** Test Cases ***
+| tc01-64B-1t1c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for 64 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 100kpps.
+| | ...
+| | [Tags] | 64B | 1T1C | STHREAD | NDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=${64} | min_rate=${100000} | wt=1 | rxq=1 | search_type=NDR
+
+| tc02-64B-1t1c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for 64 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 100kpps, LT=0.5%.
+| | ...
+| | [Tags] | 64B | 1T1C | STHREAD | PDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=${64} | min_rate=${100000} | wt=1 | rxq=1 | search_type=PDR
+
+| tc03-IMIX-1t1c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for IMIX_v4_1 frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps.
+| | ... | IMIX_v4_1 = (28x64B;16x570B;4x1518B)
+| | ...
+| | [Tags] | IMIX | 1T1C | STHREAD | NDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=IMIX_v4_1 | min_rate=${10000} | wt=1 | rxq=1 | search_type=NDR
+
+| tc04-IMIX-1t1c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for IMIX_v4_1 frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps, LT=0.5%.
+| | ... | IMIX_v4_1 = (28x64B;16x570B;4x1518B)
+| | ...
+| | [Tags] | IMIX | 1T1C | STHREAD | PDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=IMIX_v4_1 | min_rate=${10000} | wt=1 | rxq=1 | search_type=PDR
+
+| tc05-1518B-1t1c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for 1518 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps.
+| | ...
+| | [Tags] | 1518B | 1T1C | STHREAD | NDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=${1518} | min_rate=${10000} | wt=1 | rxq=1 | search_type=NDR
+
+| tc06-1518B-1t1c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for 1518 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps, LT=0.5%.
+| | ...
+| | [Tags] | 1518B | 1T1C | STHREAD | PDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=${1518} | min_rate=${10000} | wt=1 | rxq=1 | search_type=PDR
+
+| tc07-64B-2t2c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for 64 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 100kpps.
+| | ...
+| | [Tags] | 64B | 2T2C | MTHREAD | NDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=${64} | min_rate=${100000} | wt=2 | rxq=1 | search_type=NDR
+
+| tc08-64B-2t2c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for 64 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 100kpps, LT=0.5%.
+| | ...
+| | [Tags] | 64B | 2T2C | MTHREAD | PDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=${64} | min_rate=${100000} | wt=2 | rxq=1 | search_type=PDR
+
+| tc09-IMIX-2t2c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for IMIX_v4_1 frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps.
+| | ... | IMIX_v4_1 = (28x64B;16x570B;4x1518B)
+| | ...
+| | [Tags] | IMIX | 2T2C | MTHREAD | NDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=IMIX_v4_1 | min_rate=${10000} | wt=2 | rxq=1 | search_type=NDR
+
+| tc10-IMIX-2t2c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for IMIX_v4_1 frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps, LT=0.5%.
+| | ... | IMIX_v4_1 = (28x64B;16x570B;4x1518B)
+| | ...
+| | [Tags] | IMIX | 2T2C | MTHREAD | PDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=IMIX_v4_1 | min_rate=${10000} | wt=2 | rxq=1 | search_type=PDR
+
+| tc11-1518B-2t2c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for 1518 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps.
+| | ...
+| | [Tags] | 1518B | 2T2C | MTHREAD | NDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=${1518} | min_rate=${10000} | wt=2 | rxq=1 | search_type=NDR
+
+| tc12-1518B-2t2c-eth-l2bdbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2BD switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for 1518 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps, LT=0.5%.
+| | ...
+| | [Tags] | 1518B | 2T2C | MTHREAD | PDRDISC
+| | [Template] | L2 Bridge Domain Binary Search
+| | framesize=${1518} | min_rate=${10000} | wt=2 | rxq=1 | search_type=PDR
diff --git a/tests/ligato/perf/l2/10ge2p1x520-eth-l2xcbase-eth-2memif-1vnf-ndrpdrdisc.robot b/tests/ligato/perf/l2/10ge2p1x520-eth-l2xcbase-eth-2memif-1vnf-ndrpdrdisc.robot
new file mode 100644 (file)
index 0000000..6dba66f
--- /dev/null
@@ -0,0 +1,299 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Resource | resources/libraries/robot/performance/performance_setup.robot
+| ...
+| Force Tags | 3_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | NDRPDRDISC
+| ... | NIC_Intel-X520-DA2 | ETH | L2XCFWD | BASE | L2XCBASE | MEMIF
+| ... | KUBERNETES | 1VSWITCH | 1VNF | VPP_AGENT | SFC_CONTROLLER
+| ...
+| Suite Setup | Set up 3-node performance topology with DUT's NIC model
+| ... | L2 | Intel-X520-DA2
+| ...
+| Test Setup | Run Keywords
+| ... | Apply Kubernetes resource on all duts | ${nodes} | kafka.yaml
+| ... | AND | Apply Kubernetes resource on all duts | ${nodes} | etcd.yaml
+| ...
+| Suite Teardown | Tear down 3-node performance topology
+| ...
+| Test Teardown | Run Keywords
+| ... | Describe Kubernetes resource on all DUTs | ${nodes} | AND
+| ... | Delete Kubernetes resource on all duts | ${nodes}
+| ...
+| Documentation | *RFC2544: Pkt throughput L2XC test cases*
+| ...
+| ... | *[Top] Network Topologies:* TG-DUT1-DUT2-TG 3-node circular topology
+| ... | with single links between nodes.
+| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 for L2 cross connect.
+| ... | *[Cfg] DUT configuration:* DUT1 and DUT2 are configured with L2 cross-
+| ... | connect. DUT1 and DUT2 tested with 2p10GE NIC X520 Niantic by Intel.
+| ... | VNF Container is connected to VSWITCH container via Memif interface. All
+| ... | containers are running same VPP version. Containers are deployed with
+| ... | Kubernetes. Configuration is applied by vnf-agent.
+| ... | *[Ver] TG verification:* TG finds and reports throughput NDR (Non Drop
+| ... | Rate) with zero packet loss tolerance or throughput PDR (Partial Drop
+| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage
+| ... | of packets transmitted. NDR and PDR are discovered for different
+| ... | Ethernet L2 frame sizes using either binary search or linear search
+| ... | algorithms with configured starting rate and final step that determines
+| ... | throughput measurement resolution. Test packets are generated by TG on
+| ... | links to DUTs. TG traffic profile contains two L3 flow-groups
+| ... | (flow-group per direction, 253 flows per flow-group) with all packets
+| ... | containing Ethernet header, IPv4 header with IP protocol=61 and static
+| ... | payload. MAC addresses are matching MAC addresses of the TG node
+| ... | interfaces.
+| ... | *[Ref] Applicable standard specifications:* RFC2544.
+
+*** Variables ***
+| ${avg_imix_framesize}= | ${357.833}
+# X520-DA2 bandwidth limit
+| ${s_limit} | ${10000000000}
+# Kubernetes profile
+| ${kubernetes_profile} | eth-l2xcbase-eth-2memif-1vnf
+# Traffic profile:
+| ${traffic_profile} | trex-sl-3n-ethip4-ip4src254
+# CPU settings
+| ${system_cpus}= | ${1}
+| ${vswitch_cpus}= | ${5}
+| ${vnf_cpus}= | ${2}
+
+*** Keywords ***
+| Create Kubernetes VSWITCH startup config on all DUTs
+| | [Documentation] | Create base startup configuration of VSWITCH in Kubernetes
+| | ... | deploy to all DUTs.
+| | ...
+| | ... | *Arguments:*
+| | ... | - ${framesize} - L2 framesize. Type: integer
+| | ... | - ${wt} - Worker threads. Type: integer
+| | ... | - ${rxq} - RX queues. Type: integer
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create Kubernetes VSWITCH startup config on all DUTs \| ${64} \
+| | ... | \| ${1} \| ${1}
+| | ...
+| | [Arguments] | ${framesize} | ${wt} | ${rxq}
+| | ${dut1_numa}= | Get interfaces numa node | ${dut1}
+| | ... | ${dut1_if1} | ${dut1_if2}
+| | ${dut2_numa}= | Get interfaces numa node | ${dut2}
+| | ... | ${dut2_if1} | ${dut2_if2}
+| | ${dut1_if1_pci}= | Get Interface PCI Addr | ${dut1} | ${dut1_if1}
+| | ${dut1_if2_pci}= | Get Interface PCI Addr | ${dut1} | ${dut1_if2}
+| | ${dut2_if1_pci}= | Get Interface PCI Addr | ${dut2} | ${dut2_if1}
+| | ${dut2_if2_pci}= | Get Interface PCI Addr | ${dut2} | ${dut2_if2}
+| | ${cpu_cnt}= | Evaluate | ${wt}+1
+| | ${config}= | Run keyword | Create Kubernetes VSWITCH startup config
+| | ... | node=${dut1} | cpu_cnt=${cpu_cnt} | cpu_node=${dut1_numa}
+| | ... | cpu_skip=${system_cpus} | smt_used=${False}
+| | ... | filename=/tmp/vswitch.conf | framesize=${framesize} | rxq=${rxq}
+| | ... | if1=${dut1_if1_pci} | if2=${dut1_if2_pci}
+| | ${config}= | Run keyword | Create Kubernetes VSWITCH startup config
+| | ... | node=${dut2} | cpu_cnt=${cpu_cnt} | cpu_node=${dut2_numa}
+| | ... | cpu_skip=${system_cpus} | smt_used=${False}
+| | ... | filename=/tmp/vswitch.conf | framesize=${framesize} | rxq=${rxq}
+| | ... | if1=${dut2_if1_pci} | if2=${dut2_if2_pci}
+
+| Create Kubernetes VNF startup config on all DUTs
+| | [Documentation] | Create base startup configuration of VNF in Kubernetes
+| | ... | deploy to all DUTs.
+| | ...
+| | ${cpu_skip}= | Evaluate | ${vswitch_cpus}+${system_cpus}
+| | ${dut1_numa}= | Get interfaces numa node | ${dut1}
+| | ... | ${dut1_if1} | ${dut1_if2}
+| | ${dut2_numa}= | Get interfaces numa node | ${dut2}
+| | ... | ${dut2_if1} | ${dut2_if2}
+| | ${config}= | Run keyword | Create Kubernetes VNF startup config
+| | ... | node=${dut1} | cpu_cnt=${vnf_cpus} | cpu_node=${dut1_numa}
+| | ... | cpu_skip=${cpu_skip} | smt_used=${False} | filename=/tmp/vnf.conf
+| | ${config}= | Run keyword | Create Kubernetes VNF startup config
+| | ... | node=${dut2} | cpu_cnt=${vnf_cpus} | cpu_node=${dut2_numa}
+| | ... | cpu_skip=${cpu_skip} | smt_used=${False} | filename=/tmp/vnf.conf
+
+| L2 Cross Connect Binary Search
+| | [Arguments] | ${framesize} | ${min_rate} | ${wt} | ${rxq} | ${search_type}
+| | Set Test Variable | ${framesize}
+| | Set Test Variable | ${min_rate}
+| | ${get_framesize}= | Set Variable If
+| | ... | "${framesize}" == "IMIX_v4_1" | ${avg_imix_framesize} | ${framesize}
+| | ${max_rate}= | Calculate pps | ${s_limit} | ${get_framesize}
+| | ${binary_min}= | Set Variable | ${min_rate}
+| | ${binary_max}= | Set Variable | ${max_rate}
+| | ${threshold}= | Set Variable | ${min_rate}
+| | ${dut1_if1_name}= | Get interface name | ${dut1} | ${dut1_if1}
+| | ${dut1_if2_name}= | Get interface name | ${dut1} | ${dut1_if2}
+| | ${dut2_if1_name}= | Get interface name | ${dut2} | ${dut2_if1}
+| | ${dut2_if2_name}= | Get interface name | ${dut2} | ${dut2_if2}
+| | Create Kubernetes VSWITCH startup config on all DUTs | ${get_framesize}
+| | ... | ${wt} | ${rxq}
+| | Create Kubernetes VNF startup config on all DUTs
+| | Create Kubernetes CM from file on all DUTs | ${nodes} | name=vswitch-vpp-cfg
+| | ... | key=vpp.conf | src_file=/tmp/vswitch.conf
+| | Create Kubernetes CM from file on all DUTs | ${nodes} | name=vnf-vpp-cfg
+| | ... | key=vpp.conf | src_file=/tmp/vnf.conf
+| | Apply Kubernetes resource on node | ${dut1}
+| | ... | ${kubernetes_profile}.yaml | $$TEST_NAME$$=${TEST NAME}
+| | ... | $$VSWITCH_IF1$$=${dut1_if1_name}
+| | ... | $$VSWITCH_IF2$$=${dut1_if2_name}
+| | Apply Kubernetes resource on node | ${dut2}
+| | ... | ${kubernetes_profile}.yaml | $$TEST_NAME$$=${TEST NAME}
+| | ... | $$VSWITCH_IF1$$=${dut2_if1_name}
+| | ... | $$VSWITCH_IF2$$=${dut2_if2_name}
+| | Wait for Kubernetes PODs on all DUTs | ${nodes}
+| | Describe Kubernetes resource on all DUTs | ${nodes}
+| | Run Keyword If | '${search_type}' == 'NDR'
+| | ... | Find NDR using binary search and pps
+| | ... | ${framesize} | ${binary_min} | ${binary_max} | ${traffic_profile}
+| | ... | ${min_rate} | ${max_rate} | ${threshold}
+| | ... | ELSE IF | '${search_type}' == 'PDR'
+| | ... | Find PDR using binary search and pps
+| | ... | ${framesize} | ${binary_min} | ${binary_max} | ${traffic_profile}
+| | ... | ${min_rate} | ${max_rate} | ${threshold}
+| | ... | ${perf_pdr_loss_acceptance} | ${perf_pdr_loss_acceptance_type}
+
+*** Test Cases ***
+| tc01-64B-1t1c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for 64 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 100kpps.
+| | ...
+| | [Tags] | 64B | 1T1C | STHREAD | NDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=${64} | min_rate=${100000} | wt=1 | rxq=1 | search_type=NDR
+
+| tc02-64B-1t1c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for 64 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 100kpps, LT=0.5%.
+| | ...
+| | [Tags] | 64B | 1T1C | STHREAD | PDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=${64} | min_rate=${100000} | wt=1 | rxq=1 | search_type=PDR
+
+| tc03-IMIX-1t1c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for IMIX_v4_1 frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps.
+| | ... | IMIX_v4_1 = (28x64B;16x570B;4x1518B)
+| | ...
+| | [Tags] | IMIX | 1T1C | STHREAD | NDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=IMIX_v4_1 | min_rate=${10000} | wt=1 | rxq=1 | search_type=NDR
+
+| tc04-IMIX-1t1c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for IMIX_v4_1 frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps, LT=0.5%.
+| | ... | IMIX_v4_1 = (28x64B;16x570B;4x1518B)
+| | ...
+| | [Tags] | IMIX | 1T1C | STHREAD | PDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=IMIX_v4_1 | min_rate=${10000} | wt=1 | rxq=1 | search_type=PDR
+
+| tc05-1518B-1t1c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for 1518 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps.
+| | ...
+| | [Tags] | 1518B | 1T1C | STHREAD | NDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=${1518} | min_rate=${10000} | wt=1 | rxq=1 | search_type=NDR
+
+| tc06-1518B-1t1c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for 1518 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps, LT=0.5%.
+| | ...
+| | [Tags] | 1518B | 1T1C | STHREAD | PDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=${1518} | min_rate=${10000} | wt=1 | rxq=1 | search_type=PDR
+
+| tc07-64B-2t2c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for 64 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 100kpps.
+| | ...
+| | [Tags] | 64B | 2T2C | MTHREAD | NDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=${64} | min_rate=${100000} | wt=2 | rxq=1 | search_type=NDR
+
+| tc08-64B-2t2c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for 64 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 100kpps, LT=0.5%.
+| | ...
+| | [Tags] | 64B | 2T2C | MTHREAD | PDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=${64} | min_rate=${100000} | wt=2 | rxq=1 | search_type=PDR
+
+| tc09-IMIX-2t2c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for IMIX_v4_1 frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps.
+| | ... | IMIX_v4_1 = (28x64B;16x570B;4x1518B)
+| | ...
+| | [Tags] | IMIX | 2T2C | MTHREAD | NDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=IMIX_v4_1 | min_rate=${10000} | wt=2 | rxq=1 | search_type=NDR
+
+| tc10-IMIX-2t2c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for IMIX_v4_1 frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps, LT=0.5%.
+| | ... | IMIX_v4_1 = (28x64B;16x570B;4x1518B)
+| | ...
+| | [Tags] | IMIX | 2T2C | MTHREAD | PDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=IMIX_v4_1 | min_rate=${10000} | wt=2 | rxq=1 | search_type=PDR
+
+| tc11-1518B-2t2c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-ndrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find NDR for 1518 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps.
+| | ...
+| | [Tags] | 1518B | 2T2C | MTHREAD | NDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=${1518} | min_rate=${10000} | wt=2 | rxq=1 | search_type=NDR
+
+| tc12-1518B-2t2c-eth-l2xcbase-eth-2memif-1vnf-kubernetes-pdrdisc
+| | [Documentation]
+| | ... | [Cfg] DUT runs L2XC switching config with 2 thread, 2 phy core,\
+| | ... | 1 receive queue per NIC port.
+| | ... | [Ver] Find PDR for 1518 Byte frames using binary search start at 10GE\
+| | ... | linerate, step 10kpps, LT=0.5%.
+| | ...
+| | [Tags] | 1518B | 2T2C | MTHREAD | PDRDISC
+| | [Template] | L2 Cross Connect Binary Search
+| | framesize=${1518} | min_rate=${10000} | wt=2 | rxq=1 | search_type=PDR
index 61dd83a..18cb722 100644 (file)
@@ -19,6 +19,7 @@
 | Suite Setup | Run Keywords | Setup performance global Variables
 | ...         | AND          | Setup Framework | ${nodes}
 | ...         | AND          | Setup All DUTs | ${nodes}
+| ...         | AND          | Show Vpp Version On All Duts | ${nodes}
 | ...         | AND          | Get CPU Layout from all nodes | ${nodes}
 | ...         | AND          | Update All Interface Data On All Nodes
 | ...                        | ${nodes} | skip_tg=${True} | numa_node=${True}

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.