diff --git a/bootstrap-verify-perf-ligato.sh b/bootstrap-verify-perf-ligato.sh
index 10470dc..1ab881b 100644
--- a/bootstrap-verify-perf-ligato.sh
+++ b/bootstrap-verify-perf-ligato.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 set -xo pipefail
 
 # Space separated list of available testbeds, described by topology files
-TOPOLOGIES="topologies/available/lf_testbed1.yaml \
-            topologies/available/lf_testbed2.yaml \
-            topologies/available/lf_testbed3.yaml"
+TOPOLOGIES="topologies/available/lf_3n_hsw_testbed1.yaml \
+            topologies/available/lf_3n_hsw_testbed2.yaml \
+            topologies/available/lf_3n_hsw_testbed3.yaml"
 
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export PYTHONPATH=${SCRIPT_DIR}
+export DEBIAN_FRONTEND=noninteractive
 
 # Reservation dir
 RESERVATION_DIR="/tmp/reservation_dir"
 INSTALLATION_DIR="/tmp/install_dir"
 
-PYBOT_ARGS="-W 150 -L TRACE"
-
-ARCHIVE_ARTIFACTS=(log.html output.xml report.html output_perf_data.xml)
+JOB_ARCHIVE_ARTIFACTS=(log.html output.xml report.html)
+LOG_ARCHIVE_ARTIFACTS=(log.html output.xml report.html)
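+# JOB_ARCHIVE_DIR is archived by the Jenkins job itself; LOG_ARCHIVE_DIR is
+# uploaded to logs.fd.io (see the copy loops at the end of this script).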
+JOB_ARCHIVE_DIR="archive"
+LOG_ARCHIVE_DIR="$WORKSPACE/archives"
+mkdir -p ${JOB_ARCHIVE_DIR}
+mkdir -p ${LOG_ARCHIVE_DIR}
 
 # If we run this script from CSIT jobs we want to use stable vpp version
 if [[ ${JOB_NAME} == csit-* ]] ;
 then
-    mkdir -p vpp/build-root
-    cd vpp/build-root
+    mkdir -p vpp_download
+    cd vpp_download
 
-    if [[ ${TEST_TAG} == *NIGHTLY ]] || \
-       [[ ${TEST_TAG} == *DAILY ]] || \
+    if [[ ${TEST_TAG} == *DAILY ]] || \
        [[ ${TEST_TAG} == *WEEKLY ]];
     then
-        # Download the latest VPP build .deb install packages
-        echo Downloading VPP packages...
-        bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh --skip-install
-
-        VPP_DEBS="$( readlink -f *.deb | tr '\n' ' ' )"
-        # Take vpp package and get the vpp version
-        VPP_STABLE_VER="$( expr match $(ls *.deb | head -n 1) 'vpp-\(.*\)-deb.deb' )"
+        echo Downloading latest VPP packages from NEXUS...
+        bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
+            --skip-install
     else
-        DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)_amd64
-        VPP_REPO_URL=$(cat ${SCRIPT_DIR}/VPP_REPO_URL_UBUNTU)
+        echo Downloading VPP packages of the specified version from NEXUS...
+        DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
         VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
-        VPP_CLASSIFIER="-deb"
-        # Download vpp build from nexus and set VPP_DEBS variable
-        wget -q "${VPP_REPO_URL}/vpp/${VPP_STABLE_VER}/vpp-${VPP_STABLE_VER}${VPP_CLASSIFIER}.deb" || exit
-        wget -q "${VPP_REPO_URL}/vpp-dbg/${VPP_STABLE_VER}/vpp-dbg-${VPP_STABLE_VER}${VPP_CLASSIFIER}.deb" || exit
-        wget -q "${VPP_REPO_URL}/vpp-dev/${VPP_STABLE_VER}/vpp-dev-${VPP_STABLE_VER}${VPP_CLASSIFIER}.deb" || exit
-        # Temporary disable using dpdk
-        # wget -q "${VPP_REPO_URL}/vpp-dpdk-dkms/${DPDK_STABLE_VER}/vpp-dpdk-dkms-${DPDK_STABLE_VER}${VPP_CLASSIFIER}.deb" || exit
-        wget -q "${VPP_REPO_URL}/vpp-lib/${VPP_STABLE_VER}/vpp-lib-${VPP_STABLE_VER}${VPP_CLASSIFIER}.deb" || exit
-        wget -q "${VPP_REPO_URL}/vpp-plugins/${VPP_STABLE_VER}/vpp-plugins-${VPP_STABLE_VER}${VPP_CLASSIFIER}.deb" || exit
-        VPP_DEBS="$( readlink -f *.deb | tr '\n' ' ' )"
+        # Temporary: enable if the arch suffix is not removed from
+        # VPP_STABLE_VER_UBUNTU.
+        #VPP_STABLE_VER=${VPP_STABLE_VER%_amd64}
+        bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
+            --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER}
     fi
-
-    # Temporary workaround as ligato docker file requires specific file name
-    rename -v 's/^(.*)-(\d.*)-deb.deb/$1_$2.deb/' *.deb
+    # Convert downloaded VPP deb file names to full paths
+    VPP_DEBS="$( readlink -f vpp*.deb | tr '\n' ' ' )"
     cd ${SCRIPT_DIR}
 
 # If we run this script from vpp project we want to use local build
 elif [[ ${JOB_NAME} == vpp-* ]] ;
 then
-    mkdir -p vpp/build-root
     # Use local packages provided as argument list
     # Jenkins VPP deb paths (convert to full path)
     VPP_DEBS="$( readlink -f $@ | tr '\n' ' ' )"
-    # Take vpp package and get the vpp version
-    VPP_STABLE_VER="$( expr match $1 'vpp-\(.*\)-deb.deb' )"
-    # Move files to build-root for packing
-    for deb in ${VPP_DEBS}; do mv ${deb} vpp/build-root/; done
 else
     echo "Unable to identify job type based on JOB_NAME variable: ${JOB_NAME}"
     exit 1
 fi
 
-# Compress all VPP debs and remove temporary directory
-tar -zcvf ${SCRIPT_DIR}/vpp.tar.gz vpp/*  && rm -R vpp
+# Extract the VPP API definitions into a temporary folder
+dpkg -x vpp_download/vpp_*.deb /tmp/vpp
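+# (the extracted /tmp/vpp/usr/share/vpp/api definitions are copied into the
+# vpp-agent build container below)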
 
-LIGATO_REPO_URL=$(cat ${SCRIPT_DIR}/LIGATO_REPO_URL)
+LIGATO_REPO_URL='https://github.com/ligato'
 VPP_AGENT_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_AGENT_STABLE_VER)
-VPP_AGENT_STABLE_COMMIT="$( expr match `cat VPP_AGENT_STABLE_VER` '.*g\(.*\)' )"
-DOCKER_DEB="docker-ce_17.06.2~ce-0~ubuntu_amd64.deb"
+DOCKER_DEB="docker-ce_18.03.0~ce-0~ubuntu_amd64.deb"
 
 # Clone & checkout stable vnf-agent
-cd .. && git clone ${LIGATO_REPO_URL}/vpp-agent
+cd .. && git clone -b ${VPP_AGENT_STABLE_VER} --single-branch \
+    ${LIGATO_REPO_URL}/vpp-agent vpp-agent
 # If the git clone fails, complain clearly and exit
 if [ $? != 0 ]; then
-    echo "Failed to run: git clone --depth 1 ${LIGATO_REPO_URL}/vpp-agent"
-    exit 1
-fi
-cd vpp-agent && git checkout ${VPP_AGENT_STABLE_COMMIT}
-# If the git checkout fails, complain clearly and exit
-if [ $? != 0 ]; then
-    echo "Failed to run: git checkout ${VPP_AGENT_STABLE_VER}"
+    echo "Failed to run: git clone ${LIGATO_REPO_URL}/vpp-agent"
     exit 1
 fi
+cd vpp-agent
 
 # Install Docker
 wget -q https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/${DOCKER_DEB}
@@ -117,37 +100,57 @@ fi
 sudo docker pull ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER}
 sudo docker tag ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER}\
     dev_vpp_agent:latest
-sudo docker images
+
+# Start dev_vpp_agent container as daemon
+sudo docker run --rm -itd --name agentcnt dev_vpp_agent bash
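+# (an interactive detached bash keeps the container running so that files can
+# be copied in and build commands exec'd before committing the result)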
+
+# Copy the extracted VPP API definitions into the running container
+sudo docker cp /tmp/vpp/usr/share/vpp/api agentcnt:/usr/share/vpp
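+# Copy the VPP debs from vpp_download/ into the container's VPP build-root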
+for f in ${SCRIPT_DIR}/vpp_download/*; do
+    sudo docker cp $f agentcnt:/opt/vpp-agent/dev/vpp/build-root/
+done
+
+# Recompile vpp-agent
+sudo docker exec -i agentcnt \
+    script -qec '. ~/.bashrc; cd /go/src/github.com/ligato/vpp-agent && make generate && make install'
+if [ $? != 0 ]; then
+    echo "Failed to build vpp-agent in Docker image."
+    exit 1
+fi
+# Save container state
+sudo docker commit agentcnt dev_vpp_agent:latest
 
 # Build prod_vpp_agent docker image
-cd ${SCRIPT_DIR}/../vpp-agent/docker/prod_vpp_agent/ &&\
-    mv ${SCRIPT_DIR}/vpp.tar.gz . &&\
-    ./extract_agent_files.sh &&\
-    sudo docker build -t prod_vpp_agent --no-cache . &&\
-    ./shrink.sh
+cd docker/prod/ &&\
+    sudo docker build --tag prod_vpp_agent --no-cache .
 # If image build fails, complain clearly and exit
 if [ $? != 0 ]; then
     echo "Failed to build vpp-agent Docker image."
     exit 1
 fi
 # Export Docker image
-sudo docker save prod_vpp_agent_shrink | gzip > prod_vpp_agent_shrink.tar.gz
+sudo docker save prod_vpp_agent | gzip > prod_vpp_agent.tar.gz
+# Kill the running agentcnt container
+sudo docker rm -f agentcnt
-DOCKER_IMAGE="$( readlink -f prod_vpp_agent_shrink.tar.gz | tr '\n' ' ' )"
+DOCKER_IMAGE="$( readlink -f prod_vpp_agent.tar.gz | tr '\n' ' ' )"
 
 cd ${SCRIPT_DIR}
 
+WORKING_TOPOLOGY=""
+
 sudo apt-get -y update
 sudo apt-get -y install libpython2.7-dev python-virtualenv
 
-WORKING_TOPOLOGY=""
-export PYTHONPATH=${SCRIPT_DIR}
-
 virtualenv --system-site-packages env
 . env/bin/activate
 
 echo pip install
 pip install -r requirements.txt
 
+if [ -z "${TOPOLOGIES}" ]; then
+    echo "No applicable topology found!"
+    exit 1
+fi
 # We iterate over available topologies and wait until we reserve topology
 while :; do
     for TOPOLOGY in ${TOPOLOGIES};
@@ -180,7 +183,7 @@ function cancel_all {
 # packages
 trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
 
-python ${SCRIPT_DIR}/resources/tools/scripts/topo_container_copy.py\
+python ${SCRIPT_DIR}/resources/tools/scripts/topo_container_copy.py \
     -t ${WORKING_TOPOLOGY} -d ${INSTALLATION_DIR} -i ${DOCKER_IMAGE}
 if [ $? -eq 0 ]; then
     echo "Docker image copied and loaded on hosts from: ${WORKING_TOPOLOGY}"
@@ -189,235 +192,111 @@ else
     exit 1
 fi
 
+# Identify the DUT type based on the job name
+if [[ ${JOB_NAME} == *hc2vpp* ]] ;
+then
+    DUT="hc2vpp"
+elif [[ ${JOB_NAME} == *vpp* ]] ;
+then
+    DUT="vpp"
+elif [[ ${JOB_NAME} == *ligato* ]] ;
+then
+    DUT="kubernetes"
+elif [[ ${JOB_NAME} == *dpdk* ]] ;
+then
+    DUT="dpdk"
+else
+    echo "Unable to identify dut type based on JOB_NAME variable: ${JOB_NAME}"
+    exit 1
+fi
+
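+# The DUT value selects the Robot Framework suite to run, e.g.
+# tests.kubernetes.perf when DUT="kubernetes".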
+PYBOT_ARGS="--consolewidth 100 --loglevel TRACE --variable TOPOLOGY_PATH:${WORKING_TOPOLOGY} --suite tests.${DUT}.perf"
+
 case "$TEST_TAG" in
-    # run specific performance tests based on jenkins job type variable
+    # Select specific performance tests based on the Jenkins job type variable
     PERFTEST_DAILY )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrdiscANDnic_intel-x520-da2AND1t1cORndrdiscANDnic_intel-x520-da2AND2t2c \
-              tests/
-        RETURN_STATUS=$(echo $?)
+        TAGS=('ndrdiscANDnic_intel-x520-da2AND1c'
+              'ndrdiscANDnic_intel-x520-da2AND2c'
+              'ndrdiscAND1cANDipsec'
+              'ndrdiscAND2cANDipsec')
         ;;
     PERFTEST_SEMI_WEEKLY )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrdiscANDnic_intel-x710AND1t1cORndrdiscANDnic_intel-x710AND2t2cORndrdiscANDnic_intel-xl710AND1t1cORndrdiscANDnic_intel-xl710AND2t2c \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VERIFY-PERF-NDRDISC )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrdiscAND1t1cORndrdiscAND2t2c \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VERIFY-PERF-PDRDISC )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include pdrdiscAND1t1cORpdrdiscAND2t2c \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VERIFY-PERF-NDRCHK )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrchkAND1t1cORndrchkAND2t2c \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_NDRCHK_DAILY )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrchkAND1t1cORndrchkAND2t2c \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VERIFY-PERF-IP4 )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDip4baseORndrdiscANDnic_intel-x520-da2AND1t1cANDip4fwdANDfib_2m \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VERIFY-PERF-IP6 )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDip6baseORndrdiscANDnic_intel-x520-da2AND1t1cANDip6fwdANDfib_2m \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VERIFY-PERF-L2 )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDl2xcbaseORndrdiscANDnic_intel-x520-da2AND1t1cANDl2bdbase \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VERIFY-PERF-LISP )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDlisp \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VERIFY-PERF-VXLAN )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDvxlan \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VERIFY-PERF-VHOST )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include ndrdiscANDnic_intel-x520-da2AND1t1cANDvhost \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VPP-VERIFY-PERF-IP4 )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include pdrchkANDnic_intel-x520-da2AND1t1cANDip4baseORpdrchkANDnic_intel-x520-da2AND1t1cANDip4fwdANDfib_2m \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VPP-VERIFY-PERF-IP6 )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include pdrchkANDnic_intel-x520-da2AND1t1cANDip6baseORpdrchkANDnic_intel-x520-da2AND1t1cANDip6fwdANDfib_2m \
-              tests/
-        RETURN_STATUS=$(echo $?)
+        TAGS=('ndrdiscANDnic_intel-x710AND1c'
+              'ndrdiscANDnic_intel-x710AND2c'
+              'ndrdiscANDnic_intel-xl710AND1c'
+              'ndrdiscANDnic_intel-xl710AND2c')
         ;;
-    VPP-VERIFY-PERF-L2 )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include pdrchkANDnic_intel-x520-da2AND1t1cANDl2xcbaseORpdrchkANDnic_intel-x520-da2AND1t1cANDl2bdbase \
-              tests/
-        RETURN_STATUS=$(echo $?)
+    PERFTEST_MRR_DAILY )
+        TAGS=('mrrAND64bAND1c'
+              'mrrAND64bAND2c'
+              'mrrAND64bAND4c'
+              'mrrAND78bAND1c'
+              'mrrAND78bAND2c'
+              'mrrAND78bAND4c'
+              'mrrANDimixAND1cANDvhost'
+              'mrrANDimixAND2cANDvhost'
+              'mrrANDimixAND4cANDvhost'
+              'mrrANDimixAND1cANDmemif'
+              'mrrANDimixAND2cANDmemif'
+              'mrrANDimixAND4cANDmemif')
         ;;
-    VPP-VERIFY-PERF-LISP )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include pdrchkANDnic_intel-x520-da2AND1t1cANDlisp \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VPP-VERIFY-PERF-VXLAN )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include pdrchkANDnic_intel-x520-da2AND1t1cANDvxlan \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VPP-VERIFY-PERF-VHOST )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include pdrdiscANDnic_intel-x520-da2AND1t1cANDvhost \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    VPP-VERIFY-PERF-ACL )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --include pdrdiscANDnic_intel-x520-da2AND1t1cANDacl \
-              --include pdrdiscANDnic_intel-x520-da2AND2t2cANDacl \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_LONG )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              --exclude SKIP_PATCH \
-              -i NDRPDRDISC \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_SHORT )
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              -i NDRCHK \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_NIGHTLY )
-        #run all available tests
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              tests/
-        RETURN_STATUS=$(echo $?)
+    VERIFY-PERF-PATCH )
+        if [[ -z "$TEST_TAG_STRING" ]]; then
+            # If nothing is specified, run a pre-selected set of tests defined
+            # by the following tags. Array items are concatenated by OR in
+            # Robot Framework.
+            TEST_TAG_ARRAY=('mrrANDnic_intel-x710AND1cAND64bANDip4base'
+                            'mrrANDnic_intel-x710AND1cAND78bANDip6base'
+                            'mrrANDnic_intel-x710AND1cAND64bANDl2bdbase')
+        else
+            # If the trigger contains tags, split them into an array.
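+            # e.g. TEST_TAG_STRING="mrrANDip4base:!SKIP_PATCH" becomes the
+            # array ('mrrANDip4base' '!SKIP_PATCH').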
+            TEST_TAG_ARRAY=(${TEST_TAG_STRING//:/ })
+        fi
+
+        TAGS=()
+
+        for TAG in "${TEST_TAG_ARRAY[@]}"; do
+            if [[ ${TAG} == "!"* ]]; then
+                # Exclude tags are not prefixed.
+                TAGS+=("${TAG}")
+            else
+                # We will prefix with perftest to prevent running other tests
+                # (e.g. Functional).
+                prefix="perftestAND"
+                if [[ ${JOB_NAME} == vpp-* ]] ; then
+                    # For VPP jobs, automatically restrict tests to the Intel
+                    # x710 NIC and MRR traffic evaluation.
+                    prefix="${prefix}mrrANDnic_intel-x710AND"
+                fi
+                TAGS+=("$prefix${TAG}")
+            fi
+        done
         ;;
     * )
-        # run full performance test suite and exit on fail
-        pybot ${PYBOT_ARGS} \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -v DPDK_TEST:True \
-              -s "tests.kubernetes.perf" \
-              tests/
-        RETURN_STATUS=$(echo $?)
+        TAGS=('perftest')
 esac
 
-# Pybot output post-processing
-echo Post-processing test data...
+# Convert the selected TAGs into pybot --include/--exclude options
+EXPANDED_TAGS=()
+for TAG in "${TAGS[@]}"; do
+    if [[ ${TAG} == "!"* ]]; then
+        EXPANDED_TAGS+=(" --exclude ${TAG#$"!"} ")
+    else
+        EXPANDED_TAGS+=(" --include ${TAG} ")
+    fi
+done
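+# e.g. TAGS=('mrrAND64bAND1c' '!SKIP_PATCH') expands to
+# " --include mrrAND64bAND1c " and " --exclude SKIP_PATCH ".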
 
-python ${SCRIPT_DIR}/resources/tools/scripts/robot_output_parser.py \
-       -i ${SCRIPT_DIR}/output.xml \
-       -o ${SCRIPT_DIR}/output_perf_data.xml \
-       -v ${VPP_STABLE_VER}
-if [ ! $? -eq 0 ]; then
-    echo "Parsing ${SCRIPT_DIR}/output.xml failed"
-fi
+# Execute the test
+pybot ${PYBOT_ARGS}${EXPANDED_TAGS[@]} tests/
+RETURN_STATUS=$?
 
-# Archive artifacts
-mkdir -p archive
-for i in ${ARCHIVE_ARTIFACTS[@]}; do
-    cp $( readlink -f ${i} | tr '\n' ' ' ) archive/
+# Archive JOB artifacts in Jenkins
+for i in ${JOB_ARCHIVE_ARTIFACTS[@]}; do
+    cp $( readlink -f ${i} | tr '\n' ' ' ) ${JOB_ARCHIVE_DIR}/
+done
+# Archive JOB artifacts to logs.fd.io
+for i in ${LOG_ARCHIVE_ARTIFACTS[@]}; do
+    cp $( readlink -f ${i} | tr '\n' ' ' ) ${LOG_ARCHIVE_DIR}/
 done
-
-echo Post-processing finished.
 
 exit ${RETURN_STATUS}