report 18.07: reformatted known issues tables in rls notes sections.
[csit.git] / bootstrap-verify-perf.sh
index 925e3a6..731c5da 100755
@@ -1,5 +1,5 @@
-#!/bin/bash
-# Copyright (c) 2016 Cisco and/or its affiliates.
+#!/usr/bin/env bash
+# Copyright (c) 2018 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -x
+set -xo pipefail
 
+# FUNCTIONS
+function warn () {
+    # Prints the message to standard error.
+    echo "$@" >&2
+}
+
+function die () {
+    # Prints the message to standard error and exits with the error code
+    # specified by the first argument.
+    status="$1"
+    shift
+    warn "$@"
+    exit "$status"
+}
+
+function help () {
+    # Displays help message.
+    die 1 "Usage: `basename $0` csit-[dpdk|vpp|ligato]-[2n-skx|3n-skx|3n-hsw]"
+}
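+
+# Example invocation when JOB_NAME is not set and the test code is passed as
+# the first argument (hypothetical value matching the usage string above):
+#   ./bootstrap-verify-perf.sh csit-vpp-3n-hsw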
+
+function cancel_all () {
+    # Trap function to get into consistent state.
+    python ${SCRIPT_DIR}/resources/tools/scripts/topo_cleanup.py -t $1 || {
+        die 1 "Failure during execution of topology cleanup script!"
+    }
+    python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -c -t $1 || {
+        die 1 "Failure during execution of topology un-reservation script!"
+    }
+}
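+
+# Once a testbed is reserved, cancel_all is installed as an EXIT trap (see the
+# reservation loop below), so the testbed is cleaned up and un-reserved even
+# when the script fails.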
+
+# VARIABLES
 # Space separated list of available testbeds, described by topology files
-TOPOLOGIES="topologies/available/lf_testbed1-X710-X520.yaml \
-            topologies/available/lf_testbed2-X710-X520.yaml \
-            topologies/available/lf_testbed3-X710-X520.yaml"
+TOPOLOGIES_3N_HSW=(topologies/available/lf_3n_hsw_testbed1.yaml
+                   topologies/available/lf_3n_hsw_testbed2.yaml
+                   topologies/available/lf_3n_hsw_testbed3.yaml)
+TOPOLOGIES_2N_SKX=(topologies/available/lf_2n_skx_testbed21.yaml
+                   topologies/available/lf_2n_skx_testbed24.yaml)
+TOPOLOGIES_3N_SKX=(topologies/available/lf_3n_skx_testbed31.yaml
+                   topologies/available/lf_3n_skx_testbed32.yaml)
 
-VPP_STABLE_VER="1.0.0-437~g8f15e92_amd64"
-VPP_REPO_URL="https://nexus.fd.io/service/local/repositories/fd.io.dev/content/io/fd/vpp"
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export PYTHONPATH=${SCRIPT_DIR}
 
-# Reservation dir
 RESERVATION_DIR="/tmp/reservation_dir"
-INSTALLATION_DIR="/tmp/install_dir"
-
-PYBOT_ARGS="--noncritical MULTI_THREAD"
-
-ARCHIVE_ARTIFACTS=(log.html output.xml report.html output_perf_data.json)
-
-# If we run this script from CSIT jobs we want to use stable vpp version
-if [[ ${JOB_NAME} == csit-* ]] ;
-then
-    mkdir vpp_download
-    cd vpp_download
-    #download vpp build from nexus and set VPP_DEBS variable
-    wget -q "${VPP_REPO_URL}/vpp/${VPP_STABLE_VER}/vpp-${VPP_STABLE_VER}.deb" || exit
-    wget -q "${VPP_REPO_URL}/vpp-dbg/${VPP_STABLE_VER}/vpp-dbg-${VPP_STABLE_VER}.deb" || exit
-    wget -q "${VPP_REPO_URL}/vpp-dev/${VPP_STABLE_VER}/vpp-dev-${VPP_STABLE_VER}.deb" || exit
-    wget -q "${VPP_REPO_URL}/vpp-dpdk-dev/${VPP_STABLE_VER}/vpp-dpdk-dev-${VPP_STABLE_VER}.deb" || exit
-    wget -q "${VPP_REPO_URL}/vpp-dpdk-dkms/${VPP_STABLE_VER}/vpp-dpdk-dkms-${VPP_STABLE_VER}.deb" || exit
-    wget -q "${VPP_REPO_URL}/vpp-lib/${VPP_STABLE_VER}/vpp-lib-${VPP_STABLE_VER}.deb" || exit
-    VPP_DEBS="$( readlink -f *.deb | tr '\n' ' ' )"
-    PYBOT_ARGS="${PYBOT_ARGS} --exitonfailure"
-    cd ..
-
-# If we run this script from vpp project we want to use local build
-elif [[ ${JOB_NAME} == vpp-* ]] ;
-then
-    #use local packages provided as argument list
-    # Jenkins VPP deb paths (convert to full path)
-    VPP_DEBS="$( readlink -f $@ | tr '\n' ' ' )"
-else
-    echo "Unable to identify job type based on JOB_NAME variable: ${JOB_NAME}"
-    exit 1
+DOWNLOAD_DIR="${SCRIPT_DIR}/download_dir"
+ARCHIVE_DIR="${SCRIPT_DIR}/archive"
+
+mkdir -p ${DOWNLOAD_DIR} || {
+    die 1 "Failed to create download dir!"
+}
+mkdir -p ${ARCHIVE_DIR} || {
+    die 1 "Failed to create archive dir!"
+}
+
+# Get test code.
+TEST_CODE=${JOB_NAME-}
+if [[ -z ${TEST_CODE} ]]; then
+    TEST_CODE=${1}
+    shift
+fi
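+
+# TEST_CODE now holds either the Jenkins JOB_NAME or the first positional
+# argument, e.g. the hypothetical "csit-vpp-perf-mrr-daily-master-2n-skx".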
+
+# TOPOLOGY SELECTION
+case "$TEST_CODE" in
+    *2n-skx*)
+        TOPOLOGIES="${TOPOLOGIES_2N_SKX[@]}"
+        TOPOLOGIES_TAGS="2_node_*_link_topo"
+        ;;
+    *3n-skx*)
+        TOPOLOGIES="${TOPOLOGIES_3N_SKX[@]}"
+        TOPOLOGIES_TAGS="3_node_*_link_topo"
+        ;;
+    *)
+        # Fallback to 3-node Haswell by default (backward compatibility)
+        TOPOLOGIES="${TOPOLOGIES_3N_HSW[@]}"
+        TOPOLOGIES_TAGS="3_node_*_link_topo"
+        ;;
+esac
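+
+# E.g. the hypothetical job name above contains "2n-skx", so the 2-node Skylake
+# testbeds and the "2_node_*_link_topo" suite tag would be selected.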
+
+if [[ -z "${TOPOLOGIES}" ]]; then
+    die 1 "No applicable topology found!"
 fi
 
-CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-WORKING_TOPOLOGY=""
-export PYTHONPATH=${CUR_DIR}
+cd ${DOWNLOAD_DIR}
+case "$TEST_CODE" in
+    *hc2vpp*)
+        DUT="hc2vpp"
+        ;;
+    *vpp*)
+        DUT="vpp"
+
+        case "$TEST_CODE" in
+            csit-vpp-*)
+                # Use downloaded packages with specific version
+                if [[ "$TEST_CODE" == *daily* ]] || \
+                   [[ "$TEST_CODE" == *weekly* ]] || \
+                   [[ "$TEST_CODE" == *timed* ]];
+                then
+                    echo Downloading latest VPP packages from NEXUS...
+                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
+                        --skip-install || {
+                        die 1 "Failed to get VPP packages!"
+                    }
+                else
+                    echo Downloading VPP packages of specific version from NEXUS...
+                    DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
+                    VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
+                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
+                        --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER} || {
+                        die 1 "Failed to get VPP packages!"
+                    }
+                fi
+                ;;
+            vpp-csit-*)
+                # Use local built packages.
+                mv ../${DUT}*.deb ${DOWNLOAD_DIR}/
+                ;;
+            *)
+                die 1 "Unable to identify job type from: ${TEST_CODE}!"
+                ;;
+        esac
+        ;;
+    *ligato*)
+        DUT="kubernetes"
+
+        case "$TEST_CODE" in
+            csit-*)
+                # Use downloaded packages with specific version
+                if [[ "$TEST_CODE" == *daily* ]] || \
+                   [[ "$TEST_CODE" == *weekly* ]] || \
+                   [[ "$TEST_CODE" == *timed* ]];
+                then
+                    echo Downloading latest VPP packages from NEXUS...
+                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
+                        --skip-install || {
+                        die 1 "Failed to get VPP packages!"
+                    }
+                else
+                    echo Downloading VPP packages of specific version from NEXUS...
+                    DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
+                    VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
+                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
+                        --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER} || {
+                        die 1 "Failed to get VPP packages!"
+                    }
+                fi
+                ;;
+            vpp-csit-*)
+                # Use local built packages.
+                mv ../vpp*.deb ${DOWNLOAD_DIR}/
+                ;;
+            *)
+                die 1 "Unable to identify job type from: ${TEST_CODE}!"
+                ;;
+        esac
+        # Extract VPP API to specific folder
+        dpkg -x ${DOWNLOAD_DIR}/vpp_*.deb /tmp/vpp || {
+            die 1 "Failed to extract ${DUT} package!"
+        }
+
+        LIGATO_REPO_URL="https://github.com/ligato/"
+        VPP_AGENT_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_AGENT_STABLE_VER)
+        DOCKER_DEB="docker-ce_18.03.0~ce-0~ubuntu_amd64.deb"
 
-sudo apt-get -y update
-sudo apt-get -y install libpython2.7-dev python-virtualenv
+        # Clone & checkout the stable vpp-agent branch.
+        cd ../..
+        git clone -b ${VPP_AGENT_STABLE_VER} --single-branch \
+            ${LIGATO_REPO_URL}/vpp-agent vpp-agent || {
+            die 1 "Failed to run: git clone ${LIGATO_REPO_URL}/vpp-agent!"
+        }
+        cd vpp-agent
 
-virtualenv --system-site-packages env
-. env/bin/activate
+        # Install Docker
+        wget -q https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/${DOCKER_DEB} || {
+            die 1 "Failed to download Docker package!"
+        }
 
-echo pip install
-pip install -r requirements.txt
+        sudo dpkg -i ${DOCKER_DEB} || {
+            die 1 "Failed to install Docker!"
+        }
+
+        # Pull the ligato/dev-vpp-agent Docker image and re-tag it locally as dev_vpp_agent.
+        sudo docker pull ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER} || {
+            die 1 "Failed to pull Docker image!"
+        }
+
+        sudo docker tag ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER}\
+            dev_vpp_agent:latest || {
+            die 1 "Failed to tag Docker image!"
+        }
+
+        # Start dev_vpp_agent container as daemon
+        sudo docker run --rm -itd --name agentcnt dev_vpp_agent bash || {
+            die 1 "Failed to run Docker image!"
+        }
+
+        # Copy latest vpp api into running container
+        sudo docker cp /tmp/vpp/usr/share/vpp/api agentcnt:/usr/share/vpp || {
+            die 1 "Failed to copy files Docker image!"
+        }
+
+        for f in ${DOWNLOAD_DIR}/*; do
+            sudo docker cp $f agentcnt:/opt/vpp-agent/dev/vpp/build-root/ || {
+                die 1 "Failed to copy files Docker image!"
+            }
+        done
+
+        # Recompile vpp-agent
+        sudo docker exec -i agentcnt \
+            script -qec '. ~/.bashrc; cd /go/src/github.com/ligato/vpp-agent && make generate && make install' || {
+            die 1 "Failed to build vpp-agent in Docker image!"
+        }
+        # Save container state
+        sudo docker commit `sudo docker ps -q` dev_vpp_agent:latest || {
+            die 1 "Failed to commit state of Docker image!"
+        }
+
+        # Build prod_vpp_agent docker image
+        cd docker/prod/ &&\
+            sudo docker build --tag prod_vpp_agent --no-cache . || {
+                die 1 "Failed to build Docker image!"
+            }
+        # Export Docker image
+        sudo docker save prod_vpp_agent | gzip > prod_vpp_agent.tar.gz || {
+            die 1 "Failed to save Docker image!"
+        }
+        DOCKER_IMAGE="$( readlink -f prod_vpp_agent.tar.gz | tr '\n' ' ' )"
+        rm -r ${DOWNLOAD_DIR}/vpp*
+        mv ${DOCKER_IMAGE} ${DOWNLOAD_DIR}/
+        ;;
+    *dpdk*)
+        DUT="dpdk"
 
-# We iterate over available topologies and wait until we reserve topology
+        DPDK_REPO='https://fast.dpdk.org/rel/'
+        # Use downloaded packages with specific version
+        if [[ "$TEST_CODE" == *daily* ]] || \
+           [[ "$TEST_CODE" == *weekly* ]] || \
+           [[ "$TEST_CODE" == *timed* ]];
+        then
+            echo "Downloading latest DPDK packages from repo..."
+            DPDK_STABLE_VER=$(wget --no-check-certificate --quiet -O - ${DPDK_REPO} | \
+                grep -v '2015' | grep -Eo 'dpdk-[^\"]+xz' | tail -1)
+        else
+            echo "Downloading DPDK packages of specific version from repo..."
+            DPDK_STABLE_VER='dpdk-18.05.tar.xz'
+        fi
+        if [[ ! -f ${DPDK_STABLE_VER} ]]; then
+            wget --no-check-certificate ${DPDK_REPO}${DPDK_STABLE_VER} || {
+                die 1 "Failed to get DPDK package from ${DPDK_REPO}!"
+            }
+        fi
+        ;;
+    *)
+        die 1 "Unable to identify DUT type from: ${TEST_CODE}!"
+        ;;
+esac
+cd ${SCRIPT_DIR}
+
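+# Depending on the DUT branch above, DOWNLOAD_DIR now holds VPP .deb packages,
+# the exported prod_vpp_agent.tar.gz image (ligato), or a DPDK tarball.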
+if [[ ! "$(ls -A ${DOWNLOAD_DIR})" ]]; then
+    die 1 "No artifacts downloaded!"
+fi
+
+# ENVIRONMENT PREPARATION
+rm -rf env
+
+pip install virtualenv || {
+    die 1 "Failed to install virtual env!"
+}
+virtualenv --system-site-packages env || {
+    die 1 "Failed to create virtual env!"
+}
+source env/bin/activate || {
+    die 1 "Failed to activate virtual env!"
+}
+pip install -r requirements.txt || {
+    die 1 "Failed to install requirements to virtual env!"
+}
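+
+# From this point on, python, pip and pybot run from the virtualenv created above.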
+
+# We iterate over available topologies and wait until we reserve topology.
 while :; do
     for TOPOLOGY in ${TOPOLOGIES};
     do
-        python ${CUR_DIR}/resources/tools/topo_reservation.py -t ${TOPOLOGY}
+        python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -t ${TOPOLOGY}
         if [ $? -eq 0 ]; then
             WORKING_TOPOLOGY=${TOPOLOGY}
             echo "Reserved: ${WORKING_TOPOLOGY}"
+            # On script exit we clean testbed.
+            trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
             break
         fi
     done
 
-    if [ ! -z "${WORKING_TOPOLOGY}" ]; then
-        # Exit the infinite while loop if we made a reservation
+    if [ -n "${WORKING_TOPOLOGY}" ]; then
+        # Exit the infinite while loop if we made a reservation.
         break
     fi
 
-    # Wait ~3minutes before next try
+    # Wait ~3 minutes before the next try.
     SLEEP_TIME=$[ ( $RANDOM % 20 ) + 180 ]s
     echo "Sleeping ${SLEEP_TIME}"
     sleep ${SLEEP_TIME}
 done
 
-function cancel_all {
-    python ${CUR_DIR}/resources/tools/topo_installation.py -c -d ${INSTALLATION_DIR} -t $1
-    python ${CUR_DIR}/resources/tools/topo_reservation.py -c -t $1
+# Clean testbed before execution.
+python ${SCRIPT_DIR}/resources/tools/scripts/topo_cleanup.py -t ${WORKING_TOPOLOGY} || {
+    die 1 "Failed to cleanup topologies!"
 }
 
-# On script exit we cancel the reservation and installation and delete all vpp
-# packages
-trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
-
-python ${CUR_DIR}/resources/tools/topo_installation.py -t ${WORKING_TOPOLOGY} \
-                                                       -d ${INSTALLATION_DIR} \
-                                                       -p ${VPP_DEBS}
-if [ $? -eq 0 ]; then
-    echo "VPP Installed on hosts from: ${WORKING_TOPOLOGY}"
-else
-    echo "Failed to copy vpp deb files to DUTs"
-    exit 1
-fi
+# CSIT EXECUTION
+PYBOT_ARGS="--outputdir ${ARCHIVE_DIR} --loglevel TRACE --variable TOPOLOGY_PATH:${WORKING_TOPOLOGY} --suite tests.${DUT}.perf"
 
-case "$TEST_TAG" in
-    # run specific performance tests based on jenkins job type variable
-    PERFTEST_LONG )
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -i perftest_long \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_SHORT )
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -i perftest_short \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_LONG_BRIDGE )
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -s "performance.Long_Bridge_Domain*" \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_LONG_IPV4 )
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -s "performance.Long_IPv4*" \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_LONG_IPV6 )
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -s "performance.Long_IPv6*" \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_LONG_XCONNECT )
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -s "performance.Long_Xconnect*" \
-              tests/
-        RETURN_STATUS=$(echo $?)
-        ;;
-    PERFTEST_LONG_XCONNECT_DOT1Q )
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -s "performance.Long_Xconnect_Dot1q*" \
+# NIC SELECTION
+# All topologies NICs
+TOPOLOGIES_NICS=($(grep -hoPR "model: \K.*" topologies/available/* | sort -u))
+# Selected topology NICs
+TOPOLOGY_NICS=($(grep -hoPR "model: \K.*" ${WORKING_TOPOLOGY} | sort -u))
+# All topologies NICs - Selected topology NICs
+EXCLUDE_NICS=($(comm -13 <(printf '%s\n' "${TOPOLOGY_NICS[@]}") <(printf '%s\n' "${TOPOLOGIES_NICS[@]}")))
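+# Illustration with hypothetical models: if the available topologies contain
+# Intel-X520-DA2 and Intel-X710 but the reserved one only has Intel-X710, then
+# EXCLUDE_NICS=(Intel-X520-DA2) and matching tests are excluded via the !NIC_
+# exclude tags added in the default branch below.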
+
+case "$TEST_CODE" in
+    # Select specific performance tests based on the Jenkins job type variable.
+    *ndrpdr-weekly* )
+        TAGS=(ndrpdrANDnic_intel-x520-da2AND1c
+              ndrpdrANDnic_intel-x520-da2AND2c
+              ndrpdrAND1cANDipsec
+              ndrpdrAND2cANDipsec)
         ;;
-    PERFTEST_NDR )
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -s performance -i NDR \
-              tests/
+    *ndrpdr-timed* )
         ;;
-    PERFTEST_PDR )
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -s performance -i PDR \
-              tests/
-        RETURN_STATUS=$(echo $?)
+    *mrr-daily* )
+        TAGS=(mrrAND64bAND1c
+              mrrAND64bAND2c
+              mrrAND64bAND4c
+              mrrAND78bAND1c
+              mrrAND78bAND2c
+              mrrAND78bAND4c
+              mrrANDimixAND1cANDvhost
+              mrrANDimixAND2cANDvhost
+              mrrANDimixAND4cANDvhost
+              mrrANDimixAND1cANDmemif
+              mrrANDimixAND2cANDmemif
+              mrrANDimixAND4cANDmemif)
         ;;
     * )
-        # run full performance test suite and exit on fail
-        pybot ${PYBOT_ARGS} \
-              -L TRACE \
-              -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-              -s performance \
-              tests/
-        RETURN_STATUS=$(echo $?)
-esac
+        if [[ -z "$TEST_TAG_STRING" ]]; then
+            # If nothing is specified, run a pre-selected set of tests
+            # identified by the following tags. Array items are concatenated
+            # with OR in Robot Framework.
+            TEST_TAG_ARRAY=(mrrANDnic_intel-x710AND1cAND64bANDip4base
+                            mrrANDnic_intel-x710AND1cAND78bANDip6base
+                            mrrANDnic_intel-x710AND1cAND64bANDl2bdbase)
+        else
+            # If the trigger contains tags, split them into an array.
+            TEST_TAG_ARRAY=(${TEST_TAG_STRING//:/ })
+            # Add exclude tags for NICs not present in the selected topology.
+            TEST_TAG_ARRAY+=("${EXCLUDE_NICS[@]/#/!NIC_}")
+        fi
 
-# Pybot output post-processing
-echo Post-processing test data...
+        TAGS=()
 
-python ${CUR_DIR}/resources/tools/robot_output_parser.py \
-       -i ${CUR_DIR}/output.xml \
-       -o ${CUR_DIR}/output_perf_data.json \
-       -v ${VPP_STABLE_VER}
-if [ ! $? -eq 0 ]; then
-    echo "Parsing ${CUR_DIR}/output.xml failed"
-fi
+        # We will prefix with perftest to prevent running other tests
+        # (e.g. Functional).
+        prefix="perftestAND"
+        if [[ ${TEST_CODE} == vpp-* ]]; then
+            # Automatic prefixing for VPP jobs to limit the NIC used and
+            # traffic evaluation to MRR.
+            prefix="${prefix}mrrANDnic_intel-x710AND"
+        fi
+        for TAG in "${TEST_TAG_ARRAY[@]}"; do
+            if [[ ${TAG} == "!"* ]]; then
+                # Exclude tags are not prefixed.
+                TAGS+=("${TAG}")
+            else
+                TAGS+=("$prefix${TAG}")
+            fi
+        done
+        ;;
+esac
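+
+# Example: for a csit-* job with an empty TEST_TAG_STRING, each default tag gets
+# the perftest prefix, e.g. "perftestANDmrrANDnic_intel-x710AND1cAND64bANDip4base".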
 
-# Archive artifacts
-mkdir archive
-for i in ${ARCHIVE_ARTIFACTS[@]}; do
-    cp $( readlink -f ${i} | tr '\n' ' ' ) archive/
+# Concatenate TAG selections into pybot --include / --exclude options.
+EXPANDED_TAGS=()
+for TAG in "${TAGS[@]}"; do
+    if [[ ${TAG} == "!"* ]]; then
+        EXPANDED_TAGS+=(" --exclude ${TAG#$"!"} ")
+    else
+        EXPANDED_TAGS+=(" --include ${TOPOLOGIES_TAGS}AND${TAG} ")
+    fi
 done
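+# Each include tag also gets the topology tag prepended, yielding options such
+# as "--include 3_node_*_link_topoANDperftestANDmrrANDnic_intel-x710AND1cAND64bANDip4base".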
 
-echo Post-processing finished.
+# Execute the test
+pybot ${PYBOT_ARGS}${EXPANDED_TAGS[@]} tests/
+RETURN_STATUS=$?
+
+# Create an additional archive if the WORKSPACE variable is set. This way, when
+# the script runs in Jenkins, everything is automatically archived to logs.fd.io.
+if [[ -n ${WORKSPACE-} ]]; then
+    cp -r ${ARCHIVE_DIR}/ $WORKSPACE/archives/
+fi
 
 exit ${RETURN_STATUS}