CSIT-1193 De-duplicate bootstrap scripts into one 14/13814/35
author Peter Mikus <pmikus@cisco.com>
Mon, 30 Jul 2018 17:28:16 +0000 (17:28 +0000)
committer Jan Gelety <jgelety@cisco.com>
Fri, 10 Aug 2018 09:16:13 +0000 (09:16 +0000)
- DPDK and VPP bootstrap scripts merged into one (ligato will follow); see the example invocation below.
- Added more error detection.
- Added topo_cleanup.py to clean up the testbed before and after each test run.
- Removed installation of VPP from the bootstrap (moved to suite setup).
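
The unified script resolves the test code from JOB_NAME, falling back to its
first positional argument, so it can also be run manually outside Jenkins
(the job name below is illustrative):

    ./bootstrap-verify-perf.sh csit-vpp-perf-mrr-daily-master-3n-hsw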

Change-Id: I7a86117eb03cbe4ee8fde47baeed58bc86c0dfb2
Signed-off-by: Peter Mikus <pmikus@cisco.com>
.gitignore
bootstrap-verify-perf-DPDK.sh
bootstrap-verify-perf-ligato.sh
bootstrap-verify-perf.sh
resources/libraries/python/ContainerUtils.py
resources/libraries/python/SetupFramework.py
resources/tools/scripts/topo_cleanup.py [new file with mode: 0755]
tests/dpdk/dpdk_scripts/install_dpdk.sh
tests/vpp/perf/__init__.robot

index 2d27b2c..41ac50a 100644 (file)
@@ -1,4 +1,6 @@
 /env
+/download_dir
+/archive_dir
 outputs
 output.xml
 log.html
index d24167d..f6cdb22 100755 (executable)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -xo pipefail
-
-# TOPOLOGY
-# Space separated list of available testbeds, described by topology files
-TOPOLOGIES_3N_HSW="topologies/available/lf_3n_hsw_testbed1.yaml \
-                   topologies/available/lf_3n_hsw_testbed2.yaml \
-                   topologies/available/lf_3n_hsw_testbed3.yaml"
-TOPOLOGIES_2N_SKX="topologies/available/lf_2n_skx_testbed21.yaml \
-                   topologies/available/lf_2n_skx_testbed24.yaml"
-TOPOLOGIES_3N_SKX="topologies/available/lf_3n_skx_testbed31.yaml \
-                   topologies/available/lf_3n_skx_testbed32.yaml"
-
-# SYSTEM
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-export PYTHONPATH=${SCRIPT_DIR}
-export DEBIAN_FRONTEND=noninteractive
-
-# RESERVATION
-RESERVATION_DIR="/tmp/reservation_dir"
-
-# ARCHIVE
-JOB_ARCHIVE_ARTIFACTS=(log.html output.xml report.html)
-LOG_ARCHIVE_ARTIFACTS=(log.html output.xml report.html)
-JOB_ARCHIVE_DIR="archive"
-LOG_ARCHIVE_DIR="$WORKSPACE/archives"
-mkdir -p ${JOB_ARCHIVE_DIR}
-mkdir -p ${LOG_ARCHIVE_DIR}
-
-# JOB SETTING
-case ${JOB_NAME} in
-    *2n-skx*)
-        TOPOLOGIES=$TOPOLOGIES_2N_SKX
-        TOPOLOGIES_TAGS="2_node_*_link_topo"
-        ;;
-    *3n-skx*)
-        TOPOLOGIES=$TOPOLOGIES_3N_SKX
-        TOPOLOGIES_TAGS="3_node_*_link_topo"
-        ;;
-    *)
-        TOPOLOGIES=$TOPOLOGIES_3N_HSW
-        TOPOLOGIES_TAGS="3_node_*_link_topo"
-        ;;
-esac
-case ${JOB_NAME} in
-    *hc2vpp*)
-        DUT="hc2vpp"
-        ;;
-    *vpp*)
-        DUT="vpp"
-        ;;
-    *ligato*)
-        DUT="kubernetes"
-        ;;
-    *dpdk*)
-        DUT="dpdk"
-
-        # If we run this script from CSIT jobs we want to use stable version
-        DPDK_REPO='https://fast.dpdk.org/rel/'
-        if [[ ${TEST_TAG} == *DAILY ]] || \
-           [[ ${TEST_TAG} == *WEEKLY ]];
-        then
-            echo Downloading latest DPDK packages from repo...
-            DPDK_STABLE_VER=$(wget --no-check-certificate --quiet -O - ${DPDK_REPO} | \
-                grep -v '2015' | grep -Eo 'dpdk-[^\"]+xz' | tail -1)
-        else
-            echo Downloading DPDK packages of specific version from repo...
-            DPDK_STABLE_VER='dpdk-18.05.tar.xz'
-        fi
-        wget --no-check-certificate --quiet ${DPDK_REPO}${DPDK_STABLE_VER}
-        ;;
-    *)
-        echo "Unable to identify dut type based on JOB_NAME variable: ${JOB_NAME}"
-        exit 1
-        ;;
-esac
-
-
-# ENVIRONMENT PREPARATION
-virtualenv --system-site-packages env
-. env/bin/activate
-pip install -r requirements.txt
-
-if [ -z "${TOPOLOGIES}" ]; then
-    echo "No applicable topology found!"
-    exit 1
-fi
-# We iterate over available topologies and wait until we reserve topology
-while :; do
-    for TOPOLOGY in ${TOPOLOGIES};
-    do
-        python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -t ${TOPOLOGY}
-        if [ $? -eq 0 ]; then
-            WORKING_TOPOLOGY=${TOPOLOGY}
-            echo "Reserved: ${WORKING_TOPOLOGY}"
-            break
-        fi
-    done
-
-    if [ ! -z "${WORKING_TOPOLOGY}" ]; then
-        # Exit the infinite while loop if we made a reservation
-        break
-    fi
-
-    # Wait ~3minutes before next try
-    SLEEP_TIME=$[ ( $RANDOM % 20 ) + 180 ]s
-    echo "Sleeping ${SLEEP_TIME}"
-    sleep ${SLEEP_TIME}
-done
-
-#for DPDK test, we don't need to install the VPP deb
-function cancel_all {
-    python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -c -t $1
-}
-
-# On script exit we cancel the reservation
-trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
-
-# CSIT EXECUTION
-PYBOT_ARGS="--consolewidth 100 \
-            --loglevel TRACE \
-            --variable TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-            --suite tests.${DUT}.perf"
-
-case "$TEST_TAG" in
-    # select specific performance tests based on jenkins job type variable
-    PERFTEST_MRR_DAILY )
-       TAGS=('mrrAND64bAND1c'
-             'mrrAND64bAND2c'
-             'mrrAND64bAND4c'
-             'mrrAND78bAND1c'
-             'mrrAND78bAND2c'
-             'mrrAND78bAND4c'
-             'mrrANDimixAND1c'
-             'mrrANDimixAND2c'
-             'mrrANDimixAND4c')
-        ;;
-    VERIFY-PERF-PATCH )
-        if [[ -z "$TEST_TAG_STRING" ]]; then
-            # If nothing is specified, we will run pre-selected tests by
-            # following tags. Items of array will be concatenated by OR in Robot
-            # Framework.
-            TEST_TAG_ARRAY=('mrrANDnic_intel-x710AND1cAND64b')
-        else
-            # If trigger contains tags, split them into array.
-            TEST_TAG_ARRAY=(${TEST_TAG_STRING//:/ })
-        fi
-
-        TAGS=()
-
-        for TAG in "${TEST_TAG_ARRAY[@]}"; do
-            if [[ ${TAG} == "!"* ]]; then
-                # Exclude tags are not prefixed.
-                TAGS+=("${TAG}")
-            else
-                # We will prefix with perftest to prevent running other tests
-                # (e.g. Functional).
-                prefix="perftestAND"
-                if [[ ${JOB_NAME} == vpp-* ]] ; then
-                    # Automatic prefixing for VPP jobs to limit the NIC used and
-                    # traffic evaluation to MRR.
-                    prefix="${prefix}mrrANDnic_intel-x710AND"
-                fi
-                TAGS+=("$prefix${TAG}")
-            fi
-        done
-        ;;
-    * )
-        TAGS=('perftest')
-esac
-
-# Catenate TAG selections
-EXPANDED_TAGS=()
-for TAG in "${TAGS[@]}"; do
-    if [[ ${TAG} == "!"* ]]; then
-        EXPANDED_TAGS+=(" --exclude ${TAG#$"!"} ")
-    else
-        EXPANDED_TAGS+=(" --include ${TOPOLOGIES_TAGS}AND${TAG} ")
-    fi
-done
-
-# Execute the test
-pybot ${PYBOT_ARGS}${EXPANDED_TAGS[@]} tests/
-RETURN_STATUS=$(echo $?)
-
-# Archive JOB artifacts in jenkins
-for i in ${JOB_ARCHIVE_ARTIFACTS[@]}; do
-    cp $( readlink -f ${i} | tr '\n' ' ' ) ${JOB_ARCHIVE_DIR}/
-done
-# Archive JOB artifacts to logs.fd.io
-for i in ${LOG_ARCHIVE_ARTIFACTS[@]}; do
-    cp $( readlink -f ${i} | tr '\n' ' ' ) ${LOG_ARCHIVE_DIR}/
-done
-
-exit ${RETURN_STATUS}
+# Run the unified bootstrap script, forwarding all arguments.
+./bootstrap-verify-perf.sh "$@"
index 1ab881b..f6cdb22 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -xo pipefail
-
-# Space separated list of available testbeds, described by topology files
-TOPOLOGIES="topologies/available/lf_3n_hsw_testbed1.yaml \
-            topologies/available/lf_3n_hsw_testbed2.yaml \
-            topologies/available/lf_3n_hsw_testbed3.yaml"
-
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-export PYTHONPATH=${SCRIPT_DIR}
-export DEBIAN_FRONTEND=noninteractive
-
-# Reservation dir
-RESERVATION_DIR="/tmp/reservation_dir"
-INSTALLATION_DIR="/tmp/install_dir"
-
-JOB_ARCHIVE_ARTIFACTS=(log.html output.xml report.html)
-LOG_ARCHIVE_ARTIFACTS=(log.html output.xml report.html)
-JOB_ARCHIVE_DIR="archive"
-LOG_ARCHIVE_DIR="$WORKSPACE/archives"
-mkdir -p ${JOB_ARCHIVE_DIR}
-mkdir -p ${LOG_ARCHIVE_DIR}
-
-# If we run this script from CSIT jobs we want to use stable vpp version
-if [[ ${JOB_NAME} == csit-* ]] ;
-then
-    mkdir -p vpp_download
-    cd vpp_download
-
-    if [[ ${TEST_TAG} == *DAILY ]] || \
-       [[ ${TEST_TAG} == *WEEKLY ]];
-    then
-        echo Downloading latest VPP packages from NEXUS...
-        bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
-            --skip-install
-    else
-        echo Downloading VPP packages of specific version from NEXUS...
-        DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
-        VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
-        #Temporary if arch will not be removed from VPP_STABLE_VER_UBUNTU
-        #VPP_STABLE_VER=${VPP_STABLE_VER%_amd64}
-        bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
-            --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER}
-    fi
-    # Jenkins VPP deb paths (convert to full path)
-    VPP_DEBS="$( readlink -f vpp*.deb | tr '\n' ' ' )"
-    cd ${SCRIPT_DIR}
-
-# If we run this script from vpp project we want to use local build
-elif [[ ${JOB_NAME} == vpp-* ]] ;
-then
-    # Use local packages provided as argument list
-    # Jenkins VPP deb paths (convert to full path)
-    VPP_DEBS="$( readlink -f $@ | tr '\n' ' ' )"
-else
-    echo "Unable to identify job type based on JOB_NAME variable: ${JOB_NAME}"
-    exit 1
-fi
-
-# Extract VPP API to specific folder
-dpkg -x vpp_download/vpp_*.deb /tmp/vpp
-
-LIGATO_REPO_URL='https://github.com/ligato/'
-VPP_AGENT_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_AGENT_STABLE_VER)
-DOCKER_DEB="docker-ce_18.03.0~ce-0~ubuntu_amd64.deb"
-
-# Clone & checkout stable vnf-agent
-cd .. && git clone -b ${VPP_AGENT_STABLE_VER} --single-branch \
-    ${LIGATO_REPO_URL}/vpp-agent vpp-agent
-# If the git clone fails, complain clearly and exit
-if [ $? != 0 ]; then
-    echo "Failed to run: git clone ${LIGATO_REPO_URL}/vpp-agent"
-    exit 1
-fi
-cd vpp-agent
-
-# Install Docker
-wget -q https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/${DOCKER_DEB}
-sudo dpkg -i ${DOCKER_DEB}
-# If installation fails, complain clearly and exit
-if [ $? != 0 ]; then
-    echo "Failed to install Docker"
-    exit 1
-fi
-
-# Pull ligato/dev_vpp_agent docker image and re-tag as local
-sudo docker pull ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER}
-sudo docker tag ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER}\
-    dev_vpp_agent:latest
-
-# Start dev_vpp_agent container as daemon
-sudo docker run --rm -itd --name agentcnt dev_vpp_agent bash
-
-# Copy latest vpp api into running container
-sudo docker cp /tmp/vpp/usr/share/vpp/api agentcnt:/usr/share/vpp
-for f in ${SCRIPT_DIR}/vpp_download/*; do
-    sudo docker cp $f agentcnt:/opt/vpp-agent/dev/vpp/build-root/
-done
-
-# Recompile vpp-agent
-sudo docker exec -i agentcnt \
-    script -qec '. ~/.bashrc; cd /go/src/github.com/ligato/vpp-agent && make generate && make install'
-if [ $? != 0 ]; then
-    echo "Failed to build vpp-agent in Docker image."
-    exit 1
-fi
-# Save container state
-sudo docker commit `sudo docker ps -q` dev_vpp_agent:latest
-
-# Build prod_vpp_agent docker image
-cd docker/prod/ &&\
-    sudo docker build --tag prod_vpp_agent --no-cache .
-# Export Docker image
-sudo docker save prod_vpp_agent | gzip > prod_vpp_agent.tar.gz
-# Kill running agentcnt container
-sudo docker rm -f agentcnt
-# If image build fails, complain clearly and exit
-if [ $? != 0 ]; then
-    echo "Failed to build vpp-agent Docker image."
-    exit 1
-fi
-DOCKER_IMAGE="$( readlink -f prod_vpp_agent.tar.gz | tr '\n' ' ' )"
-
-cd ${SCRIPT_DIR}
-
-WORKING_TOPOLOGY=""
-
-sudo apt-get -y update
-sudo apt-get -y install libpython2.7-dev python-virtualenv
-
-virtualenv --system-site-packages env
-. env/bin/activate
-
-echo pip install
-pip install -r requirements.txt
-
-if [ -z "${TOPOLOGIES}" ]; then
-    echo "No applicable topology found!"
-    exit 1
-fi
-# We iterate over available topologies and wait until we reserve topology
-while :; do
-    for TOPOLOGY in ${TOPOLOGIES};
-    do
-        python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -t ${TOPOLOGY}
-        if [ $? -eq 0 ]; then
-            WORKING_TOPOLOGY=${TOPOLOGY}
-            echo "Reserved: ${WORKING_TOPOLOGY}"
-            break
-        fi
-    done
-
-    if [ ! -z "${WORKING_TOPOLOGY}" ]; then
-        # Exit the infinite while loop if we made a reservation
-        break
-    fi
-
-    # Wait ~3minutes before next try
-    SLEEP_TIME=$[ ( $RANDOM % 20 ) + 180 ]s
-    echo "Sleeping ${SLEEP_TIME}"
-    sleep ${SLEEP_TIME}
-done
-
-function cancel_all {
-    python ${SCRIPT_DIR}/resources/tools/scripts/topo_container_copy.py -c -d ${INSTALLATION_DIR} -t $1
-    python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -c -t $1
-}
-
-# On script exit we cancel the reservation and installation and delete all vpp
-# packages
-trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
-
-python ${SCRIPT_DIR}/resources/tools/scripts/topo_container_copy.py \
-    -t ${WORKING_TOPOLOGY} -d ${INSTALLATION_DIR} -i ${DOCKER_IMAGE}
-if [ $? -eq 0 ]; then
-    echo "Docker image copied and loaded on hosts from: ${WORKING_TOPOLOGY}"
-else
-    echo "Failed to copy and load Docker image to DUTs"
-    exit 1
-fi
-
-# Based on job we will identify DUT
-if [[ ${JOB_NAME} == *hc2vpp* ]] ;
-then
-    DUT="hc2vpp"
-elif [[ ${JOB_NAME} == *vpp* ]] ;
-then
-    DUT="vpp"
-elif [[ ${JOB_NAME} == *ligato* ]] ;
-then
-    DUT="kubernetes"
-elif [[ ${JOB_NAME} == *dpdk* ]] ;
-then
-    DUT="dpdk"
-else
-    echo "Unable to identify dut type based on JOB_NAME variable: ${JOB_NAME}"
-    exit 1
-fi
-
-PYBOT_ARGS="--consolewidth 100 --loglevel TRACE --variable TOPOLOGY_PATH:${WORKING_TOPOLOGY} --suite tests.${DUT}.perf"
-
-case "$TEST_TAG" in
-    # select specific performance tests based on jenkins job type variable
-    PERFTEST_DAILY )
-        TAGS=('ndrdiscANDnic_intel-x520-da2AND1c'
-              'ndrdiscANDnic_intel-x520-da2AND2c'
-              'ndrdiscAND1cANDipsec'
-              'ndrdiscAND2cANDipsec')
-        ;;
-    PERFTEST_SEMI_WEEKLY )
-        TAGS=('ndrdiscANDnic_intel-x710AND1c'
-              'ndrdiscANDnic_intel-x710AND2c'
-              'ndrdiscANDnic_intel-xl710AND1c'
-              'ndrdiscANDnic_intel-xl710AND2c')
-        ;;
-    PERFTEST_MRR_DAILY )
-       TAGS=('mrrAND64bAND1c'
-             'mrrAND64bAND2c'
-             'mrrAND64bAND4c'
-             'mrrAND78bAND1c'
-             'mrrAND78bAND2c'
-             'mrrAND78bAND4c'
-             'mrrANDimixAND1cANDvhost'
-             'mrrANDimixAND2cANDvhost'
-             'mrrANDimixAND4cANDvhost'
-             'mrrANDimixAND1cANDmemif'
-             'mrrANDimixAND2cANDmemif'
-             'mrrANDimixAND4cANDmemif')
-        ;;
-    VERIFY-PERF-PATCH )
-        if [[ -z "$TEST_TAG_STRING" ]]; then
-            # If nothing is specified, we will run pre-selected tests by
-            # following tags. Items of array will be concatenated by OR in Robot
-            # Framework.
-            TEST_TAG_ARRAY=('mrrANDnic_intel-x710AND1cAND64bANDip4base'
-                            'mrrANDnic_intel-x710AND1cAND78bANDip6base'
-                            'mrrANDnic_intel-x710AND1cAND64bANDl2bdbase')
-        else
-            # If trigger contains tags, split them into array.
-            TEST_TAG_ARRAY=(${TEST_TAG_STRING//:/ })
-        fi
-
-        TAGS=()
-
-        for TAG in "${TEST_TAG_ARRAY[@]}"; do
-            if [[ ${TAG} == "!"* ]]; then
-                # Exclude tags are not prefixed.
-                TAGS+=("${TAG}")
-            else
-                # We will prefix with perftest to prevent running other tests
-                # (e.g. Functional).
-                prefix="perftestAND"
-                if [[ ${JOB_NAME} == vpp-* ]] ; then
-                    # Automatic prefixing for VPP jobs to limit the NIC used and
-                    # traffic evaluation to MRR.
-                    prefix="${prefix}mrrANDnic_intel-x710AND"
-                fi
-                TAGS+=("$prefix${TAG}")
-            fi
-        done
-        ;;
-    * )
-        TAGS=('perftest')
-esac
-
-# Catenate TAG selections
-EXPANDED_TAGS=()
-for TAG in "${TAGS[@]}"; do
-    if [[ ${TAG} == "!"* ]]; then
-        EXPANDED_TAGS+=(" --exclude ${TAG#$"!"} ")
-    else
-        EXPANDED_TAGS+=(" --include ${TAG} ")
-    fi
-done
-
-# Execute the test
-pybot ${PYBOT_ARGS}${EXPANDED_TAGS[@]} tests/
-RETURN_STATUS=$(echo $?)
-
-# Archive JOB artifacts in jenkins
-for i in ${JOB_ARCHIVE_ARTIFACTS[@]}; do
-    cp $( readlink -f ${i} | tr '\n' ' ' ) ${JOB_ARCHIVE_DIR}/
-done
-# Archive JOB artifacts to logs.fd.io
-for i in ${LOG_ARCHIVE_ARTIFACTS[@]}; do
-    cp $( readlink -f ${i} | tr '\n' ' ' ) ${LOG_ARCHIVE_DIR}/
-done
-
-exit ${RETURN_STATUS}
+# Run the unified bootstrap script, forwarding all arguments.
+./bootstrap-verify-perf.sh "$@"
index 9e1d43a..731c5da 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright (c) 2018 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 
 set -xo pipefail
 
-# TOPOLOGY
+# FUNCTIONS
+function warn () {
+    # Prints the message to standard error.
+    echo "$@" >&2
+}
+
+function die () {
+    # Prints the message to standard error and exits with the error code
+    # specified by the first argument.
+    status="$1"
+    shift
+    warn "$@"
+    exit "$status"
+}
+
+function help () {
+    # Displays help message.
+    die 1 "Usage: `basename $0` csit-[dpdk|vpp|ligato]-[2n-skx|3n-skx|3n-hsw]"
+}
+
+function cancel_all () {
+    # Trap function to get into consistent state.
+    python ${SCRIPT_DIR}/resources/tools/scripts/topo_cleanup.py -t $1 || {
+        die 1 "Failure during execution of topology cleanup script!"
+    }
+    python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -c -t $1 || {
+        die 1 "Failure during execution of topology un-reservation script!"
+    }
+}
+
+# VARIABLES
 # Space separated list of available testbeds, described by topology files
-TOPOLOGIES_3N_HSW="topologies/available/lf_3n_hsw_testbed1.yaml \
-                   topologies/available/lf_3n_hsw_testbed2.yaml \
-                   topologies/available/lf_3n_hsw_testbed3.yaml"
-TOPOLOGIES_2N_SKX="topologies/available/lf_2n_skx_testbed21.yaml \
-                   topologies/available/lf_2n_skx_testbed24.yaml"
-TOPOLOGIES_3N_SKX="topologies/available/lf_3n_skx_testbed31.yaml \
-                   topologies/available/lf_3n_skx_testbed32.yaml"
-
-# SYSTEM
+TOPOLOGIES_3N_HSW=(topologies/available/lf_3n_hsw_testbed1.yaml
+                   topologies/available/lf_3n_hsw_testbed2.yaml
+                   topologies/available/lf_3n_hsw_testbed3.yaml)
+TOPOLOGIES_2N_SKX=(topologies/available/lf_2n_skx_testbed21.yaml
+                   topologies/available/lf_2n_skx_testbed24.yaml)
+TOPOLOGIES_3N_SKX=(topologies/available/lf_3n_skx_testbed31.yaml
+                   topologies/available/lf_3n_skx_testbed32.yaml)
+
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 export PYTHONPATH=${SCRIPT_DIR}
-export DEBIAN_FRONTEND=noninteractive
 
-# RESERVATION
 RESERVATION_DIR="/tmp/reservation_dir"
-INSTALLATION_DIR="/tmp/install_dir"
-
-# ARCHIVE
-JOB_ARCHIVE_ARTIFACTS=(log.html output.xml report.html)
-LOG_ARCHIVE_ARTIFACTS=(log.html output.xml report.html)
-JOB_ARCHIVE_DIR="archive"
-LOG_ARCHIVE_DIR="$WORKSPACE/archives"
-mkdir -p ${JOB_ARCHIVE_DIR}
-mkdir -p ${LOG_ARCHIVE_DIR}
-
-# JOB SETTING
-case ${JOB_NAME} in
+DOWNLOAD_DIR="${SCRIPT_DIR}/download_dir"
+ARCHIVE_DIR="${SCRIPT_DIR}/archive"
+
+mkdir -p ${DOWNLOAD_DIR} || {
+    die 1 "Failed to create download dir!"
+}
+mkdir -p ${ARCHIVE_DIR} || {
+    die 1 "Failed to create archive dir!"
+}
+
+# Get test code.
+TEST_CODE=${JOB_NAME-}
+if [[ -z ${TEST_CODE} ]]; then
+    TEST_CODE=${1}
+    shift
+fi
+
+# TOPOLOGY SELECTION
+case "$TEST_CODE" in
     *2n-skx*)
         TOPOLOGIES=$TOPOLOGIES_2N_SKX
         TOPOLOGIES_TAGS="2_node_*_link_topo"
@@ -52,69 +86,212 @@ case ${JOB_NAME} in
         TOPOLOGIES_TAGS="3_node_*_link_topo"
         ;;
     *)
+        # Fallback to 3-node Haswell by default (backward compatibility)
         TOPOLOGIES=$TOPOLOGIES_3N_HSW
         TOPOLOGIES_TAGS="3_node_*_link_topo"
         ;;
 esac
-case ${JOB_NAME} in
+
+if [[ -z "${TOPOLOGIES}" ]]; then
+    die 1 "No applicable topology found!"
+fi
+
+cd ${DOWNLOAD_DIR}
+case "$TEST_CODE" in
     *hc2vpp*)
         DUT="hc2vpp"
         ;;
     *vpp*)
         DUT="vpp"
 
-        case ${JOB_NAME} in
+        case "$TEST_CODE" in
             csit-vpp-*)
                 # Use downloaded packages with specific version
-                if [[ ${TEST_TAG} == *DAILY ]] || \
-                   [[ ${TEST_TAG} == *WEEKLY ]];
+                if [[ "$TEST_CODE" == *daily* ]] || \
+                   [[ "$TEST_CODE" == *weekly* ]] || \
+                   [[ "$TEST_CODE" == *timed* ]];
                 then
                     echo Downloading latest VPP packages from NEXUS...
                     bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
-                        --skip-install
+                        --skip-install || {
+                        die 1 "Failed to get VPP packages!"
+                    }
                 else
                     echo Downloading VPP packages of specific version from NEXUS...
                     DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
                     VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
                     bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
-                        --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER}
+                        --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER} || {
+                        die 1 "Failed to get VPP packages!"
+                    }
                 fi
-                # Jenkins VPP deb paths (convert to full path)
-                DUT_PKGS="$( readlink -f ${DUT}*.deb | tr '\n' ' ' )"
                 ;;
             vpp-csit-*)
-                # Use local packages provided as argument list
-                # Jenkins VPP deb paths (convert to full path)
-                DUT_PKGS="$( readlink -f $@ | tr '\n' ' ' )"
+                # Use local built packages.
+                mv ../${DUT}*.deb ${DOWNLOAD_DIR}/
                 ;;
             *)
-                echo "Unable to identify job type based on JOB_NAME variable: ${JOB_NAME}"
-                exit 1
+                die 1 "Unable to identify job type from: ${TEST_CODE}!"
                 ;;
         esac
         ;;
     *ligato*)
         DUT="kubernetes"
+
+        case "$TEST_CODE" in
+            csit-*)
+                # Use downloaded packages with specific version
+                if [[ "$TEST_CODE" == *daily* ]] || \
+                   [[ "$TEST_CODE" == *weekly* ]] || \
+                   [[ "$TEST_CODE" == *timed* ]];
+                then
+                    echo Downloading latest VPP packages from NEXUS...
+                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
+                        --skip-install || {
+                        die 1 "Failed to get VPP packages!"
+                    }
+                else
+                    echo Downloading VPP packages of specific version from NEXUS...
+                    DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
+                    VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
+                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
+                        --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER} || {
+                        die 1 "Failed to get VPP packages!"
+                    }
+                fi
+                ;;
+            vpp-csit-*)
+                # Use local built packages.
+                mv ../${DUT}*.deb ${DOWNLOAD_DIR}/
+                ;;
+            *)
+                die 1 "Unable to identify job type from: ${TEST_CODE}!"
+                ;;
+        esac
+        # Extract VPP API to specific folder
+        dpkg -x ${DOWNLOAD_DIR}/vpp_*.deb /tmp/vpp || {
+            die 1 "Failed to extract ${DUT} package!"
+        }
+
+        LIGATO_REPO_URL="https://github.com/ligato/"
+        VPP_AGENT_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_AGENT_STABLE_VER)
+        DOCKER_DEB="docker-ce_18.03.0~ce-0~ubuntu_amd64.deb"
+
+        # Clone & checkout stable vnf-agent
+        cd ../..
+        git clone -b ${VPP_AGENT_STABLE_VER} --single-branch \
+            ${LIGATO_REPO_URL}/vpp-agent vpp-agent || {
+            die 1 "Failed to run: git clone ${LIGATO_REPO_URL}/vpp-agent!"
+        }
+        cd vpp-agent
+
+        # Install Docker
+        wget -q https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/${DOCKER_DEB} || {
+            die 1 "Failed to download Docker package!"
+        }
+
+        sudo dpkg -i ${DOCKER_DEB} || {
+            die 1 "Failed to install Docker!"
+        }
+
+        # Pull ligato/dev_vpp_agent docker image and re-tag as local
+        sudo docker pull ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER} || {
+            die 1 "Failed to pull Docker image!"
+        }
+
+        sudo docker tag ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER}\
+            dev_vpp_agent:latest || {
+            die 1 "Failed to tag Docker image!"
+        }
+
+        # Start dev_vpp_agent container as daemon
+        sudo docker run --rm -itd --name agentcnt dev_vpp_agent bash || {
+            die 1 "Failed to run Docker image!"
+        }
+
+        # Copy latest vpp api into running container
+        sudo docker cp /tmp/vpp/usr/share/vpp/api agentcnt:/usr/share/vpp || {
+            die 1 "Failed to copy files Docker image!"
+        }
+
+        for f in ${DOWNLOAD_DIR}/*; do
+            sudo docker cp $f agentcnt:/opt/vpp-agent/dev/vpp/build-root/ || {
+                die 1 "Failed to copy files Docker image!"
+            }
+        done
+
+        # Recompile vpp-agent
+        sudo docker exec -i agentcnt \
+            script -qec '. ~/.bashrc; cd /go/src/github.com/ligato/vpp-agent && make generate && make install' || {
+            die 1 "Failed to build vpp-agent in Docker image!"
+        }
+        # Save container state
+        sudo docker commit `sudo docker ps -q` dev_vpp_agent:latest || {
+            die 1 "Failed to commit state of Docker image!"
+        }
+
+        # Build prod_vpp_agent docker image
+        cd docker/prod/ &&\
+            sudo docker build --tag prod_vpp_agent --no-cache . || {
+                die 1 "Failed to build Docker image!"
+            }
+        # Export Docker image
+        sudo docker save prod_vpp_agent | gzip > prod_vpp_agent.tar.gz || {
+            die 1 "Failed to save Docker image!"
+        }
+        DOCKER_IMAGE="$( readlink -f prod_vpp_agent.tar.gz | tr '\n' ' ' )"
+        rm -r ${DOWNLOAD_DIR}/vpp*
+        mv ${DOCKER_IMAGE} ${DOWNLOAD_DIR}/
         ;;
     *dpdk*)
         DUT="dpdk"
+
+        DPDK_REPO='https://fast.dpdk.org/rel/'
+        # Use downloaded packages with specific version
+        if [[ "$TEST_CODE" == *daily* ]] || \
+           [[ "$TEST_CODE" == *weekly* ]] || \
+           [[ "$TEST_CODE" == *timed* ]];
+        then
+            echo "Downloading latest DPDK packages from repo..."
+            DPDK_STABLE_VER=$(wget --no-check-certificate --quiet -O - ${DPDK_REPO} | \
+                grep -v '2015' | grep -Eo 'dpdk-[^\"]+xz' | tail -1)
+        else
+            echo "Downloading DPDK packages of specific version from repo..."
+            DPDK_STABLE_VER='dpdk-18.05.tar.xz'
+        fi
+        if [[ ! -f ${DPDK_STABLE_VER} ]]; then
+            wget --no-check-certificate ${DPDK_REPO}${DPDK_STABLE_VER} || {
+                die 1 "Failed to get DPDK package from ${DPDK_REPO}!"
+            }
+        fi
         ;;
     *)
-        echo "Unable to identify dut type based on JOB_NAME variable: ${JOB_NAME}"
-        exit 1
+        die 1 "Unable to identify DUT type from: ${TEST_CODE}!"
         ;;
 esac
+cd ${SCRIPT_DIR}
+
+if [[ ! "$(ls -A ${DOWNLOAD_DIR})" ]]; then
+    die 1 "No artifacts downloaded!"
+fi
 
 # ENVIRONMENT PREPARATION
-virtualenv --system-site-packages env
-. env/bin/activate
-pip install -r requirements.txt
+rm -rf env
 
-if [ -z "${TOPOLOGIES}" ]; then
-    echo "No applicable topology found!"
-    exit 1
-fi
-# We iterate over available topologies and wait until we reserve topology
+pip install virtualenv || {
+    die 1 "Failed to install virtual env!"
+}
+virtualenv --system-site-packages env || {
+    die 1 "Failed to create virtual env!"
+}
+source env/bin/activate || {
+    die 1 "Failed to activate virtual env!"
+}
+pip install -r requirements.txt || {
+    die 1 "Failed to install requirements to virtual env!"
+}
+
+# We iterate over available topologies and wait until we reserve topology.
 while :; do
     for TOPOLOGY in ${TOPOLOGIES};
     do
@@ -122,109 +299,97 @@ while :; do
         if [ $? -eq 0 ]; then
             WORKING_TOPOLOGY=${TOPOLOGY}
             echo "Reserved: ${WORKING_TOPOLOGY}"
+            # On script exit we clean testbed.
+            trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
             break
         fi
     done
 
-    if [ ! -z "${WORKING_TOPOLOGY}" ]; then
-        # Exit the infinite while loop if we made a reservation
+    if [ -n "${WORKING_TOPOLOGY}" ]; then
+        # Exit the infinite while loop if we made a reservation.
         break
     fi
 
-    # Wait ~3minutes before next try
+    # Wait ~3minutes before next try.
     SLEEP_TIME=$[ ( $RANDOM % 20 ) + 180 ]s
     echo "Sleeping ${SLEEP_TIME}"
     sleep ${SLEEP_TIME}
 done
 
-function cancel_all {
-    python ${SCRIPT_DIR}/resources/tools/scripts/topo_installation.py -c -d ${INSTALLATION_DIR} -t $1
-    python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -c -t $1
+# Clean testbed before execution.
+python ${SCRIPT_DIR}/resources/tools/scripts/topo_cleanup.py -t ${WORKING_TOPOLOGY} || {
+    die 1 "Failed to cleanup topologies!"
 }
 
-# On script exit we cancel the reservation and installation and delete all vpp
-# packages
-trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
-
-python ${SCRIPT_DIR}/resources/tools/scripts/topo_installation.py \
-    -t ${WORKING_TOPOLOGY} -d ${INSTALLATION_DIR} -p ${DUT_PKGS}
-if [ $? -eq 0 ]; then
-    echo "DUT installed on hosts from: ${WORKING_TOPOLOGY}"
-else
-    echo "Failed to copy DUT packages files to hosts from: ${WORKING_TOPOLOGY}"
-    exit 1
-fi
-
 # CSIT EXECUTION
-PYBOT_ARGS="--consolewidth 100 \
-            --loglevel TRACE \
-            --variable TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
-            --suite tests.${DUT}.perf"
-
-case "$TEST_TAG" in
-    # select specific performance tests based on jenkins job type variable
-    PERFTEST_DAILY )
-        TAGS=('ndrdiscANDnic_intel-x520-da2AND1c'
-              'ndrdiscANDnic_intel-x520-da2AND2c'
-              'ndrpdrANDnic_intel-x520-da2AND1c'
-              'ndrpdrANDnic_intel-x520-da2AND2c'
-              'ndrdiscAND1cANDipsec'
-              'ndrdiscAND2cANDipsec')
+PYBOT_ARGS="--outputdir ${ARCHIVE_DIR} --loglevel TRACE --variable TOPOLOGY_PATH:${WORKING_TOPOLOGY} --suite tests.${DUT}.perf"
+
+# NIC SELECTION
+# All topologies NICs
+TOPOLOGIES_NICS=($(grep -hoPR "model: \K.*" topologies/available/* | sort -u))
+# Selected topology NICs
+TOPOLOGY_NICS=($(grep -hoPR "model: \K.*" ${WORKING_TOPOLOGY} | sort -u))
+# All topologies NICs - Selected topology NICs
+EXCLUDE_NICS=($(comm -13 <(printf '%s\n' "${TOPOLOGY_NICS[@]}") <(printf '%s\n' "${TOPOLOGIES_NICS[@]}")))
+
+case "$TEST_CODE" in
+    # Select specific performance tests based on jenkins job type variable.
+    *ndrpdr-weekly* )
+        TAGS=(ndrpdrANDnic_intel-x520-da2AND1c
+              ndrpdrANDnic_intel-x520-da2AND2c
+              ndrpdrAND1cANDipsec
+              ndrpdrAND2cANDipsec)
         ;;
-    PERFTEST_SEMI_WEEKLY )
-        TAGS=('ndrdiscANDnic_intel-x710AND1c'
-              'ndrdiscANDnic_intel-x710AND2c'
-              'ndrdiscANDnic_intel-xl710AND1c'
-              'ndrdiscANDnic_intel-xl710AND2c')
+    *ndrpdr-timed* )
         ;;
-    PERFTEST_MRR_DAILY )
-       TAGS=('mrrAND64bAND1c'
-             'mrrAND64bAND2c'
-             'mrrAND64bAND4c'
-             'mrrAND78bAND1c'
-             'mrrAND78bAND2c'
-             'mrrAND78bAND4c'
-             'mrrANDimixAND1cANDvhost'
-             'mrrANDimixAND2cANDvhost'
-             'mrrANDimixAND4cANDvhost'
-             'mrrANDimixAND1cANDmemif'
-             'mrrANDimixAND2cANDmemif'
-             'mrrANDimixAND4cANDmemif')
+    *mrr-daily* )
+       TAGS=(mrrAND64bAND1c
+             mrrAND64bAND2c
+             mrrAND64bAND4c
+             mrrAND78bAND1c
+             mrrAND78bAND2c
+             mrrAND78bAND4c
+             mrrANDimixAND1cANDvhost
+             mrrANDimixAND2cANDvhost
+             mrrANDimixAND4cANDvhost
+             mrrANDimixAND1cANDmemif
+             mrrANDimixAND2cANDmemif
+             mrrANDimixAND4cANDmemif)
         ;;
-    VERIFY-PERF-PATCH )
+    * )
         if [[ -z "$TEST_TAG_STRING" ]]; then
             # If nothing is specified, we will run pre-selected tests by
             # following tags. Items of array will be concatenated by OR in Robot
             # Framework.
-            TEST_TAG_ARRAY=('mrrANDnic_intel-x710AND1cAND64bANDip4base'
-                            'mrrANDnic_intel-x710AND1cAND78bANDip6base'
-                            'mrrANDnic_intel-x710AND1cAND64bANDl2bdbase')
+            TEST_TAG_ARRAY=(mrrANDnic_intel-x710AND1cAND64bANDip4base
+                            mrrANDnic_intel-x710AND1cAND78bANDip6base
+                            mrrANDnic_intel-x710AND1cAND64bANDl2bdbase)
         else
             # If trigger contains tags, split them into array.
             TEST_TAG_ARRAY=(${TEST_TAG_STRING//:/ })
+            # We will add excluded NICs.
+            TEST_TAG_ARRAY+=("${EXCLUDE_NICS[@]/#/!NIC_}")
         fi
 
         TAGS=()
 
+        # We will prefix with perftest to prevent running other tests
+        # (e.g. Functional).
+        prefix="perftestAND"
+        if [[ ${TEST_CODE} == vpp-* ]]; then
+            # Automatic prefixing for VPP jobs to limit the NIC used and
+            # traffic evaluation to MRR.
+            prefix="${prefix}mrrANDnic_intel-x710AND"
+        fi
         for TAG in "${TEST_TAG_ARRAY[@]}"; do
             if [[ ${TAG} == "!"* ]]; then
                 # Exclude tags are not prefixed.
                 TAGS+=("${TAG}")
             else
-                # We will prefix with perftest to prevent running other tests
-                # (e.g. Functional).
-                prefix="perftestAND"
-                if [[ ${JOB_NAME} == vpp-* ]] ; then
-                    # Automatic prefixing for VPP jobs to limit the NIC used and
-                    # traffic evaluation to MRR.
-                    prefix="${prefix}mrrANDnic_intel-x710AND"
-                fi
                 TAGS+=("$prefix${TAG}")
             fi
         done
         ;;
-    * )
-        TAGS=('perftest')
 esac
 
 # Catenate TAG selections
@@ -241,13 +406,10 @@ done
 pybot ${PYBOT_ARGS}${EXPANDED_TAGS[@]} tests/
 RETURN_STATUS=$(echo $?)
 
-# Archive JOB artifacts in jenkins
-for i in ${JOB_ARCHIVE_ARTIFACTS[@]}; do
-    cp $( readlink -f ${i} | tr '\n' ' ' ) ${JOB_ARCHIVE_DIR}/
-done
-# Archive JOB artifacts to logs.fd.io
-for i in ${LOG_ARCHIVE_ARTIFACTS[@]}; do
-    cp $( readlink -f ${i} | tr '\n' ' ' ) ${LOG_ARCHIVE_DIR}/
-done
+# We will create an additional archive if the WORKSPACE variable is set. This
+# way, runs inside Jenkins are archived automatically to logs.fd.io.
+if [[ -n ${WORKSPACE-} ]]; then
+    cp -r ${ARCHIVE_DIR}/ $WORKSPACE/archives/
+fi
 
 exit ${RETURN_STATUS}
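
For reference, the NIC-exclusion step above builds Robot exclude tags from the
set difference between all known NIC models and the reserved testbed's models
via comm(1); a minimal standalone sketch of the same pattern, with
illustrative model names:

    # Sorted model lists, since comm(1) requires sorted input.
    ALL_NICS=(Intel-X520-DA2 Intel-X710 Intel-XL710)   # from all topologies
    USED_NICS=(Intel-X710)                             # from reserved testbed
    # comm -13 prints lines unique to the second input: the models to exclude.
    EXCLUDE_NICS=($(comm -13 <(printf '%s\n' "${USED_NICS[@]}") \
                             <(printf '%s\n' "${ALL_NICS[@]}")))
    # Prefix each model to form Robot exclude tags, e.g. !NIC_Intel-X520-DA2.
    printf '%s\n' "${EXCLUDE_NICS[@]/#/!NIC_}"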
index 9a14176..eadc0a8 100644 (file)
@@ -329,12 +329,15 @@ class ContainerEngine(object):
         if self.container.install_dkms:
             self.execute(
                 'apt-get install -y dkms && '
-                'dpkg -i --force-all {guest_dir}/install_dir/*.deb'.
+                'dpkg -i --force-all '
+                '{guest_dir}/openvpp-testing/download_dir/*.deb'.
                 format(guest_dir=self.container.mnt[0].split(':')[1]))
         else:
             self.execute(
-                'for i in $(ls -I \"*dkms*\" {guest_dir}/install_dir/); do '
-                'dpkg -i --force-all {guest_dir}/install_dir/$i; done'.
+                'for i in $(ls -I \"*dkms*\" '
+                '{guest_dir}/openvpp-testing/download_dir/); do '
+                'dpkg -i --force-all '
+                '{guest_dir}/openvpp-testing/download_dir/$i; done'.
                 format(guest_dir=self.container.mnt[0].split(':')[1]))
         self.execute('apt-get -f install -y')
         self.execute('apt-get install -y ca-certificates')
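
The non-dkms branch above is assembled into a single shell loop executed in
the guest; for a container mount of, say, /opt/csit:/mnt/host (an illustrative
value of self.container.mnt[0]), the generated command would expand roughly
to:

    # Install every non-dkms package found in the mounted download_dir.
    for i in $(ls -I "*dkms*" /mnt/host/openvpp-testing/download_dir/); do
        dpkg -i --force-all /mnt/host/openvpp-testing/download_dir/$i
    done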
index 558d5d4..46b8597 100644 (file)
@@ -56,7 +56,7 @@ def pack_framework_dir():
 
     proc = Popen(
         split("tar --sparse --exclude-vcs --exclude=output*.xml "
-              "--exclude=./tmp --exclude=*.deb --exclude=*.rpm -zcf {0} ."
+              "--exclude=./tmp -zcf {0} ."
               .format(file_name)), stdout=PIPE, stderr=PIPE)
     (stdout, stderr) = proc.communicate()
 
diff --git a/resources/tools/scripts/topo_cleanup.py b/resources/tools/scripts/topo_cleanup.py
new file mode 100755 (executable)
index 0000000..c708a5d
--- /dev/null
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This script provides cleanup routines on all DUTs."""
+
+import argparse
+import sys
+from platform import dist
+from yaml import load
+
+from resources.libraries.python.ssh import SSH
+
+
+def execute_command_ssh(ssh, cmd, sudo=False):
+    """Execute a command over ssh channel, and print outputs.
+
+    :param ssh: SSH() object connected to a node.
+    :param cmd: Command line to execute on remote node.
+    :param sudo: Run command with sudo privilege level.
+    :type ssh: SSH() object
+    :type cmd: str
+    :type sudo: bool
+    :returns: Return code, stdout, stderr.
+    :rtype: tuple(int, str, str)
+    """
+    if sudo:
+        ret, stdout, stderr = ssh.exec_command_sudo(cmd, timeout=60)
+    else:
+        ret, stdout, stderr = ssh.exec_command(cmd, timeout=60)
+
+    print 'Executing: {cmd}'.format(cmd=cmd)
+    print '({ret}) {stdout} {stderr}'.format(ret=ret, stdout=stdout,
+                                             stderr=stderr)
+
+    return ret, stdout, stderr
+
+def uninstall_package(ssh, package):
+    """If there are packages installed, clean them up.
+
+    :param ssh: SSH() object connected to a node.
+    :param package: Package name.
+    :type ssh: SSH() object
+    :type package: str
+    """
+    if dist()[0] == 'Ubuntu':
+        ret, _, _ = ssh.exec_command("dpkg -l | grep {package}".format(
+            package=package))
+        if ret == 0:
+            # Try to fix interrupted installations first.
+            execute_command_ssh(ssh, 'dpkg --configure -a', sudo=True)
+            # Try to remove installed packages
+            execute_command_ssh(ssh, 'apt-get purge -y "{package}.*"'.format(
+                package=package), sudo=True)
+
+def kill_process(ssh, process):
+    """If there are running processes, kill them.
+
+    :param ssh: SSH() object connected to a node.
+    :param process: Process name.
+    :type ssh: SSH() object
+    :type process: str
+    """
+    execute_command_ssh(ssh, 'killall -v -s 9 {process}'.format(
+        process=process), sudo=True)
+
+
+def main():
+    """Testbed cleanup."""
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-t", "--topo", required=True, help="Topology file")
+
+    args = parser.parse_args()
+    topology_file = args.topo
+
+    topology = load(open(topology_file).read())['nodes']
+
+    ssh = SSH()
+    for node in topology:
+        if topology[node]['type'] == "DUT":
+            print "###TI host: {}".format(topology[node]['host'])
+            ssh.connect(topology[node])
+
+            # Kill processes.
+            kill_process(ssh, 'qemu')
+            kill_process(ssh, 'l3fwd')
+            kill_process(ssh, 'testpmd')
+
+            # Uninstall packages
+            uninstall_package(ssh, 'vpp')
+            uninstall_package(ssh, 'honeycomb')
+
+            # Remove HC logs.
+            execute_command_ssh(ssh, 'rm -rf /var/log/honeycomb',
+                                sudo=True)
+
+            # Kill all containers.
+            execute_command_ssh(ssh, 'docker rm $(sudo docker ps -a -q)',
+                                sudo=True)
+
+            # Destroy kubernetes.
+            execute_command_ssh(ssh, 'kubeadm reset --force',
+                                sudo=True)
+
+if __name__ == "__main__":
+    sys.exit(main())
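
The cleanup script imports from resources.libraries.python, so it is expected
to run from the framework root with PYTHONPATH pointing there, as the
bootstrap does; a manual invocation would look like this (topology path
illustrative):

    PYTHONPATH=. python resources/tools/scripts/topo_cleanup.py \
        -t topologies/available/lf_3n_hsw_testbed1.yaml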
index 4fcbf08..ec23aa0 100755 (executable)
@@ -21,7 +21,7 @@ PWDDIR=$(pwd)
 
 cd ${ROOTDIR}
 mkdir ${DPDK_DIR}
-tar -xvf dpdk*.tar.xz --strip=1 --directory dpdk || \
+tar -xvf download_dir/dpdk*.tar.xz --strip=1 --directory dpdk || \
     { echo "Failed to extract DPDK"; exit 1; }
 
 # Compile the DPDK
index b847d81..ecee11c 100644 (file)
@@ -19,6 +19,9 @@
 | Library | resources.libraries.python.CpuUtils
 | Suite Setup | Run Keywords | Setup performance global Variables
 | ...         | AND          | Setup Framework | ${nodes}
+| ...         | AND          | Install Vpp On All Duts | ${nodes}
+| ...         | ${packages_dir} | ${vpp_rpm_pkgs} | ${vpp_deb_pkgs}
+| ...         | AND          | Verify Vpp On All Duts | ${nodes}
 | ...         | AND          | Verify UIO Driver on all DUTs | ${nodes}
 | ...         | AND          | Setup All DUTs | ${nodes}
 | ...         | AND          | Show Vpp Version On All Duts | ${nodes}
@@ -54,3 +57,9 @@
 | | Set Global Variable | ${dut_stats} | ${True}
 | | @{plugins_to_enable}= | Create List | dpdk_plugin.so
 | | Set Global Variable | @{plugins_to_enable}
+| | Set Global Variable | ${packages_dir} | /tmp/openvpp-testing/download_dir/
+| | @{vpp_rpm_pkgs}= | Create List | vpp | vpp-devel | vpp-lib | vpp-plugins
+| | Set Global Variable | ${vpp_rpm_pkgs}
+| | @{vpp_deb_pkgs}= | Create List | vpp | vpp-dbg | vpp-dev | vpp-lib
+| | ... | vpp-plugins | vpp-dpdk-dkms
+| | Set Global Variable | ${vpp_deb_pkgs}