Report: Add rls data
[csit.git] / resources / libraries / bash / function / common.sh
index 6d078e5..33b9c9f 100644
@@ -1,5 +1,5 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Copyright (c) 2019 PANTHEON.tech and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2021 PANTHEON.tech and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -23,7 +23,6 @@ set -exuo pipefail
 
 
 function activate_docker_topology () {
-    set -exuo pipefail
 
     # Create virtual vpp-device topology. The output of the function is
     # a topology file describing the created environment.
@@ -34,27 +33,33 @@ function activate_docker_topology () {
     # - NODENESS - Node multiplicity of desired testbed.
     # - FLAVOR - Node flavor string, usually describing the processor.
     # - IMAGE_VER_FILE - Name of file that contains the image version.
+    # - CSIT_DIR - Directory where ${IMAGE_VER_FILE} is located.
     # Variables set:
     # - WORKING_TOPOLOGY - Path to topology file.
 
+    set -exuo pipefail
+
     source "${BASH_FUNCTION_DIR}/device.sh" || {
         die "Source failed!"
     }
-
     device_image="$(< ${CSIT_DIR}/${IMAGE_VER_FILE})"
     case_text="${NODENESS}_${FLAVOR}"
     case "${case_text}" in
-        "1n_skx")
+        "1n_skx" | "1n_tx2")
             # We execute reservation over csit-shim-dcr (ssh) which runs sourced
             # script's functions. Env variables are read from ssh output
             # back to localhost for further processing.
-            hostname=$(grep search /etc/resolv.conf | cut -d' ' -f3)
-            ssh="ssh root@${hostname} -p 6022"
+            # Shim and Jenkins executor are in the same network on the same host.
+            # Connect to Docker's default gateway IP and the shim's exposed port.
+            ssh="ssh root@172.17.0.1 -p 6022"
             run="activate_wrapper ${NODENESS} ${FLAVOR} ${device_image}"
+            # The "declare -f" output is long and boring.
+            set +x
             # backticks to avoid https://midnight-commander.org/ticket/2142
             env_vars=`${ssh} "$(declare -f); ${run}"` || {
                 die "Topology reservation via shim-dcr failed!"
             }
+            set -x
             set -a
             source <(echo "$env_vars" | grep -v /usr/bin/docker) || {
                 die "Source failed!"
@@ -91,8 +96,6 @@ function activate_docker_topology () {
 
 function activate_virtualenv () {
 
-    set -exuo pipefail
-
     # Update virtualenv pip package, delete and create virtualenv directory,
     # activate the virtualenv, install requirements, set PYTHONPATH.
 
@@ -107,25 +110,22 @@ function activate_virtualenv () {
     # Functions called:
     # - die - Print to stderr and exit.
 
-    # TODO: Do we want the callers to be able to set the env dir name?
-    # TODO: + In that case, do we want to support env switching?
-    # TODO:   + In that case we want to make env_dir global.
-    # TODO: Do we want the callers to override PYTHONPATH loaction?
+    set -exuo pipefail
 
     root_path="${1-$CSIT_DIR}"
     env_dir="${root_path}/env"
     req_path=${2-$CSIT_DIR/requirements.txt}
     rm -rf "${env_dir}" || die "Failed to clean previous virtualenv."
-    pip install --upgrade virtualenv || {
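+    # Virtualenv version is pinned, presumably for reproducible environments.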
+    pip3 install virtualenv==20.0.20 || {
         die "Virtualenv package install failed."
     }
-    virtualenv "${env_dir}" || {
-        die "Virtualenv creation failed."
+    virtualenv --no-download --python=$(which python3) "${env_dir}" || {
+        die "Virtualenv creation for $(which python3) failed."
     }
     set +u
     source "${env_dir}/bin/activate" || die "Virtualenv activation failed."
     set -u
-    pip install --upgrade -r "${req_path}" || {
+    pip3 install -r "${req_path}" || {
         die "Requirements installation failed."
     }
     # Most CSIT Python scripts assume PYTHONPATH is set and exported.
@@ -135,8 +135,6 @@ function activate_virtualenv () {
 
 function archive_tests () {
 
-    set -exuo pipefail
-
     # Create .tar.xz of generated/tests for archiving.
     # To be run after generate_tests, kept separate to offer more flexibility.
 
@@ -145,7 +143,9 @@ function archive_tests () {
     # File rewritten:
     # - ${ARCHIVE_DIR}/tests.tar.xz - Archive of generated tests.
 
-    tar c "${GENERATED_DIR}/tests" | xz -9e > "${ARCHIVE_DIR}/tests.tar.xz" || {
+    set -exuo pipefail
+
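+    # Level -3 compresses faster than -9e at the cost of a larger archive.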
+    tar c "${GENERATED_DIR}/tests" | xz -3 > "${ARCHIVE_DIR}/tests.tar.xz" || {
         die "Error creating archive of generated tests."
     }
 }
@@ -153,10 +153,7 @@ function archive_tests () {
 
 function check_download_dir () {
 
-    set -exuo pipefail
-
     # Fail if there are no files visible in ${DOWNLOAD_DIR}.
-    # TODO: Do we need this as a function, if it is (almost) a one-liner?
     #
     # Variables read:
     # - DOWNLOAD_DIR - Path to the directory pybot takes the build to test from.
@@ -165,40 +162,44 @@ function check_download_dir () {
     # Functions called:
     # - die - Print to stderr and exit.
 
+    set -exuo pipefail
+
     if [[ ! "$(ls -A "${DOWNLOAD_DIR}")" ]]; then
         die "No artifacts downloaded!"
     fi
 }
 
 
-function cleanup_topo () {
+function check_prerequisites () {
 
-    set -exuo pipefail
+    # Fail if prerequisites are not met.
+    #
+    # Functions called:
+    # - installed - Check if application is installed/present in system.
+    # - die - Print to stderr and exit.
 
-    # Variables read:
-    # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
-    # - PYTHON_SCRIPTS_DIR - Path to directory holding the reservation script.
+    set -exuo pipefail
 
-    python "${PYTHON_SCRIPTS_DIR}/topo_cleanup.py" -t "${WORKING_TOPOLOGY}"
-    # Not using "|| die" as some callers might want to ignore errors,
-    # e.g. in teardowns, such as unreserve.
+    if ! installed sshpass; then
+        die "Please install sshpass before continue!"
+    fi
 }
 
 
 function common_dirs () {
 
-    set -exuo pipefail
-
     # Set global variables, create some directories (without touching content).
 
     # Variables set:
     # - BASH_FUNCTION_DIR - Path to existing directory this file is located in.
     # - CSIT_DIR - Path to existing root of local CSIT git repository.
-    # - TOPOLOGIES_DIR - Path to existing directory with available tpologies.
+    # - TOPOLOGIES_DIR - Path to existing directory with available topologies.
+    # - JOB_SPECS_DIR - Path to existing directory with job test specifications.
     # - RESOURCES_DIR - Path to existing CSIT subdirectory "resources".
     # - TOOLS_DIR - Path to existing resources subdirectory "tools".
     # - PYTHON_SCRIPTS_DIR - Path to existing tools subdirectory "scripts".
-    # - ARCHIVE_DIR - Path to created CSIT subdirectory "archive".
+    # - ARCHIVE_DIR - Path to created CSIT subdirectory "archives".
+    #   The name is chosen to match what ci-management expects.
     # - DOWNLOAD_DIR - Path to created CSIT subdirectory "download_dir".
     # - GENERATED_DIR - Path to created CSIT subdirectory "generated".
     # Directories created if not present:
@@ -206,37 +207,49 @@ function common_dirs () {
     # Functions called:
     # - die - Print to stderr and exit.
 
-    BASH_FUNCTION_DIR="$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")" || {
-        die "Some error during localizing this source directory."
+    set -exuo pipefail
+
+    this_file=$(readlink -e "${BASH_SOURCE[0]}") || {
+        die "Some error during locating of this source file."
+    }
+    BASH_FUNCTION_DIR=$(dirname "${this_file}") || {
+        die "Some error during dirname call."
     }
     # Current working directory could be in a different repo, e.g. VPP.
     pushd "${BASH_FUNCTION_DIR}" || die "Pushd failed"
-    CSIT_DIR="$(readlink -e "$(git rev-parse --show-toplevel)")" || {
-        die "Readlink or git rev-parse failed."
+    relative_csit_dir=$(git rev-parse --show-toplevel) || {
+        die "Git rev-parse failed."
     }
+    CSIT_DIR=$(readlink -e "${relative_csit_dir}") || die "Readlink failed."
     popd || die "Popd failed."
-    TOPOLOGIES_DIR="$(readlink -e "${CSIT_DIR}/topologies/available")" || {
+    TOPOLOGIES_DIR=$(readlink -e "${CSIT_DIR}/topologies/available") || {
+        die "Readlink failed."
+    }
+    JOB_SPECS_DIR=$(readlink -e "${CSIT_DIR}/docs/job_specs") || {
         die "Readlink failed."
     }
-    RESOURCES_DIR="$(readlink -e "${CSIT_DIR}/resources")" || {
+    RESOURCES_DIR=$(readlink -e "${CSIT_DIR}/resources") || {
         die "Readlink failed."
     }
-    TOOLS_DIR="$(readlink -e "${RESOURCES_DIR}/tools")" || {
+    TOOLS_DIR=$(readlink -e "${RESOURCES_DIR}/tools") || {
         die "Readlink failed."
     }
-    PYTHON_SCRIPTS_DIR="$(readlink -e "${TOOLS_DIR}/scripts")" || {
+    DOC_GEN_DIR=$(readlink -e "${TOOLS_DIR}/doc_gen") || {
+        die "Readlink failed."
+    }
+    PYTHON_SCRIPTS_DIR=$(readlink -e "${TOOLS_DIR}/scripts") || {
         die "Readlink failed."
     }
 
-    ARCHIVE_DIR="$(readlink -f "${CSIT_DIR}/archive")" || {
+    ARCHIVE_DIR=$(readlink -f "${CSIT_DIR}/archives") || {
         die "Readlink failed."
     }
     mkdir -p "${ARCHIVE_DIR}" || die "Mkdir failed."
-    DOWNLOAD_DIR="$(readlink -f "${CSIT_DIR}/download_dir")" || {
+    DOWNLOAD_DIR=$(readlink -f "${CSIT_DIR}/download_dir") || {
         die "Readlink failed."
     }
     mkdir -p "${DOWNLOAD_DIR}" || die "Mkdir failed."
-    GENERATED_DIR="$(readlink -f "${CSIT_DIR}/generated")" || {
+    GENERATED_DIR=$(readlink -f "${CSIT_DIR}/generated") || {
         die "Readlink failed."
     }
     mkdir -p "${GENERATED_DIR}" || die "Mkdir failed."
@@ -245,18 +258,19 @@ function common_dirs () {
 
 function compose_pybot_arguments () {
 
-    set -exuo pipefail
-
     # Variables read:
     # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
     # - DUT - CSIT test/ subdirectory, set while processing tags.
     # - TAGS - Array variable holding selected tag boolean expressions.
     # - TOPOLOGIES_TAGS - Tag boolean expression filtering tests for topology.
     # - TEST_CODE - The test selection string from environment or argument.
+    # - SELECTION_MODE - Selection criteria [test, suite, include, exclude].
     # Variables set:
     # - PYBOT_ARGS - String holding part of all arguments for pybot.
     # - EXPANDED_TAGS - Array of strings pybot arguments compiled from tags.
 
+    set -exuo pipefail
+
     # No explicit check needed with "set -u".
     PYBOT_ARGS=("--loglevel" "TRACE")
     PYBOT_ARGS+=("--variable" "TOPOLOGY_PATH:${WORKING_TOPOLOGY}")
@@ -265,9 +279,6 @@ function compose_pybot_arguments () {
         *"device"*)
             PYBOT_ARGS+=("--suite" "tests.${DUT}.device")
             ;;
-        *"func"*)
-            PYBOT_ARGS+=("--suite" "tests.${DUT}.func")
-            ;;
         *"perf"*)
             PYBOT_ARGS+=("--suite" "tests.${DUT}.perf")
             ;;
@@ -280,37 +291,22 @@ function compose_pybot_arguments () {
         if [[ ${tag} == "!"* ]]; then
             EXPANDED_TAGS+=("--exclude" "${tag#$"!"}")
         else
-            EXPANDED_TAGS+=("--include" "${TOPOLOGIES_TAGS}AND${tag}")
+            if [[ ${SELECTION_MODE} == "--test" ]]; then
+                EXPANDED_TAGS+=("--test" "${tag}")
+            else
+                EXPANDED_TAGS+=("--include" "${TOPOLOGIES_TAGS}AND${tag}")
+            fi
         fi
     done
-}
 
-
-function copy_archives () {
-
-    set -exuo pipefail
-
-    # Variables read:
-    # - WORKSPACE - Jenkins workspace, copy only if the value is not empty.
-    #   Can be unset, then it speeds up manual testing.
-    # - ARCHIVE_DIR - Path to directory with content to be copied.
-    # Directories updated:
-    # - ${WORKSPACE}/archives/ - Created if does not exist.
-    #   Content of ${ARCHIVE_DIR}/ is copied here.
-    # Functions called:
-    # - die - Print to stderr and exit.
-
-    # We will create additional archive if workspace variable is set.
-    # This way if script is running in jenkins all will be
-    # automatically archived to logs.fd.io.
-    if [[ -n "${WORKSPACE-}" ]]; then
-        mkdir -p "${WORKSPACE}/archives/" || die "Archives dir create failed."
-        cp -rf "${ARCHIVE_DIR}"/* "${WORKSPACE}/archives" || die "Copy failed."
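+    # In test selection mode, the topology tag is added as a single global include.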
+    if [[ ${SELECTION_MODE} == "--test" ]]; then
+        EXPANDED_TAGS+=("--include" "${TOPOLOGIES_TAGS}")
     fi
 }
 
 
 function deactivate_docker_topology () {
+
     # Deactivate virtual vpp-device topology by removing containers.
     #
     # Variables read:
@@ -321,13 +317,15 @@ function deactivate_docker_topology () {
 
     case_text="${NODENESS}_${FLAVOR}"
     case "${case_text}" in
-        "1n_skx")
-            hostname=$(grep search /etc/resolv.conf | cut -d' ' -f3)
-            ssh="ssh root@${hostname} -p 6022"
-            env_vars="$(env | grep CSIT_ | tr '\n' ' ' )"
+        "1n_skx" | "1n_tx2")
+            ssh="ssh root@172.17.0.1 -p 6022"
+            env_vars=$(env | grep CSIT_ | tr '\n' ' ' ) || die
+            # The "declare -f" output is long and boring.
+            set +x
             ${ssh} "$(declare -f); deactivate_wrapper ${env_vars}" || {
                 die "Topology cleanup via shim-dcr failed!"
             }
+            set -x
             ;;
         "1n_vbox")
             enter_mutex || die
@@ -343,6 +341,7 @@ function deactivate_docker_topology () {
 
 
 function die () {
+
     # Print the message to standard error and exit with the error code specified
     # by the second argument.
     #
@@ -361,8 +360,6 @@ function die () {
 
 function die_on_pybot_error () {
 
-    set -exuo pipefail
-
     # Source this fragment if you want to abort on any failed test case.
     #
     # Variables read:
@@ -370,6 +367,8 @@ function die_on_pybot_error () {
     # Functions called:
     # - die - Print to stderr and exit.
 
+    set -exuo pipefail
+
     if [[ "${PYBOT_EXIT_STATUS}" != "0" ]]; then
         die "Test failures are present!" "${PYBOT_EXIT_STATUS}"
     fi
@@ -378,8 +377,6 @@ function die_on_pybot_error () {
 
 function generate_tests () {
 
-    set -exuo pipefail
-
     # Populate ${GENERATED_DIR}/tests based on ${CSIT_DIR}/tests/.
     # Any previously existing content of ${GENERATED_DIR}/tests is wiped before.
     # The generation is done by executing any *.py executable
@@ -393,11 +390,14 @@ function generate_tests () {
     # Directories replaced:
     # - ${GENERATED_DIR}/tests - Overwritten by the generated tests.
 
-    rm -rf "${GENERATED_DIR}/tests"
-    cp -r "${CSIT_DIR}/tests" "${GENERATED_DIR}/tests"
+    set -exuo pipefail
+
+    rm -rf "${GENERATED_DIR}/tests" || die
+    cp -r "${CSIT_DIR}/tests" "${GENERATED_DIR}/tests" || die
     cmd_line=("find" "${GENERATED_DIR}/tests" "-type" "f")
     cmd_line+=("-executable" "-name" "*.py")
-    file_list=$("${cmd_line[@]}") || die
+    # We sort the directories, so log output can be compared between runs.
+    file_list=$("${cmd_line[@]}" | sort) || die
 
     for gen in ${file_list}; do
         directory="$(dirname "${gen}")" || die
@@ -411,8 +411,6 @@ function generate_tests () {
 
 function get_test_code () {
 
-    set -exuo pipefail
-
     # Arguments:
     # - ${1} - Optional, argument of entry script (or empty as unset).
     #   Test code value to override job name from environment.
@@ -423,6 +421,8 @@ function get_test_code () {
     # - NODENESS - Node multiplicity of desired testbed.
     # - FLAVOR - Node flavor string, usually describing the processor.
 
+    set -exuo pipefail
+
     TEST_CODE="${1-}" || die "Reading optional argument failed, somehow."
     if [[ -z "${TEST_CODE}" ]]; then
         TEST_CODE="${JOB_NAME-}" || die "Reading job name failed, somehow."
@@ -437,22 +437,49 @@ function get_test_code () {
             NODENESS="1n"
             FLAVOR="skx"
             ;;
+        *"1n-tx2"*)
+            NODENESS="1n"
+            FLAVOR="tx2"
+            ;;
+        *"2n-aws"*)
+            NODENESS="2n"
+            FLAVOR="aws"
+            ;;
+        *"3n-aws"*)
+            NODENESS="3n"
+            FLAVOR="aws"
+            ;;
         *"2n-skx"*)
             NODENESS="2n"
             FLAVOR="skx"
             ;;
+        *"2n-zn2"*)
+            NODENESS="2n"
+            FLAVOR="zn2"
+            ;;
         *"3n-skx"*)
             NODENESS="3n"
             FLAVOR="skx"
             ;;
-        *"3n-tsh"*)
+        *"2n-clx"*)
+            NODENESS="2n"
+            FLAVOR="clx"
+            ;;
+        *"2n-dnv"*)
+            NODENESS="2n"
+            FLAVOR="dnv"
+            ;;
+        *"3n-dnv"*)
             NODENESS="3n"
-            FLAVOR="tsh"
+            FLAVOR="dnv"
             ;;
-        *)
-            # Fallback to 3-node Haswell by default (backward compatibility)
+        *"2n-tx2"*)
+            NODENESS="2n"
+            FLAVOR="tx2"
+            ;;
+        *"3n-tsh"*)
             NODENESS="3n"
-            FLAVOR="hsw"
+            FLAVOR="tsh"
             ;;
     esac
 }
@@ -460,54 +487,148 @@ function get_test_code () {
 
 function get_test_tag_string () {
 
-    set -exuo pipefail
-
     # Variables read:
     # - GERRIT_EVENT_TYPE - Event type set by gerrit, can be unset.
     # - GERRIT_EVENT_COMMENT_TEXT - Comment text, read for "comment-added" type.
     # - TEST_CODE - The test selection string from environment or argument.
     # Variables set:
-    # - TEST_TAG_STRING - The string following "perftest" in gerrit comment,
-    #   or empty.
+    # - TEST_TAG_STRING - The string following trigger word in gerrit comment.
+    #   May be empty, or even not set on event types not adding comment.
 
     # TODO: ci-management scripts no longer need to perform this.
 
-    trigger=""
+    set -exuo pipefail
+
     if [[ "${GERRIT_EVENT_TYPE-}" == "comment-added" ]]; then
         case "${TEST_CODE}" in
             *"device"*)
-                # On parsing error, ${trigger} stays empty.
-                trigger="$(echo "${GERRIT_EVENT_COMMENT_TEXT}" \
-                    | grep -oE '(devicetest$|devicetest[[:space:]].+$)')" \
-                    || true
-                # Set test tags as string.
-                TEST_TAG_STRING="${trigger#$"devicetest"}"
+                trigger="devicetest"
                 ;;
             *"perf"*)
-                # On parsing error, ${trigger} stays empty.
-                comment="${GERRIT_EVENT_COMMENT_TEXT}"
-                # As "perftest" can be followed by something, we substitute it.
-                comment="${comment/perftest-2n/perftest}"
-                comment="${comment/perftest-3n/perftest}"
-                comment="${comment/perftest-hsw/perftest}"
-                comment="${comment/perftest-skx/perftest}"
-                comment="${comment/perftest-tsh/perftest}"
-                tag_string="$(echo "${comment}" \
-                    | grep -oE '(perftest$|perftest[[:space:]].+$)' || true)"
-                # Set test tags as string.
-                TEST_TAG_STRING="${tag_string#$"perftest"}"
+                trigger="perftest"
                 ;;
             *)
                 die "Unknown specification: ${TEST_CODE}"
         esac
+        # Ignore lines not containing the trigger word.
+        comment=$(fgrep "${trigger}" <<< "${GERRIT_EVENT_COMMENT_TEXT}" || true)
+        # The vpp-csit triggers are followed by content we are not interested in.
+        # Removing it and the trigger word: https://unix.stackexchange.com/a/13472
+        # (except relying on \s whitespace, \S non-whitespace and . both).
+        # The last string is concatenated, only the middle part is expanded.
+        cmd=("grep" "-oP" '\S*'"${trigger}"'\S*\s\K.+$') || die "Unset trigger?"
+        # On parsing error, TEST_TAG_STRING probably stays empty.
+        TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
+        if [[ -z "${TEST_TAG_STRING-}" ]]; then
+            # Probably we got a base64 encoded comment.
+            comment="${GERRIT_EVENT_COMMENT_TEXT}"
+            comment=$(base64 --decode <<< "${comment}" || true)
+            comment=$(fgrep "${trigger}" <<< "${comment}" || true)
+            TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
+        fi
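+        # If the first word of the tag string names a processor variant
+        # (icl/skx), export it as GRAPH_NODE_VARIANT and strip it.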
+        if [[ -n "${TEST_TAG_STRING-}" ]]; then
+            test_tag_array=(${TEST_TAG_STRING})
+            if [[ "${test_tag_array[0]}" == "icl" ]]; then
+                export GRAPH_NODE_VARIANT="icl"
+                TEST_TAG_STRING="${test_tag_array[@]:1}" || true
+            elif [[ "${test_tag_array[0]}" == "skx" ]]; then
+                export GRAPH_NODE_VARIANT="skx"
+                TEST_TAG_STRING="${test_tag_array[@]:1}" || true
+            fi
+        fi
     fi
 }
 
 
-function reserve_and_cleanup_testbed () {
+function installed () {
+
+    # Check if the given utility is installed. Fail if not installed.
+    #
+    # Duplicate of common.sh function, as this file is also used standalone.
+    #
+    # Arguments:
+    # - ${1} - Utility to check.
+    # Returns:
+    # - 0 - If command is installed.
+    # - 1 - If command is not installed.
+
+    set -exuo pipefail
+
+    command -v "${1}"
+}
+
+
+function move_archives () {
+
+    # Move archive directory to top of workspace, if not already there.
+    #
+    # ARCHIVE_DIR is positioned relative to CSIT_DIR,
+    # but in some jobs CSIT_DIR is not the same as WORKSPACE
+    # (e.g. under VPP_DIR). To simplify ci-management settings,
+    # we want to move the data to the top. We do not want a simple copy,
+    # as ci-management is eager with recursive search.
+    #
+    # As some scripts may call this function multiple times,
+    # the actual implementation uses copying and deletion,
+    # so the workspace gets the "union" of contents (overwriting on conflict).
+    # The consequence is an empty ARCHIVE_DIR remaining after this call.
+    #
+    # As the source directory is emptied,
+    # the check for dirs being different is essential.
+    #
+    # Variables read:
+    # - WORKSPACE - Jenkins workspace, move only if the value is not empty.
+    #   Can be unset, then it speeds up manual testing.
+    # - ARCHIVE_DIR - Path to directory with content to be moved.
+    # Directories updated:
+    # - ${WORKSPACE}/archives/ - Created if does not exist.
+    #   Content of ${ARCHIVE_DIR}/ is moved.
+    # Functions called:
+    # - die - Print to stderr and exit.
+
+    set -exuo pipefail
+
+    if [[ -n "${WORKSPACE-}" ]]; then
+        target=$(readlink -f "${WORKSPACE}/archives")
+        if [[ "${target}" != "${ARCHIVE_DIR}" ]]; then
+            mkdir -p "${target}" || die "Archives dir create failed."
+            cp -rf "${ARCHIVE_DIR}"/* "${target}" || die "Copy failed."
+            rm -rf "${ARCHIVE_DIR}"/* || die "Delete failed."
+        fi
+    fi
+}
+
+
+function prepare_topology () {
+
+    # Prepare virtual testbed topology if needed based on flavor.
+
+    # Variables read:
+    # - NODENESS - Node multiplicity of testbed, either "2n" or "3n".
+    # - FLAVOR - Node flavor string, e.g. "clx" or "skx".
+    # Functions called:
+    # - die - Print to stderr and exit.
+    # - terraform_init - Terraform init topology.
+    # - terraform_apply - Terraform apply topology.
 
     set -exuo pipefail
 
+    case_text="${NODENESS}_${FLAVOR}"
+    case "${case_text}" in
+        "2n_aws")
+            terraform_init || die "Failed to call terraform init."
+            terraform_apply || die "Failed to call terraform apply."
+            ;;
+        "3n_aws")
+            terraform_init || die "Failed to call terraform init."
+            terraform_apply || die "Failed to call terraform apply."
+            ;;
+    esac
+}
+
+
+function reserve_and_cleanup_testbed () {
+
     # Reserve physical testbed, perform cleanup, register trap to unreserve.
     # When cleanup fails, remove from topologies and keep retrying
     # until all topologies are removed.
@@ -515,18 +636,25 @@ function reserve_and_cleanup_testbed () {
     # Variables read:
     # - TOPOLOGIES - Array of paths to topology yaml to attempt reservation on.
     # - PYTHON_SCRIPTS_DIR - Path to directory holding the reservation script.
+    # - BUILD_TAG - Any string suitable as a filename, identifying the
+    #   test run executing this function. May be unset.
     # Variables set:
     # - TOPOLOGIES - Array of paths to topologies, with failed cleanups removed.
     # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
     # Functions called:
     # - die - Print to stderr and exit.
+    # - ansible_playbook - Perform an action using ansible, see ansible.sh.
     # Traps registered:
     # - EXIT - Calls cancel_all for ${WORKING_TOPOLOGY}.
 
-    while [[ ${TOPOLOGIES[@]} ]]; do
+    set -exuo pipefail
+
+    while true; do
         for topo in "${TOPOLOGIES[@]}"; do
             set +e
-            python "${PYTHON_SCRIPTS_DIR}/topo_reservation.py" -t "${topo}"
+            scrpt="${PYTHON_SCRIPTS_DIR}/topo_reservation.py"
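+            # BUILD_TAG identifies this run to the reservation script (via -r).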
+            opts=("-t" "${topo}" "-r" "${BUILD_TAG:-Unknown}")
+            python3 "${scrpt}" "${opts[@]}"
             result="$?"
             set -e
             if [[ "${result}" == "0" ]]; then
@@ -542,9 +670,9 @@ function reserve_and_cleanup_testbed () {
                     }
                     die "Trap attempt failed, unreserve succeeded. Aborting."
                 }
-                # Cleanup check.
+                # Cleanup + calibration checks
                 set +e
-                cleanup_topo
+                ansible_playbook "cleanup, calibration"
                 result="$?"
                 set -e
                 if [[ "${result}" == "0" ]]; then
@@ -552,42 +680,34 @@ function reserve_and_cleanup_testbed () {
                 fi
                 warn "Testbed cleanup failed: ${topo}"
                 untrap_and_unreserve_testbed "Fail of unreserve after cleanup."
-                # WORKING_TOPOLOGY is now empty again.
-                # Build new topology array.
-                #   TOPOLOGIES=("${TOPOLOGIES[@]/$topo}")
-                # does not really work, see:
-                # https://stackoverflow.com/questions/16860877/remove-an-element-from-a-bash-array
-                new_topologies=()
-                for item in "${TOPOLOGIES[@]}"; do
-                    if [[ "${item}" != "${topo}" ]]; then
-                        new_topologies+=("${item}")
-                    fi
-                done
-                TOPOLOGIES=("${new_topologies[@]}")
-                break
             fi
+            # Else testbed is accessible but currently reserved, moving on.
         done
 
         if [[ -n "${WORKING_TOPOLOGY-}" ]]; then
             # Exit the infinite while loop if we made a reservation.
+            warn "Reservation and cleanup successful."
             break
         fi
 
+        if [[ "${#TOPOLOGIES[@]}" == "0" ]]; then
+            die "Run out of operational testbeds!"
+        fi
+
         # Wait ~3 minutes before the next try.
-        sleep_time="$[ ( $RANDOM % 20 ) + 180 ]s" || {
+        sleep_time="$[ ( ${RANDOM} % 20 ) + 180 ]s" || {
             die "Sleep time calculation failed."
         }
         echo "Sleeping ${sleep_time}"
         sleep "${sleep_time}" || die "Sleep failed."
     done
-    die "Run out of operational testbeds!"
 }
 
 
 function run_pybot () {
 
-    set -exuo pipefail
-
+    # Run pybot with options based on input variables. Create output_info.xml.
+    #
     # Variables read:
     # - CSIT_DIR - Path to existing root of local CSIT git repository.
     # - ARCHIVE_DIR - Path to store robot result files in.
@@ -598,13 +718,15 @@ function run_pybot () {
     # Functions called:
     # - die - Print to stderr and exit.
 
+    set -exuo pipefail
+
     all_options=("--outputdir" "${ARCHIVE_DIR}" "${PYBOT_ARGS[@]}")
+    all_options+=("--noncritical" "EXPECTED_FAILING")
     all_options+=("${EXPANDED_TAGS[@]}")
 
     pushd "${CSIT_DIR}" || die "Change directory operation failed."
     set +e
-    # TODO: Make robot tests not require "$(pwd)" == "${CSIT_DIR}".
-    pybot "${all_options[@]}" "${GENERATED_DIR}/tests/"
+    robot "${all_options[@]}" "${GENERATED_DIR}/tests/"
     PYBOT_EXIT_STATUS="$?"
     set -e
 
@@ -614,23 +736,71 @@ function run_pybot () {
     all_options+=("--report" "none")
     all_options+=("--output" "${ARCHIVE_DIR}/output_info.xml")
     all_options+=("${ARCHIVE_DIR}/output.xml")
-    rebot "${all_options[@]}"
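+    # Rebot returns non-zero when failing tests are present; do not abort here.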
+    rebot "${all_options[@]}" || true
     popd || die "Change directory operation failed."
 }
 
 
-function select_tags () {
+function select_arch_os () {
+
+    # Set variables affected by local CPU architecture and operating system.
+    #
+    # Variables set:
+    # - VPP_VER_FILE - Name of file in CSIT dir containing vpp stable version.
+    # - IMAGE_VER_FILE - Name of file in CSIT dir containing the image name.
+    # - PKG_SUFFIX - Suffix of OS package file name, "rpm" or "deb."
 
     set -exuo pipefail
 
+    source /etc/os-release || die "Get OS release failed."
+
+    case "${ID}" in
+        "ubuntu"*)
+            case "${VERSION}" in
+                *"LTS (Focal Fossa)"*)
+                    IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU"
+                    VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_FOCAL"
+                    PKG_SUFFIX="deb"
+                    ;;
+                *)
+                    die "Unsupported Ubuntu version!"
+                    ;;
+            esac
+            ;;
+        *)
+            die "Unsupported distro or OS!"
+            ;;
+    esac
+
+    arch=$(uname -m) || {
+        die "Get CPU architecture failed."
+    }
+
+    case "${arch}" in
+        "aarch64")
+            IMAGE_VER_FILE="${IMAGE_VER_FILE}_ARM"
+            ;;
+        *)
+            ;;
+    esac
+}
+
+
+function select_tags () {
+
     # Variables read:
     # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
     # - TEST_CODE - String affecting test selection, usually jenkins job name.
+    # - DUT - CSIT test/ subdirectory, set while processing tags.
     # - TEST_TAG_STRING - String selecting tags, from gerrit comment.
     #   Can be unset.
     # - TOPOLOGIES_DIR - Path to existing directory with available topologies.
+    # - BASH_FUNCTION_DIR - Directory with input files to process.
     # Variables set:
     # - TAGS - Array of processed tag boolean expressions.
+    # - SELECTION_MODE - Selection criteria [test, suite, include, exclude].
+
+    set -exuo pipefail
 
     # NIC SELECTION
     start_pattern='^  TG:'
@@ -644,46 +814,111 @@ function select_tags () {
     reserved=$(sed "${sed_command}" "${WORKING_TOPOLOGY}" \
                | grep -hoP "model: \K.*" | sort -u)
     # All topologies DUT NICs - Selected topology DUT NICs
-    exclude_nics=($(comm -13 <(echo "${reserved}") <(echo "${available}")))
+    exclude_nics=($(comm -13 <(echo "${reserved}") <(echo "${available}"))) || {
+        die "Computation of excluded NICs failed."
+    }
 
-    # Select default NIC
+    # Select default NIC tag.
     case "${TEST_CODE}" in
+        *"3n-dnv"* | *"2n-dnv"*)
+            default_nic="nic_intel-x553"
+            ;;
         *"3n-tsh"*)
-            DEFAULT_NIC='nic_intel-x520-da2'
+            default_nic="nic_intel-x520-da2"
+            ;;
+        *"3n-skx"* | *"2n-skx"* | *"2n-clx"* | *"2n-zn2"*)
+            default_nic="nic_intel-xxv710"
+            ;;
+        *"2n-tx2"* | *"mrr-daily-master")
+            default_nic="nic_intel-xl710"
+            ;;
+        *"2n-aws"* | *"3n-aws"*)
+            default_nic="nic_amazon-nitro-50g"
             ;;
         *)
-            DEFAULT_NIC='nic_intel-x710'
+            default_nic="nic_intel-x710"
             ;;
     esac
 
+    sed_nic_sub_cmd="sed s/\${default_nic}/${default_nic}/"
+    awk_nics_sub_cmd=""
+    awk_nics_sub_cmd+='gsub("xxv710","25ge2p1xxv710");'
+    awk_nics_sub_cmd+='gsub("x710","10ge2p1x710");'
+    awk_nics_sub_cmd+='gsub("xl710","40ge2p1xl710");'
+    awk_nics_sub_cmd+='gsub("x520-da2","10ge2p1x520");'
+    awk_nics_sub_cmd+='gsub("x553","10ge2p1x553");'
+    awk_nics_sub_cmd+='gsub("cx556a","100ge2p1cx556a");'
+    awk_nics_sub_cmd+='gsub("e810cq","100ge2p1e810cq");'
+    awk_nics_sub_cmd+='gsub("vic1227","10ge2p1vic1227");'
+    awk_nics_sub_cmd+='gsub("vic1385","40ge2p1vic1385");'
+    awk_nics_sub_cmd+='gsub("nitro-50g","50ge1p1ENA");'
+    awk_nics_sub_cmd+='if ($9 =="drv_avf") drv="avf-";'
+    awk_nics_sub_cmd+='else if ($9 =="drv_rdma_core") drv ="rdma-";'
+    awk_nics_sub_cmd+='else if ($9 =="drv_af_xdp") drv ="af-xdp-";'
+    awk_nics_sub_cmd+='else drv="";'
+    awk_nics_sub_cmd+='print "*"$7"-" drv $11"-"$5"."$3"-"$1"-" drv $11"-"$5'
+
+    # Tag file directory shorthand.
+    tfd="${JOB_SPECS_DIR}"
     case "${TEST_CODE}" in
         # Select specific performance tests based on jenkins job type variable.
         *"ndrpdr-weekly"* )
-            readarray -t test_tag_array < "${BASH_FUNCTION_DIR}/mlr-weekly.txt"
+            readarray -t test_tag_array <<< $(grep -v "#" \
+                ${tfd}/mlr_weekly/${DUT}-${NODENESS}-${FLAVOR}.md |
+                awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+            SELECTION_MODE="--test"
             ;;
         *"mrr-daily"* )
-            readarray -t test_tag_array < "${BASH_FUNCTION_DIR}/mrr-daily.txt"
+            readarray -t test_tag_array <<< $(grep -v "#" \
+                ${tfd}/mrr_daily/${DUT}-${NODENESS}-${FLAVOR}.md |
+                awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+            SELECTION_MODE="--test"
             ;;
         *"mrr-weekly"* )
-            readarray -t test_tag_array < "${BASH_FUNCTION_DIR}/mrr-weekly.txt"
+            readarray -t test_tag_array <<< $(grep -v "#" \
+                ${tfd}/mrr_weekly/${DUT}-${NODENESS}-${FLAVOR}.md |
+                awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+            SELECTION_MODE="--test"
+            ;;
+        *"report-iterative"* )
+            test_sets=(${TEST_TAG_STRING//:/ })
+            # Run only one test set per run
+            report_file=${test_sets[0]}.md
+            readarray -t test_tag_array <<< $(grep -v "#" \
+                ${tfd}/report_iterative/${NODENESS}-${FLAVOR}/${report_file} |
+                awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+            SELECTION_MODE="--test"
+            ;;
+        *"report-coverage"* )
+            test_sets=(${TEST_TAG_STRING//:/ })
+            # Run only one test set per run
+            report_file=${test_sets[0]}.md
+            readarray -t test_tag_array <<< $(grep -v "#" \
+                ${tfd}/report_coverage/${NODENESS}-${FLAVOR}/${report_file} |
+                awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+            SELECTION_MODE="--test"
             ;;
         * )
             if [[ -z "${TEST_TAG_STRING-}" ]]; then
                 # If nothing is specified, we will run pre-selected tests by
                 # following tags.
-                test_tag_array=("mrrAND${DEFAULT_NIC}AND1cAND64bANDip4base"
-                                "mrrAND${DEFAULT_NIC}AND1cAND78bANDip6base"
-                                "mrrAND${DEFAULT_NIC}AND1cAND64bANDl2bdbase"
-                                "mrrAND${DEFAULT_NIC}AND1cAND64bANDl2xcbase"
+                test_tag_array=("mrrAND${default_nic}AND1cAND64bANDip4base"
+                                "mrrAND${default_nic}AND1cAND78bANDip6base"
+                                "mrrAND${default_nic}AND1cAND64bANDl2bdbase"
+                                "mrrAND${default_nic}AND1cAND64bANDl2xcbase"
                                 "!dot1q" "!drv_avf")
             else
                 # If trigger contains tags, split them into array.
                 test_tag_array=(${TEST_TAG_STRING//:/ })
             fi
+            SELECTION_MODE="--include"
             ;;
     esac
 
     # Blacklisting certain tags per topology.
+    #
+    # Reasons for blacklisting:
+    # - ipsechw - Blacklisted on testbeds without crypto hardware accelerator.
     case "${TEST_CODE}" in
         *"2n-skx"*)
             test_tag_array+=("!ipsechw")
@@ -693,28 +928,37 @@ function select_tags () {
             # Not enough nic_intel-xxv710 to support double link tests.
             test_tag_array+=("!3_node_double_link_topoANDnic_intel-xxv710")
             ;;
-        *"3n-tsh"*)
+        *"2n-clx"*)
+            test_tag_array+=("!ipsechw")
+            ;;
+        *"2n-zn2"*)
+            test_tag_array+=("!ipsechw")
+            ;;
+        *"2n-dnv"*)
             test_tag_array+=("!ipsechw")
             test_tag_array+=("!memif")
             test_tag_array+=("!srv6_proxy")
             test_tag_array+=("!vhost")
             test_tag_array+=("!vts")
+            test_tag_array+=("!drv_avf")
             ;;
-        *"3n-hsw"*)
-            # TODO: Introduce NOIOMMU version of AVF tests.
-            # TODO: Make (both) AVF tests work on Haswell,
-            # or document why (some of) it is not possible.
-            # https://github.com/FDio/vpp/blob/master/src/plugins/avf/README.md
+        *"2n-tx2"*)
+            test_tag_array+=("!ipsechw")
+            ;;
+        *"3n-dnv"*)
+            test_tag_array+=("!memif")
+            test_tag_array+=("!srv6_proxy")
+            test_tag_array+=("!vhost")
+            test_tag_array+=("!vts")
             test_tag_array+=("!drv_avf")
-            # All cards have access to QAT. But only one card (xl710)
-            # resides in same NUMA as QAT. Other cards must go over QPI
-            # which we do not want to even run.
-            test_tag_array+=("!ipsechwNOTnic_intel-xl710")
             ;;
-        *)
-            # Default to 3n-hsw due to compatibility.
+        *"3n-tsh"*)
+            # 3n-tsh only has x520 NICs, which do not work with AVF.
             test_tag_array+=("!drv_avf")
-            test_tag_array+=("!ipsechwNOTnic_intel-xl710")
+            test_tag_array+=("!ipsechw")
+            ;;
+        *"2n-aws"* | *"3n-aws"*)
+            test_tag_array+=("!ipsechw")
             ;;
     esac
 
@@ -722,10 +966,8 @@ function select_tags () {
     test_tag_array+=("${exclude_nics[@]/#/!NIC_}")
 
     TAGS=()
+    prefix=""
 
-    # We will prefix with perftest to prevent running other tests
-    # (e.g. Functional).
-    prefix="perftestAND"
     set +x
     if [[ "${TEST_CODE}" == "vpp-"* ]]; then
         # Automatic prefixing for VPP jobs to limit the NIC used and
@@ -733,13 +975,26 @@ function select_tags () {
         if [[ "${TEST_TAG_STRING-}" == *"nic_"* ]]; then
             prefix="${prefix}mrrAND"
         else
-            prefix="${prefix}mrrAND${DEFAULT_NIC}AND"
+            prefix="${prefix}mrrAND${default_nic}AND"
         fi
     fi
     for tag in "${test_tag_array[@]}"; do
         if [[ "${tag}" == "!"* ]]; then
             # Exclude tags are not prefixed.
             TAGS+=("${tag}")
+        elif [[ "${tag}" == " "* || "${tag}" == *"perftest"* ]]; then
+            # Badly formed tag expressions can trigger way too many tests.
+            set -x
+            warn "The following tag expression hints at bad trigger: ${tag}"
+            warn "Possible cause: Multiple triggers in a single comment."
+            die "Aborting to avoid triggering too many tests."
+        elif [[ "${tag}" == *"OR"* ]]; then
+            # If OR had higher precedence than AND, it would be useful here.
+            # Some people think it does, thus triggering way too many tests.
+            set -x
+            warn "The following tag expression hints at bad trigger: ${tag}"
+            warn "Operator OR has lower precedence than AND. Use space instead."
+            die "Aborting to avoid triggering too many tests."
         elif [[ "${tag}" != "" && "${tag}" != "#"* ]]; then
             # Empty and comment lines are skipped.
             # Other lines are normal tags, they are to be prefixed.
@@ -750,10 +1005,85 @@ function select_tags () {
 }
 
 
-function select_vpp_device_tags () {
+function select_topology () {
+
+    # Variables read:
+    # - NODENESS - Node multiplicity of testbed, either "2n" or "3n".
+    # - FLAVOR - Node flavor string, e.g. "clx" or "skx".
+    # - CSIT_DIR - Path to existing root of local CSIT git repository.
+    # - TOPOLOGIES_DIR - Path to existing directory with available topologies.
+    # Variables set:
+    # - TOPOLOGIES - Array of paths to suitable topology yaml files.
+    # - TOPOLOGIES_TAGS - Tag expression selecting tests for the topology.
+    # Functions called:
+    # - die - Print to stderr and exit.
 
     set -exuo pipefail
 
+    case_text="${NODENESS}_${FLAVOR}"
+    case "${case_text}" in
+        "1n_vbox")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
+            TOPOLOGIES_TAGS="2_node_single_link_topo"
+            ;;
+        "1n_skx" | "1n_tx2")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
+            TOPOLOGIES_TAGS="2_node_single_link_topo"
+            ;;
+        "2n_skx")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_skx*.yaml )
+            TOPOLOGIES_TAGS="2_node_*_link_topo"
+            ;;
+        "2n_zn2")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_zn2*.yaml )
+            TOPOLOGIES_TAGS="2_node_*_link_topo"
+            ;;
+        "3n_skx")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_skx*.yaml )
+            TOPOLOGIES_TAGS="3_node_*_link_topo"
+            ;;
+        "2n_clx")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_clx*.yaml )
+            TOPOLOGIES_TAGS="2_node_*_link_topo"
+            ;;
+        "2n_dnv")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_dnv*.yaml )
+            TOPOLOGIES_TAGS="2_node_single_link_topo"
+            ;;
+        "3n_dnv")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_dnv*.yaml )
+            TOPOLOGIES_TAGS="3_node_single_link_topo"
+            ;;
+        "3n_tsh")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_tsh*.yaml )
+            TOPOLOGIES_TAGS="3_node_single_link_topo"
+            ;;
+        "2n_tx2")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_tx2*.yaml )
+            TOPOLOGIES_TAGS="2_node_single_link_topo"
+            ;;
+        "2n_aws")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_aws*.yaml )
+            TOPOLOGIES_TAGS="2_node_single_link_topo"
+            ;;
+        "3n_aws")
+            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_aws*.yaml )
+            TOPOLOGIES_TAGS="3_node_single_link_topo"
+            ;;
+        *)
+            # No falling back to default, that should have been done
+            # by the function which has set NODENESS and FLAVOR.
+            die "Unknown specification: ${case_text}"
+    esac
+
+    if [[ -z "${TOPOLOGIES-}" ]]; then
+        die "No applicable topology found!"
+    fi
+}
+
+
+function select_vpp_device_tags () {
+
     # Variables read:
     # - TEST_CODE - String affecting test selection, usually jenkins job name.
     # - TEST_TAG_STRING - String selecting tags, from gerrit comment.
@@ -761,8 +1091,10 @@ function select_vpp_device_tags () {
     # Variables set:
     # - TAGS - Array of processed tag boolean expressions.
 
+    set -exuo pipefail
+
     case "${TEST_CODE}" in
-        # Select specific performance tests based on jenkins job type variable.
+        # Select specific device tests based on jenkins job type variable.
         * )
             if [[ -z "${TEST_TAG_STRING-}" ]]; then
                 # If nothing is specified, we will run pre-selected tests by
@@ -773,12 +1105,27 @@ function select_vpp_device_tags () {
                 # If trigger contains tags, split them into array.
                 test_tag_array=(${TEST_TAG_STRING//:/ })
             fi
+            SELECTION_MODE="--include"
+            ;;
+    esac
+
+    # Blacklisting certain tags per topology.
+    #
+    # Reasons for blacklisting:
+    # - avf - AVF is not possible to run on the enic driver of VirtualBox.
+    # - vhost - VirtualBox does not support nested virtualization on Intel CPUs.
+    case "${TEST_CODE}" in
+        *"1n-vbox"*)
+            test_tag_array+=("!avf")
+            test_tag_array+=("!vhost")
+            ;;
+        *)
             ;;
     esac
 
     TAGS=()
 
-    # We will prefix with perftest to prevent running other tests
+    # We will prefix with devicetest to prevent running other tests
     # (e.g. Functional).
     prefix="devicetestAND"
     if [[ "${TEST_CODE}" == "vpp-"* ]]; then
@@ -795,92 +1142,33 @@ function select_vpp_device_tags () {
     done
 }
 
-function select_os () {
 
-    set -exuo pipefail
+function set_environment_variables () {
 
+    # Depending on testbed topology, overwrite defaults set in the
+    # resources/libraries/python/Constants.py file.
+    #
+    # Variables read:
+    # - TEST_CODE - String affecting test selection, usually jenkins job name.
     # Variables set:
-    # - VPP_VER_FILE - Name of File in CSIT dir containing vpp stable version.
-    # - IMAGE_VER_FILE - Name of File in CSIT dir containing the image name.
-    # - PKG_SUFFIX - Suffix of OS package file name, "rpm" or "deb."
-
-    os_id=$(grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g') || {
-        die "Get OS release failed."
-    }
-
-    case "${os_id}" in
-        "ubuntu"*)
-            IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU"
-            VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_BIONIC"
-            PKG_SUFFIX="deb"
-            ;;
-        "centos"*)
-            IMAGE_VER_FILE="VPP_DEVICE_IMAGE_CENTOS"
-            VPP_VER_FILE="VPP_STABLE_VER_CENTOS"
-            PKG_SUFFIX="rpm"
-            ;;
-        *)
-            die "Unable to identify distro or os from ${OS}"
-            ;;
-    esac
-}
-
-
-function select_topology () {
+    # See specific cases
 
     set -exuo pipefail
 
-    # Variables read:
-    # - NODENESS - Node multiplicity of testbed, either "2n" or "3n".
-    # - FLAVOR - Node flavor string, currently either "hsw" or "skx".
-    # - CSIT_DIR - Path to existing root of local CSIT git repository.
-    # - TOPOLOGIES_DIR - Path to existing directory with available topologies.
-    # Variables set:
-    # - TOPOLOGIES - Array of paths to suitable topology yaml files.
-    # - TOPOLOGIES_TAGS - Tag expression selecting tests for the topology.
-    # Functions called:
-    # - die - Print to stderr and exit.
-
-    case_text="${NODENESS}_${FLAVOR}"
-    case "${case_text}" in
-        # TODO: Move tags to "# Blacklisting certain tags per topology" section.
-        "1n_vbox")
-            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
-            TOPOLOGIES_TAGS="2_node_single_link_topo"
-            ;;
-        "1n_skx")
-            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
-            TOPOLOGIES_TAGS="2_node_single_link_topo"
-            ;;
-        "2n_skx")
-            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_skx*.yaml )
-            TOPOLOGIES_TAGS="2_node_*_link_topo"
-            ;;
-        "3n_skx")
-            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_skx*.yaml )
-            TOPOLOGIES_TAGS="3_node_*_link_topo"
-            ;;
-        "3n_hsw")
-            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_hsw*.yaml )
-            TOPOLOGIES_TAGS="3_node_single_link_topo"
-            ;;
-        "3n_tsh")
-            TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_tsh*.yaml )
-            TOPOLOGIES_TAGS="3_node_*_link_topo"
+    case "${TEST_CODE}" in
+        *"2n-aws"* | *"3n-aws"*)
+            # T-Rex 2.88 workaround for ENA NICs
+            export TREX_RX_DESCRIPTORS_COUNT=1024
+            export TREX_EXTRA_CMDLINE="--mbuf-factor 19"
+            # Settings to prevent duration stretching
+            export PERF_TRIAL_STL_DELAY=0.1
             ;;
-        *)
-            # No falling back to 3n_hsw default, that should have been done
-            # by the function which has set NODENESS and FLAVOR.
-            die "Unknown specification: ${case_text}"
     esac
-
-    if [[ -z "${TOPOLOGIES-}" ]]; then
-        die "No applicable topology found!"
-    fi
 }
 
 
 function untrap_and_unreserve_testbed () {
+
     # Use this as a trap function to ensure testbed does not remain reserved.
     # Perhaps call directly before script exit, to free testbed for other jobs.
     # This function is smart enough to avoid multiple unreservations (so safe).
@@ -899,6 +1187,7 @@ function untrap_and_unreserve_testbed () {
     # - EXIT - Failure to untrap is reported, but ignored otherwise.
     # Functions called:
     # - die - Print to stderr and exit.
+    # - ansible_playbook - Perform an action using ansible, see ansible.sh.
 
     set -xo pipefail
     set +eu  # We do not want to exit early in a "teardown" function.
@@ -908,10 +1197,17 @@ function untrap_and_unreserve_testbed () {
         set -eu
         warn "Testbed looks unreserved already. Trap removal failed before?"
     else
-        cleanup_topo || true
-        python "${PYTHON_SCRIPTS_DIR}/topo_reservation.py" -c -t "${wt}" || {
+        ansible_playbook "cleanup" || true
+        python3 "${PYTHON_SCRIPTS_DIR}/topo_reservation.py" -c -t "${wt}" || {
             die "${1:-FAILED TO UNRESERVE, FIX MANUALLY.}" 2
         }
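+        # AWS testbeds are created per run (see prepare_topology), so destroy them.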
+        case "${TEST_CODE}" in
+            *"2n-aws"* | *"3n-aws"*)
+                terraform_destroy || die "Failed to call terraform destroy."
+                ;;
+            *)
+                ;;
+        esac
         WORKING_TOPOLOGY=""
         set -eu
     fi
@@ -919,10 +1215,13 @@ function untrap_and_unreserve_testbed () {
 
 
 function warn () {
+
     # Print the message to standard error.
     #
     # Arguments:
     # - ${@} - The text of the message.
 
+    set -exuo pipefail
+
     echo "$@" >&2
 }