-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Copyright (c) 2019 PANTHEON.tech and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2021 PANTHEON.tech and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
source "${BASH_FUNCTION_DIR}/device.sh" || {
die "Source failed!"
}
-
device_image="$(< ${CSIT_DIR}/${IMAGE_VER_FILE})"
case_text="${NODENESS}_${FLAVOR}"
case "${case_text}" in
# We execute reservation over csit-shim-dcr (ssh) which runs sourced
# script's functions. Env variables are read from ssh output
# back to localhost for further processing.
- hostname=$(grep search /etc/resolv.conf | cut -d' ' -f3) || die
- ssh="ssh root@${hostname} -p 6022"
+ # Shim and Jenkins executor are in the same network on the same host
+ # Connect to docker's default gateway IP and shim's exposed port
+ ssh="ssh root@172.17.0.1 -p 6022"
run="activate_wrapper ${NODENESS} ${FLAVOR} ${device_image}"
+ # The "declare -f" output is long and boring.
+ set +x
# backtics to avoid https://midnight-commander.org/ticket/2142
env_vars=`${ssh} "$(declare -f); ${run}"` || {
die "Topology reservation via shim-dcr failed!"
}
+ set -x
set -a
source <(echo "$env_vars" | grep -v /usr/bin/docker) || {
die "Source failed!"
die "Trap attempt failed, please cleanup manually. Aborting!"
}
+ parse_env_variables || die "Parse of environment variables failed!"
+
# Replace all variables in template with those in environment.
source <(echo 'cat <<EOF >topo.yml'; cat ${TOPOLOGIES[0]}; echo EOF;) || {
die "Topology file create failed!"
}
- WORKING_TOPOLOGY="/tmp/topology.yaml"
+ WORKING_TOPOLOGY="${CSIT_DIR}/topologies/available/vpp_device.yaml"
mv topo.yml "${WORKING_TOPOLOGY}" || {
die "Topology move failed!"
}
env_dir="${root_path}/env"
req_path=${2-$CSIT_DIR/requirements.txt}
rm -rf "${env_dir}" || die "Failed to clean previous virtualenv."
- pip install --upgrade virtualenv || {
+ pip3 install virtualenv==20.0.20 || {
die "Virtualenv package install failed."
}
- virtualenv "${env_dir}" || {
- die "Virtualenv creation failed."
+ virtualenv --no-download --python=$(which python3) "${env_dir}" || {
+ die "Virtualenv creation for $(which python3) failed."
}
set +u
source "${env_dir}/bin/activate" || die "Virtualenv activation failed."
set -u
- pip install --upgrade -r "${req_path}" || {
+ pip3 install -r "${req_path}" || {
die "Requirements installation failed."
}
# Most CSIT Python scripts assume PYTHONPATH is set and exported.
set -exuo pipefail
- tar c "${GENERATED_DIR}/tests" | xz -9e > "${ARCHIVE_DIR}/tests.tar.xz" || {
+ tar c "${GENERATED_DIR}/tests" | xz -3 > "${ARCHIVE_DIR}/tests.tar.xz" || {
die "Error creating archive of generated tests."
}
}
fi
}
-function cleanup_topo () {
-
- # Variables read:
- # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
- # - PYTHON_SCRIPTS_DIR - Path to directory holding the reservation script.
-
- set -exuo pipefail
-
- python "${PYTHON_SCRIPTS_DIR}/topo_cleanup.py" -t "${WORKING_TOPOLOGY}"
- # Not using "|| die" as some callers might want to ignore errors,
- # e.g. in teardowns, such as unreserve.
-}
-
function common_dirs () {
# Variables set:
# - BASH_FUNCTION_DIR - Path to existing directory this file is located in.
# - CSIT_DIR - Path to existing root of local CSIT git repository.
- # - TOPOLOGIES_DIR - Path to existing directory with available tpologies.
+ # - TOPOLOGIES_DIR - Path to existing directory with available topologies.
+ # - JOB_SPECS_DIR - Path to existing directory with job test specifications.
# - RESOURCES_DIR - Path to existing CSIT subdirectory "resources".
# - TOOLS_DIR - Path to existing resources subdirectory "tools".
# - PYTHON_SCRIPTS_DIR - Path to existing tools subdirectory "scripts".
- # - ARCHIVE_DIR - Path to created CSIT subdirectory "archive".
+ # - ARCHIVE_DIR - Path to created CSIT subdirectory "archives".
+ # The name is chosen to match what ci-management expects.
# - DOWNLOAD_DIR - Path to created CSIT subdirectory "download_dir".
# - GENERATED_DIR - Path to created CSIT subdirectory "generated".
# Directories created if not present:
TOPOLOGIES_DIR=$(readlink -e "${CSIT_DIR}/topologies/available") || {
die "Readlink failed."
}
+ JOB_SPECS_DIR=$(readlink -e "${CSIT_DIR}/docs/job_specs") || {
+ die "Readlink failed."
+ }
RESOURCES_DIR=$(readlink -e "${CSIT_DIR}/resources") || {
die "Readlink failed."
}
die "Readlink failed."
}
- ARCHIVE_DIR=$(readlink -f "${CSIT_DIR}/archive") || {
+ ARCHIVE_DIR=$(readlink -f "${CSIT_DIR}/archives") || {
die "Readlink failed."
}
mkdir -p "${ARCHIVE_DIR}" || die "Mkdir failed."
# - TAGS - Array variable holding selected tag boolean expressions.
# - TOPOLOGIES_TAGS - Tag boolean expression filtering tests for topology.
# - TEST_CODE - The test selection string from environment or argument.
+ # - SELECTION_MODE - Selection criteria [test, suite, include, exclude].
# Variables set:
# - PYBOT_ARGS - String holding part of all arguments for pybot.
# - EXPANDED_TAGS - Array of strings pybot arguments compiled from tags.
*"device"*)
PYBOT_ARGS+=("--suite" "tests.${DUT}.device")
;;
- *"func"*)
- PYBOT_ARGS+=("--suite" "tests.${DUT}.func")
- ;;
*"perf"*)
PYBOT_ARGS+=("--suite" "tests.${DUT}.perf")
;;
if [[ ${tag} == "!"* ]]; then
EXPANDED_TAGS+=("--exclude" "${tag#$"!"}")
else
- EXPANDED_TAGS+=("--include" "${TOPOLOGIES_TAGS}AND${tag}")
+ if [[ ${SELECTION_MODE} == "--test" ]]; then
+ EXPANDED_TAGS+=("--test" "${tag}")
+ else
+ EXPANDED_TAGS+=("--include" "${TOPOLOGIES_TAGS}AND${tag}")
+ fi
fi
done
-}
-
-function copy_archives () {
-
- # Create additional archive if workspace variable is set.
- # This way if script is running in jenkins all will be
- # automatically archived to logs.fd.io.
- #
- # Variables read:
- # - WORKSPACE - Jenkins workspace, copy only if the value is not empty.
- # Can be unset, then it speeds up manual testing.
- # - ARCHIVE_DIR - Path to directory with content to be copied.
- # Directories updated:
- # - ${WORKSPACE}/archives/ - Created if does not exist.
- # Content of ${ARCHIVE_DIR}/ is copied here.
- # Functions called:
- # - die - Print to stderr and exit.
-
- set -exuo pipefail
-
- if [[ -n "${WORKSPACE-}" ]]; then
- mkdir -p "${WORKSPACE}/archives/" || die "Archives dir create failed."
- cp -rf "${ARCHIVE_DIR}"/* "${WORKSPACE}/archives" || die "Copy failed."
+ if [[ ${SELECTION_MODE} == "--test" ]]; then
+ EXPANDED_TAGS+=("--include" "${TOPOLOGIES_TAGS}")
fi
}
case_text="${NODENESS}_${FLAVOR}"
case "${case_text}" in
"1n_skx" | "1n_tx2")
- hostname=$(grep search /etc/resolv.conf | cut -d' ' -f3) || die
- ssh="ssh root@${hostname} -p 6022"
+ ssh="ssh root@172.17.0.1 -p 6022"
env_vars=$(env | grep CSIT_ | tr '\n' ' ' ) || die
+ # The "declare -f" output is long and boring.
+ set +x
${ssh} "$(declare -f); deactivate_wrapper ${env_vars}" || {
die "Topology cleanup via shim-dcr failed!"
}
+ set -x
;;
"1n_vbox")
enter_mutex || die
cp -r "${CSIT_DIR}/tests" "${GENERATED_DIR}/tests" || die
cmd_line=("find" "${GENERATED_DIR}/tests" "-type" "f")
cmd_line+=("-executable" "-name" "*.py")
- file_list=$("${cmd_line[@]}") || die
+    # We sort the file list, so log output can be compared between runs.
+ file_list=$("${cmd_line[@]}" | sort) || die
for gen in ${file_list}; do
directory="$(dirname "${gen}")" || die
NODENESS="1n"
FLAVOR="tx2"
;;
+ *"2n-aws"*)
+ NODENESS="2n"
+ FLAVOR="aws"
+ ;;
+ *"3n-aws"*)
+ NODENESS="3n"
+ FLAVOR="aws"
+ ;;
*"2n-skx"*)
NODENESS="2n"
FLAVOR="skx"
;;
+ *"2n-zn2"*)
+ NODENESS="2n"
+ FLAVOR="zn2"
+ ;;
*"3n-skx"*)
NODENESS="3n"
FLAVOR="skx"
NODENESS="3n"
FLAVOR="dnv"
;;
+ *"2n-tx2"*)
+ NODENESS="2n"
+ FLAVOR="tx2"
+ ;;
*"3n-tsh"*)
NODENESS="3n"
FLAVOR="tsh"
;;
- *)
- # Fallback to 3-node Haswell by default (backward compatibility)
- NODENESS="3n"
- FLAVOR="hsw"
- ;;
esac
}
# - TEST_CODE - The test selection string from environment or argument.
# Variables set:
# - TEST_TAG_STRING - The string following trigger word in gerrit comment.
- # May be empty, not set on event types not adding comment.
+ # May be empty, or even not set on event types not adding comment.
# TODO: ci-management scripts no longer need to perform this.
set -exuo pipefail
- trigger=""
if [[ "${GERRIT_EVENT_TYPE-}" == "comment-added" ]]; then
case "${TEST_CODE}" in
*"device"*)
- # On parsing error, ${trigger} stays empty.
- trigger="$(echo "${GERRIT_EVENT_COMMENT_TEXT}" \
- | grep -oE '(devicetest$|devicetest[[:space:]].+$)')" \
- || true
- # Set test tags as string.
- TEST_TAG_STRING="${trigger#$"devicetest"}"
+ trigger="devicetest"
;;
*"perf"*)
- # On parsing error, ${trigger} stays empty.
- comment="${GERRIT_EVENT_COMMENT_TEXT}"
- # As "perftest" can be followed by something, we substitute it.
- comment="${comment/perftest-2n/perftest}"
- comment="${comment/perftest-3n/perftest}"
- comment="${comment/perftest-hsw/perftest}"
- comment="${comment/perftest-skx/perftest}"
- comment="${comment/perftest-dnv/perftest}"
- comment="${comment/perftest-tsh/perftest}"
- tag_string="$(echo "${comment}" \
- | grep -oE '(perftest$|perftest[[:space:]].+$)' || true)"
- # Set test tags as string.
- TEST_TAG_STRING="${tag_string#$"perftest"}"
+ trigger="perftest"
;;
*)
die "Unknown specification: ${TEST_CODE}"
esac
+ # Ignore lines not containing the trigger word.
+ comment=$(fgrep "${trigger}" <<< "${GERRIT_EVENT_COMMENT_TEXT}" || true)
+ # The vpp-csit triggers trail stuff we are not interested in.
+ # Removing them and trigger word: https://unix.stackexchange.com/a/13472
+ # (except relying on \s whitespace, \S non-whitespace and . both).
+ # The last string is concatenated, only the middle part is expanded.
+ cmd=("grep" "-oP" '\S*'"${trigger}"'\S*\s\K.+$') || die "Unset trigger?"
+ # On parsing error, TEST_TAG_STRING probably stays empty.
+ TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
+ if [[ -z "${TEST_TAG_STRING-}" ]]; then
+ # Probably we got a base64 encoded comment.
+ comment="${GERRIT_EVENT_COMMENT_TEXT}"
+ comment=$(base64 --decode <<< "${comment}" || true)
+ comment=$(fgrep "${trigger}" <<< "${comment}" || true)
+ TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
+ fi
+ if [[ -n "${TEST_TAG_STRING-}" ]]; then
+ test_tag_array=(${TEST_TAG_STRING})
+ if [[ "${test_tag_array[0]}" == "icl" ]]; then
+ export GRAPH_NODE_VARIANT="icl"
+ TEST_TAG_STRING="${test_tag_array[@]:1}" || true
+ elif [[ "${test_tag_array[0]}" == "skx" ]]; then
+ export GRAPH_NODE_VARIANT="skx"
+ TEST_TAG_STRING="${test_tag_array[@]:1}" || true
+ fi
+ fi
fi
}
}
+function move_archives () {
+
+    # Move archive directory to top of workspace, if not already there.
+    #
+    # ARCHIVE_DIR is positioned relative to CSIT_DIR,
+    # but in some jobs CSIT_DIR is not same as WORKSPACE
+    # (e.g. under VPP_DIR). To simplify ci-management settings,
+    # we want to move the data to the top. We do not want simple copy,
+    # as ci-management is eager with recursive search.
+    #
+    # As some scripts may call this function multiple times,
+    # the actual implementation uses copying and deletion,
+    # so the workspace gets "union" of contents (except overwrites on conflict).
+    # The consequence is empty ARCHIVE_DIR remaining after this call.
+    #
+    # As the source directory is emptied,
+    # the check for dirs being different is essential.
+    #
+    # Variables read:
+    # - WORKSPACE - Jenkins workspace, move only if the value is not empty.
+    #   Can be unset, then it speeds up manual testing.
+    # - ARCHIVE_DIR - Path to directory with content to be moved.
+    # Directories updated:
+    # - ${WORKSPACE}/archives/ - Created if does not exist.
+    #   Content of ${ARCHIVE_DIR}/ is moved.
+    # Functions called:
+    # - die - Print to stderr and exit.
+
+    set -exuo pipefail
+
+    if [[ -n "${WORKSPACE-}" ]]; then
+        target=$(readlink -f "${WORKSPACE}/archives") || die "Readlink failed."
+        if [[ "${target}" != "${ARCHIVE_DIR}" ]]; then
+            mkdir -p "${target}" || die "Archives dir create failed."
+            cp -rf "${ARCHIVE_DIR}"/* "${target}" || die "Copy failed."
+            rm -rf "${ARCHIVE_DIR}"/* || die "Delete failed."
+        fi
+    fi
+}
+
+
+function prepare_topology () {
+
+    # Prepare virtual testbed topology if needed based on flavor.
+
+    # Variables read:
+    # - NODENESS - Node multiplicity of testbed, either "2n" or "3n".
+    # - FLAVOR - Node flavor string, e.g. "clx" or "skx".
+    # Functions called:
+    # - die - Print to stderr and exit.
+    # - terraform_init - Terraform init topology.
+    # - terraform_apply - Terraform apply topology.
+
+    set -exuo pipefail
+
+    case_text="${NODENESS}_${FLAVOR}"
+    case "${case_text}" in
+        # Both AWS flavors need identical terraform provisioning steps,
+        # so the arms are merged. Non-AWS testbeds need no preparation.
+        "2n_aws" | "3n_aws")
+            terraform_init || die "Failed to call terraform init."
+            terraform_apply || die "Failed to call terraform apply."
+            ;;
+    esac
+}
+
+
function reserve_and_cleanup_testbed () {
# Reserve physical testbed, perform cleanup, register trap to unreserve.
# - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
# Functions called:
# - die - Print to stderr and exit.
+ # - ansible_playbook - Perform an action using ansible, see ansible.sh
# Traps registered:
# - EXIT - Calls cancel_all for ${WORKING_TOPOLOGY}.
set +e
scrpt="${PYTHON_SCRIPTS_DIR}/topo_reservation.py"
opts=("-t" "${topo}" "-r" "${BUILD_TAG:-Unknown}")
- python "${scrpt}" "${opts[@]}"
+ python3 "${scrpt}" "${opts[@]}"
result="$?"
set -e
if [[ "${result}" == "0" ]]; then
}
die "Trap attempt failed, unreserve succeeded. Aborting."
}
- # Cleanup check.
+ # Cleanup + calibration checks
set +e
- ansible_hosts "cleanup"
+ ansible_playbook "cleanup, calibration"
result="$?"
set -e
if [[ "${result}" == "0" ]]; then
pushd "${CSIT_DIR}" || die "Change directory operation failed."
set +e
- pybot "${all_options[@]}" "${GENERATED_DIR}/tests/"
+ robot "${all_options[@]}" "${GENERATED_DIR}/tests/"
PYBOT_EXIT_STATUS="$?"
set -e
set -exuo pipefail
- os_id=$(grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g') || {
- die "Get OS release failed."
- }
+ source /etc/os-release || die "Get OS release failed."
- case "${os_id}" in
+ case "${ID}" in
"ubuntu"*)
- IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU"
- VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_BIONIC"
- PKG_SUFFIX="deb"
- ;;
- "centos"*)
- IMAGE_VER_FILE="VPP_DEVICE_IMAGE_CENTOS"
- VPP_VER_FILE="VPP_STABLE_VER_CENTOS"
- PKG_SUFFIX="rpm"
+ case "${VERSION}" in
+ *"LTS (Focal Fossa)"*)
+ IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU"
+ VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_FOCAL"
+ PKG_SUFFIX="deb"
+ ;;
+ *)
+ die "Unsupported Ubuntu version!"
+ ;;
+ esac
;;
*)
- die "Unable to identify distro or os from ${os_id}"
+ die "Unsupported distro or OS!"
;;
esac
# Variables read:
# - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
# - TEST_CODE - String affecting test selection, usually jenkins job name.
+ # - DUT - CSIT test/ subdirectory, set while processing tags.
# - TEST_TAG_STRING - String selecting tags, from gerrit comment.
# Can be unset.
# - TOPOLOGIES_DIR - Path to existing directory with available tpologies.
# - BASH_FUNCTION_DIR - Directory with input files to process.
# Variables set:
# - TAGS - Array of processed tag boolean expressions.
+ # - SELECTION_MODE - Selection criteria [test, suite, include, exclude].
set -exuo pipefail
*"3n-tsh"*)
default_nic="nic_intel-x520-da2"
;;
- *"3n-skx"* | *"2n-skx"* | *"2n-clx"*)
+ *"3n-skx"* | *"2n-skx"* | *"2n-clx"* | *"2n-zn2"*)
default_nic="nic_intel-xxv710"
;;
- *"3n-hsw"*)
+ *"2n-tx2"* | *"mrr-daily-master")
default_nic="nic_intel-xl710"
;;
+ *"2n-aws"* | *"3n-aws"*)
+ default_nic="nic_amazon-nitro-50g"
+ ;;
*)
default_nic="nic_intel-x710"
;;
esac
sed_nic_sub_cmd="sed s/\${default_nic}/${default_nic}/"
+ awk_nics_sub_cmd=""
+ awk_nics_sub_cmd+='gsub("xxv710","25ge2p1xxv710");'
+ awk_nics_sub_cmd+='gsub("x710","10ge2p1x710");'
+ awk_nics_sub_cmd+='gsub("xl710","40ge2p1xl710");'
+ awk_nics_sub_cmd+='gsub("x520-da2","10ge2p1x520");'
+ awk_nics_sub_cmd+='gsub("x553","10ge2p1x553");'
+ awk_nics_sub_cmd+='gsub("cx556a","100ge2p1cx556a");'
+ awk_nics_sub_cmd+='gsub("e810cq","100ge2p1e810cq");'
+ awk_nics_sub_cmd+='gsub("vic1227","10ge2p1vic1227");'
+ awk_nics_sub_cmd+='gsub("vic1385","40ge2p1vic1385");'
+ awk_nics_sub_cmd+='gsub("nitro-50g","50ge1p1ENA");'
+ awk_nics_sub_cmd+='if ($9 =="drv_avf") drv="avf-";'
+ awk_nics_sub_cmd+='else if ($9 =="drv_rdma_core") drv ="rdma-";'
+ awk_nics_sub_cmd+='else if ($9 =="drv_af_xdp") drv ="af-xdp-";'
+ awk_nics_sub_cmd+='else drv="";'
+ awk_nics_sub_cmd+='print "*"$7"-" drv $11"-"$5"."$3"-"$1"-" drv $11"-"$5'
+
# Tag file directory shorthand.
- tfd="${BASH_FUNCTION_DIR}"
+ tfd="${JOB_SPECS_DIR}"
case "${TEST_CODE}" in
# Select specific performance tests based on jenkins job type variable.
+ *"vpp-device"* )
+ readarray -t test_tag_array <<< $(grep -v "#" \
+ ${tfd}/vpp_device/${DUT}-${NODENESS}-${FLAVOR}.md |
+ awk {"$awk_nics_sub_cmd"} || echo "devicetest") || die
+ SELECTION_MODE="--test"
+ ;;
*"ndrpdr-weekly"* )
- readarray -t test_tag_array < "${tfd}/mlr-weekly.txt" || die
+ readarray -t test_tag_array <<< $(grep -v "#" \
+ ${tfd}/mlr_weekly/${DUT}-${NODENESS}-${FLAVOR}.md |
+ awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+ SELECTION_MODE="--test"
;;
*"mrr-daily"* )
- readarray -t test_tag_array <<< $(${sed_nic_sub_cmd} \
- ${tfd}/mrr-daily-${FLAVOR}.txt) || die
+ readarray -t test_tag_array <<< $(grep -v "#" \
+ ${tfd}/mrr_daily/${DUT}-${NODENESS}-${FLAVOR}.md |
+ awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+ SELECTION_MODE="--test"
;;
*"mrr-weekly"* )
- readarray -t test_tag_array < "${tfd}/mrr-weekly.txt" || die
+ readarray -t test_tag_array <<< $(grep -v "#" \
+ ${tfd}/mrr_weekly/${DUT}-${NODENESS}-${FLAVOR}.md |
+ awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+ SELECTION_MODE="--test"
+ ;;
+ *"report-iterative"* )
+ test_sets=(${TEST_TAG_STRING//:/ })
+ # Run only one test set per run
+ report_file=${test_sets[0]}.md
+ readarray -t test_tag_array <<< $(grep -v "#" \
+ ${tfd}/report_iterative/${NODENESS}-${FLAVOR}/${report_file} |
+ awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+ SELECTION_MODE="--test"
+ ;;
+ *"report-coverage"* )
+ test_sets=(${TEST_TAG_STRING//:/ })
+ # Run only one test set per run
+ report_file=${test_sets[0]}.md
+ readarray -t test_tag_array <<< $(grep -v "#" \
+ ${tfd}/report_coverage/${NODENESS}-${FLAVOR}/${report_file} |
+ awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+ SELECTION_MODE="--test"
;;
* )
if [[ -z "${TEST_TAG_STRING-}" ]]; then
# If nothing is specified, we will run pre-selected tests by
# following tags.
- test_tag_array=("mrrAND${default_nic}AND1cAND64bANDip4base"
- "mrrAND${default_nic}AND1cAND78bANDip6base"
- "mrrAND${default_nic}AND1cAND64bANDl2bdbase"
- "mrrAND${default_nic}AND1cAND64bANDl2xcbase"
- "!dot1q" "!drv_avf")
+ test_tag_array=("mrrAND${default_nic}AND1cAND64bANDethip4-ip4base"
+ "mrrAND${default_nic}AND1cAND78bANDethip6-ip6base"
+ "mrrAND${default_nic}AND1cAND64bANDeth-l2bdbasemaclrn"
+ "mrrAND${default_nic}AND1cAND64bANDeth-l2xcbase"
+ "!drv_af_xdp" "!drv_avf")
else
# If trigger contains tags, split them into array.
test_tag_array=(${TEST_TAG_STRING//:/ })
fi
+ SELECTION_MODE="--include"
;;
esac
#
# Reasons for blacklisting:
# - ipsechw - Blacklisted on testbeds without crypto hardware accelerator.
- # TODO: Add missing reasons here (if general) or where used (if specific).
case "${TEST_CODE}" in
+ *"1n-vbox"*)
+ test_tag_array+=("!avf")
+ test_tag_array+=("!vhost")
+ ;;
*"2n-skx"*)
test_tag_array+=("!ipsechw")
;;
*"2n-clx"*)
test_tag_array+=("!ipsechw")
;;
+ *"2n-zn2"*)
+ test_tag_array+=("!ipsechw")
+ ;;
*"2n-dnv"*)
test_tag_array+=("!ipsechw")
test_tag_array+=("!memif")
test_tag_array+=("!vts")
test_tag_array+=("!drv_avf")
;;
+ *"2n-tx2"*)
+ test_tag_array+=("!ipsechw")
+ ;;
*"3n-dnv"*)
test_tag_array+=("!memif")
test_tag_array+=("!srv6_proxy")
test_tag_array+=("!drv_avf")
test_tag_array+=("!ipsechw")
;;
- *"3n-hsw"*)
- # TODO: Introduce NOIOMMU version of AVF tests.
- # TODO: Make (both) AVF tests work on Haswell,
- # or document why (some of) it is not possible.
- # https://github.com/FDio/vpp/blob/master/src/plugins/avf/README.md
- test_tag_array+=("!drv_avf")
- # All cards have access to QAT. But only one card (xl710)
- # resides in same NUMA as QAT. Other cards must go over QPI
- # which we do not want to even run.
- test_tag_array+=("!ipsechwNOTnic_intel-xl710")
- ;;
- *)
- # Default to 3n-hsw due to compatibility.
- test_tag_array+=("!drv_avf")
- test_tag_array+=("!ipsechwNOTnic_intel-xl710")
+ *"2n-aws"* | *"3n-aws"*)
+ test_tag_array+=("!ipsechw")
;;
esac
test_tag_array+=("${exclude_nics[@]/#/!NIC_}")
TAGS=()
+ prefix=""
- # We will prefix with perftest to prevent running other tests
- # (e.g. Functional).
- prefix="perftestAND"
set +x
if [[ "${TEST_CODE}" == "vpp-"* ]]; then
# Automatic prefixing for VPP jobs to limit the NIC used and
if [[ "${tag}" == "!"* ]]; then
# Exclude tags are not prefixed.
TAGS+=("${tag}")
+ elif [[ "${tag}" == " "* || "${tag}" == *"perftest"* ]]; then
+ # Badly formed tag expressions can trigger way too much tests.
+ set -x
+ warn "The following tag expression hints at bad trigger: ${tag}"
+ warn "Possible cause: Multiple triggers in a single comment."
+ die "Aborting to avoid triggering too many tests."
+ elif [[ "${tag}" == *"OR"* ]]; then
+ # If OR had higher precedence than AND, it would be useful here.
+ # Some people think it does, thus triggering way too much tests.
+ set -x
+ warn "The following tag expression hints at bad trigger: ${tag}"
+ warn "Operator OR has lower precedence than AND. Use space instead."
+ die "Aborting to avoid triggering too many tests."
elif [[ "${tag}" != "" && "${tag}" != "#"* ]]; then
# Empty and comment lines are skipped.
# Other lines are normal tags, they are to be prefixed.
# Variables read:
# - NODENESS - Node multiplicity of testbed, either "2n" or "3n".
- # - FLAVOR - Node flavor string, currently either "hsw" or "skx".
+ # - FLAVOR - Node flavor string, e.g. "clx" or "skx".
# - CSIT_DIR - Path to existing root of local CSIT git repository.
# - TOPOLOGIES_DIR - Path to existing directory with available topologies.
# Variables set:
case_text="${NODENESS}_${FLAVOR}"
case "${case_text}" in
- # TODO: Move tags to "# Blacklisting certain tags per topology" section.
- # TODO: Double link availability depends on NIC used.
"1n_vbox")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
TOPOLOGIES_TAGS="2_node_single_link_topo"
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_skx*.yaml )
TOPOLOGIES_TAGS="2_node_*_link_topo"
;;
+ "2n_zn2")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_zn2*.yaml )
+ TOPOLOGIES_TAGS="2_node_*_link_topo"
+ ;;
"3n_skx")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_skx*.yaml )
TOPOLOGIES_TAGS="3_node_*_link_topo"
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_dnv*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
- "3n_hsw")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_hsw*.yaml )
- TOPOLOGIES_TAGS="3_node_single_link_topo"
- ;;
"3n_tsh")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_tsh*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
+ "2n_tx2")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_tx2*.yaml )
+ TOPOLOGIES_TAGS="2_node_single_link_topo"
+ ;;
+ "2n_aws")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_aws*.yaml )
+ TOPOLOGIES_TAGS="2_node_single_link_topo"
+ ;;
+ "3n_aws")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_aws*.yaml )
+ TOPOLOGIES_TAGS="3_node_single_link_topo"
+ ;;
*)
- # No falling back to 3n_hsw default, that should have been done
+ # No falling back to default, that should have been done
# by the function which has set NODENESS and FLAVOR.
die "Unknown specification: ${case_text}"
esac
}
-function select_vpp_device_tags () {
+function set_environment_variables () {
+    # Depending on testbed topology, overwrite defaults set in the
+    # resources/libraries/python/Constants.py file
+    #
     # Variables read:
     # - TEST_CODE - String affecting test selection, usually jenkins job name.
-    # - TEST_TAG_STRING - String selecting tags, from gerrit comment.
-    #   Can be unset.
     # Variables set:
-    # - TAGS - Array of processed tag boolean expressions.
+    # - See the specific case branches below; currently only AWS testbeds
+    #   get overrides (exported T-Rex tuning variables).
     set -exuo pipefail
     case "${TEST_CODE}" in
-        # Select specific device tests based on jenkins job type variable.
-        * )
-            if [[ -z "${TEST_TAG_STRING-}" ]]; then
-                # If nothing is specified, we will run pre-selected tests by
-                # following tags. Items of array will be concatenated by OR
-                # in Robot Framework.
-                test_tag_array=()
-            else
-                # If trigger contains tags, split them into array.
-                test_tag_array=(${TEST_TAG_STRING//:/ })
-            fi
-            ;;
-    esac
-
-    # Blacklisting certain tags per topology.
-    #
-    # Reasons for blacklisting:
-    # - avf - AVF is not possible to run on enic driver of VirtualBox.
-    # - vhost - VirtualBox does not support nesting virtualization on Intel CPU.
-    case "${TEST_CODE}" in
-        *"1n-vbox"*)
-            test_tag_array+=("!avf")
-            test_tag_array+=("!vhost")
-            ;;
-        *)
+        *"2n-aws"* | *"3n-aws"*)
+            # T-Rex 2.88 workaround for ENA NICs
+            export TREX_RX_DESCRIPTORS_COUNT=1024
+            export TREX_EXTRA_CMDLINE="--mbuf-factor 19"
+            # NOTE(review): values tuned for T-Rex 2.88 on ENA; revisit on
+            # T-Rex upgrade.
+            # Settings to prevent duration stretching
+            export PERF_TRIAL_STL_DELAY=0.1
             ;;
     esac
-
-    TAGS=()
-
-    # We will prefix with devicetest to prevent running other tests
-    # (e.g. Functional).
-    prefix="devicetestAND"
-    if [[ "${TEST_CODE}" == "vpp-"* ]]; then
-        # Automatic prefixing for VPP jobs to limit testing.
-        prefix="${prefix}"
-    fi
-    for tag in "${test_tag_array[@]}"; do
-        if [[ ${tag} == "!"* ]]; then
-            # Exclude tags are not prefixed.
-            TAGS+=("${tag}")
-        else
-            TAGS+=("${prefix}${tag}")
-        fi
-    done
 }
+
function untrap_and_unreserve_testbed () {
# Use this as a trap function to ensure testbed does not remain reserved.
# - EXIT - Failure to untrap is reported, but ignored otherwise.
# Functions called:
# - die - Print to stderr and exit.
+ # - ansible_playbook - Perform an action using ansible, see ansible.sh
set -xo pipefail
set +eu # We do not want to exit early in a "teardown" function.
set -eu
warn "Testbed looks unreserved already. Trap removal failed before?"
else
- ansible_hosts "cleanup" || true
- python "${PYTHON_SCRIPTS_DIR}/topo_reservation.py" -c -t "${wt}" || {
+ ansible_playbook "cleanup" || true
+ python3 "${PYTHON_SCRIPTS_DIR}/topo_reservation.py" -c -t "${wt}" || {
die "${1:-FAILED TO UNRESERVE, FIX MANUALLY.}" 2
}
+ case "${TEST_CODE}" in
+ *"2n-aws"* | *"3n-aws"*)
+ terraform_destroy || die "Failed to call terraform destroy."
+ ;;
+ *)
+ ;;
+ esac
WORKING_TOPOLOGY=""
set -eu
fi