X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Flibraries%2Fbash%2Ffunction%2Fcommon.sh;h=399b449fa0cde16ac3a778d2356952e9847c792a;hp=69c70935c73ae72fc607f8f4bf3efa64b090faf7;hb=ea6e8be86d47b288df5e11ca5515f6bda71dbd08;hpb=d465d9fba33a323703a2bf40c499d74d0f017091

diff --git a/resources/libraries/bash/function/common.sh b/resources/libraries/bash/function/common.sh
index 69c70935c7..399b449fa0 100644
--- a/resources/libraries/bash/function/common.sh
+++ b/resources/libraries/bash/function/common.sh
@@ -1,4 +1,5 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2019 PANTHEON.tech and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -21,47 +22,135 @@ set -exuo pipefail
 # the code might become more readable (but longer).
 
 
+function activate_docker_topology () {
+    set -exuo pipefail
+
+    # Create virtual vpp-device topology. The output of the function is
+    # a topology file describing the created environment.
+    #
+    # Variables read:
+    # - BASH_FUNCTION_DIR - Path to existing directory this file is located in.
+    # - TOPOLOGIES - Available topologies.
+    # - NODENESS - Node multiplicity of desired testbed.
+    # - FLAVOR - Node flavor string, usually describing the processor.
+    # - IMAGE_VER_FILE - Name of file that contains the image version.
+    # Variables set:
+    # - WORKING_TOPOLOGY - Path to topology file.
+
+    source "${BASH_FUNCTION_DIR}/device.sh" || {
+        die "Source failed!"
+    }
+
+    device_image="$(< ${CSIT_DIR}/${IMAGE_VER_FILE})"
+    case_text="${NODENESS}_${FLAVOR}"
+    case "${case_text}" in
+        "1n_skx")
+            # We execute the reservation over csit-shim-dcr (ssh), which runs
+            # the sourced script's functions. Env variables are read from
+            # the ssh output back to localhost for further processing.
+            hostname=$(grep search /etc/resolv.conf | cut -d' ' -f3)
+            ssh="ssh root@${hostname} -p 6022"
+            run="activate_wrapper ${NODENESS} ${FLAVOR} ${device_image}"
+            # backticks to avoid https://midnight-commander.org/ticket/2142
+            env_vars=`${ssh} "$(declare -f); ${run}"` || {
+                die "Topology reservation via shim-dcr failed!"
+            }
+            set -a
+            source <(echo "$env_vars" | grep -v /usr/bin/docker) || {
+                die "Source failed!"
+            }
+            set +a
+            ;;
+        "1n_vbox")
+            # We execute the reservation on localhost. The sourced script
+            # automatically sets environment variables for further processing.
+            activate_wrapper "${NODENESS}" "${FLAVOR}" "${device_image}" || die
+            ;;
+        *)
+            die "Unknown specification: ${case_text}!"
+    esac
+
+    trap 'deactivate_docker_topology' EXIT || {
+        die "Trap attempt failed, please cleanup manually. Aborting!"
+    }
+
+    # Replace all variables in template with those in environment.
+    source <(echo 'cat <<EOF >topo.yml'; cat ${TOPOLOGIES[0]}; echo EOF;) || {
+        die "Topology file create failed!"
+    }
+
+    WORKING_TOPOLOGY="/tmp/topology.yaml"
+    mv topo.yml "${WORKING_TOPOLOGY}" || {
+        die "Topology move failed!"
+    }
+    cat ${WORKING_TOPOLOGY} | grep -v password || {
+        die "Topology read failed!"
+    }
+}
+
+
 function activate_virtualenv () {
 
     set -exuo pipefail
 
+    # Update virtualenv pip package, delete and create virtualenv directory,
+    # activate the virtualenv, install requirements, set PYTHONPATH.
+
     # Arguments:
-    # - ${1} - Non-empty path to existing directory for creating virtualenv in.
+    # - ${1} - Path to existing directory for creating virtualenv in.
+    #   If missing or empty, ${CSIT_DIR} is used.
+    # - ${2} - Path to requirements file, ${CSIT_DIR}/requirements.txt if empty.
     # Variables read:
     # - CSIT_DIR - Path to existing root of local CSIT git repository.
-    # Variables set:
-    # - ENV_DIR - Path to the created virtualenv subdirectory.
     # Variables exported:
     # - PYTHONPATH - CSIT_DIR, as CSIT Python scripts usually need this.
     # Functions called:
     # - die - Print to stderr and exit.
 
-    # TODO: Do we really need to have ENV_DIR available as a global variable?
-
-    if [[ "${1-}" == "" ]]; then
-        die "Root location of virtualenv to create is not specified."
-    fi
-    ENV_DIR="${1}/env"
-    rm -rf "${ENV_DIR}" || die "Failed to clean previous virtualenv."
+    # TODO: Do we want the callers to be able to set the env dir name?
+    # TODO: + In that case, do we want to support env switching?
+    # TODO: + In that case we want to make env_dir global.
+    # TODO: Do we want the callers to override PYTHONPATH location?
+    root_path="${1-$CSIT_DIR}"
+    env_dir="${root_path}/env"
+    req_path=${2-$CSIT_DIR/requirements.txt}
+    rm -rf "${env_dir}" || die "Failed to clean previous virtualenv."
     pip install --upgrade virtualenv || {
        die "Virtualenv package install failed."
    }
-    virtualenv "${ENV_DIR}" || {
+    virtualenv "${env_dir}" || {
        die "Virtualenv creation failed."
    }
    set +u
-    source "${ENV_DIR}/bin/activate" || die "Virtualenv activation failed."
+    source "${env_dir}/bin/activate" || die "Virtualenv activation failed."
    set -u
-    pip install -r "${CSIT_DIR}/requirements.txt" || {
-        die "CSIT requirements installation failed."
+    pip install --upgrade -r "${req_path}" || {
+        die "Requirements installation failed."
    }
-    # Most CSIT Python scripts assume PYTHONPATH is set and exported.
     export PYTHONPATH="${CSIT_DIR}" || die "Export failed."
 }
 
 
+function archive_tests () {
+
+    set -exuo pipefail
+
+    # Create .tar.xz of generated/tests for archiving.
+    # To be run after generate_tests, kept separate to offer more flexibility.
+
+    # Directory read:
+    # - ${GENERATED_DIR}/tests - Tree of executed suites to archive.
+    # File rewritten:
+    # - ${ARCHIVE_DIR}/tests.tar.xz - Archive of generated tests.
+
+    tar c "${GENERATED_DIR}/tests" | xz -9e > "${ARCHIVE_DIR}/tests.tar.xz" || {
+        die "Error creating archive of generated tests."
+    }
+}
+
+
 function check_download_dir () {
 
     set -exuo pipefail
@@ -100,6 +189,8 @@ function common_dirs () {
 
     set -exuo pipefail
 
+    # Set global variables, create some directories (without touching content).
+
     # Variables set:
     # - BASH_FUNCTION_DIR - Path to existing directory this file is located in.
     # - CSIT_DIR - Path to existing root of local CSIT git repository.
@@ -109,6 +200,9 @@ function common_dirs () {
     # - PYTHON_SCRIPTS_DIR - Path to existing tools subdirectory "scripts".
     # - ARCHIVE_DIR - Path to created CSIT subdirectory "archive".
     # - DOWNLOAD_DIR - Path to created CSIT subdirectory "download_dir".
+    # - GENERATED_DIR - Path to created CSIT subdirectory "generated".
+    # Directories created if not present:
+    # ARCHIVE_DIR, DOWNLOAD_DIR, GENERATED_DIR.
     # Functions called:
     # - die - Print to stderr and exit.
 
@@ -142,6 +236,10 @@ function common_dirs () {
         die "Readlink failed."
     }
     mkdir -p "${DOWNLOAD_DIR}" || die "Mkdir failed."
+    GENERATED_DIR="$(readlink -f "${CSIT_DIR}/generated")" || {
+        die "Readlink failed."
+    }
+    mkdir -p "${GENERATED_DIR}" || die "Mkdir failed."
 }
 
 
@@ -154,13 +252,28 @@ function compose_pybot_arguments () {
     # - DUT - CSIT test/ subdirectory, set while processing tags.
     # - TAGS - Array variable holding selected tag boolean expressions.
     # - TOPOLOGIES_TAGS - Tag boolean expression filtering tests for topology.
+    # - TEST_CODE - The test selection string from environment or argument.
     # Variables set:
     # - PYBOT_ARGS - String holding part of all arguments for pybot.
     # - EXPANDED_TAGS - Array of strings pybot arguments compiled from tags.
 
     # No explicit check needed with "set -u".
-    PYBOT_ARGS=("--loglevel" "TRACE" "--variable" "TOPOLOGY_PATH:${WORKING_TOPOLOGY}")
-    PYBOT_ARGS+=("--suite" "tests.${DUT}.perf")
+    PYBOT_ARGS=("--loglevel" "TRACE")
+    PYBOT_ARGS+=("--variable" "TOPOLOGY_PATH:${WORKING_TOPOLOGY}")
+
+    case "${TEST_CODE}" in
+        *"device"*)
+            PYBOT_ARGS+=("--suite" "tests.${DUT}.device")
+            ;;
+        *"func"*)
+            PYBOT_ARGS+=("--suite" "tests.${DUT}.func")
+            ;;
+        *"perf"*)
+            PYBOT_ARGS+=("--suite" "tests.${DUT}.perf")
+            ;;
+        *)
+            die "Unknown specification: ${TEST_CODE}"
+    esac
 
     EXPANDED_TAGS=()
     for tag in "${TAGS[@]}"; do
@@ -197,6 +310,38 @@ function copy_archives () {
 }
 
 
+function deactivate_docker_topology () {
+    # Deactivate virtual vpp-device topology by removing containers.
+    #
+    # Variables read:
+    # - NODENESS - Node multiplicity of desired testbed.
+    # - FLAVOR - Node flavor string, usually describing the processor.
+
+    set -exuo pipefail
+
+    case_text="${NODENESS}_${FLAVOR}"
+    case "${case_text}" in
+        "1n_skx")
+            hostname=$(grep search /etc/resolv.conf | cut -d' ' -f3)
+            ssh="ssh root@${hostname} -p 6022"
+            env_vars="$(env | grep CSIT_ | tr '\n' ' ' )"
+            ${ssh} "$(declare -f); deactivate_wrapper ${env_vars}" || {
+                die "Topology cleanup via shim-dcr failed!"
+            }
+            ;;
+        "1n_vbox")
+            enter_mutex || die
+            clean_environment || {
+                die "Topology cleanup locally failed!"
+            }
+            exit_mutex || die
+            ;;
+        *)
+            die "Unknown specification: ${case_text}!"
+    esac
+}
+
+
 function die () {
     # Print the message to standard error and exit with error code specified
     # by the second argument.
@@ -226,11 +371,44 @@ function die_on_pybot_error () {
     # - die - Print to stderr and exit.
 
     if [[ "${PYBOT_EXIT_STATUS}" != "0" ]]; then
-        die "${PYBOT_EXIT_STATUS}" "Test failures are present!"
+        die "Test failures are present!" "${PYBOT_EXIT_STATUS}"
     fi
 }
 
 
+function generate_tests () {
+
+    set -exuo pipefail
+
+    # Populate ${GENERATED_DIR}/tests based on ${CSIT_DIR}/tests/.
+    # Any previously existing content of ${GENERATED_DIR}/tests is wiped first.
+    # The generation is done by executing any *.py executable
+    # within any subdirectory after copying.
+
+    # This is a separate function, because this code is called
+    # both by the autogen checker and by entry scripts calling run_pybot.
+
+    # Directories read:
+    # - ${CSIT_DIR}/tests - Used as templates for the generated tests.
+    # Directories replaced:
+    # - ${GENERATED_DIR}/tests - Overwritten by the generated tests.
+ + rm -rf "${GENERATED_DIR}/tests" + cp -r "${CSIT_DIR}/tests" "${GENERATED_DIR}/tests" + cmd_line=("find" "${GENERATED_DIR}/tests" "-type" "f") + cmd_line+=("-executable" "-name" "*.py") + file_list=$("${cmd_line[@]}") || die + + for gen in ${file_list}; do + directory="$(dirname "${gen}")" || die + filename="$(basename "${gen}")" || die + pushd "${directory}" || die + ./"${filename}" || die + popd || die + done +} + + function get_test_code () { set -exuo pipefail @@ -251,6 +429,14 @@ function get_test_code () { fi case "${TEST_CODE}" in + *"1n-vbox"*) + NODENESS="1n" + FLAVOR="vbox" + ;; + *"1n-skx"*) + NODENESS="1n" + FLAVOR="skx" + ;; *"2n-skx"*) NODENESS="2n" FLAVOR="skx" @@ -259,6 +445,10 @@ function get_test_code () { NODENESS="3n" FLAVOR="skx" ;; + *"3n-tsh"*) + NODENESS="3n" + FLAVOR="tsh" + ;; *) # Fallback to 3-node Haswell by default (backward compatibility) NODENESS="3n" @@ -275,6 +465,7 @@ function get_test_tag_string () { # Variables read: # - GERRIT_EVENT_TYPE - Event type set by gerrit, can be unset. # - GERRIT_EVENT_COMMENT_TEXT - Comment text, read for "comment-added" type. + # - TEST_CODE - The test selection string from environment or argument. # Variables set: # - TEST_TAG_STRING - The string following "perftest" in gerrit comment, # or empty. @@ -283,12 +474,33 @@ function get_test_tag_string () { trigger="" if [[ "${GERRIT_EVENT_TYPE-}" == "comment-added" ]]; then - # On parsing error, ${trigger} stays empty. - trigger="$(echo "${GERRIT_EVENT_COMMENT_TEXT}" \ - | grep -oE '(perftest$|perftest[[:space:]].+$)')" || true + case "${TEST_CODE}" in + *"device"*) + # On parsing error, ${trigger} stays empty. + trigger="$(echo "${GERRIT_EVENT_COMMENT_TEXT}" \ + | grep -oE '(devicetest$|devicetest[[:space:]].+$)')" \ + || true + # Set test tags as string. + TEST_TAG_STRING="${trigger#$"devicetest"}" + ;; + *"perf"*) + # On parsing error, ${trigger} stays empty. + comment="${GERRIT_EVENT_COMMENT_TEXT}" + # As "perftest" can be followed by something, we substitute it. + comment="${comment/perftest-2n/perftest}" + comment="${comment/perftest-3n/perftest}" + comment="${comment/perftest-hsw/perftest}" + comment="${comment/perftest-skx/perftest}" + comment="${comment/perftest-tsh/perftest}" + tag_string="$(echo "${comment}" \ + | grep -oE '(perftest$|perftest[[:space:]].+$)' || true)" + # Set test tags as string. + TEST_TAG_STRING="${tag_string#$"perftest"}" + ;; + *) + die "Unknown specification: ${TEST_CODE}" + esac fi - # Set test tags as string. - TEST_TAG_STRING="${trigger#$"perftest"}" } @@ -350,45 +562,34 @@ function run_pybot () { set -exuo pipefail - # Currently, VPP-1361 causes occasional test failures. - # If real result is more important than time, we can retry few times. - # TODO: We should be retrying on test case level instead. - - # Arguments: - # - ${1} - Optional number of pybot invocations to try to avoid failures. - # Default: 1. # Variables read: # - CSIT_DIR - Path to existing root of local CSIT git repository. # - ARCHIVE_DIR - Path to store robot result files in. # - PYBOT_ARGS, EXPANDED_TAGS - See compose_pybot_arguments.sh + # - GENERATED_DIR - Tests are assumed to be generated under there. # Variables set: # - PYBOT_EXIT_STATUS - Exit status of most recent pybot invocation. # Functions called: # - die - Print to stderr and exit. - # Set ${tries} as an integer variable, to fail on non-numeric input. - local -i "tries" || die "Setting type of variable failed." - tries="${1:-1}" || die "Argument evaluation failed." 
all_options=("--outputdir" "${ARCHIVE_DIR}" "${PYBOT_ARGS[@]}") all_options+=("${EXPANDED_TAGS[@]}") - while true; do - if [[ "${tries}" -le 0 ]]; then - break - else - tries="$((${tries} - 1))" - fi - pushd "${CSIT_DIR}" || die "Change directory operation failed." - set +e - # TODO: Make robot tests not require "$(pwd)" == "${CSIT_DIR}". - pybot "${all_options[@]}" "${CSIT_DIR}/tests/" - PYBOT_EXIT_STATUS="$?" - set -e - popd || die "Change directory operation failed." - if [[ "${PYBOT_EXIT_STATUS}" == "0" ]]; then - break - fi - done + pushd "${CSIT_DIR}" || die "Change directory operation failed." + set +e + # TODO: Make robot tests not require "$(pwd)" == "${CSIT_DIR}". + pybot "${all_options[@]}" "${GENERATED_DIR}/tests/" + PYBOT_EXIT_STATUS="$?" + set -e + + # Generate INFO level output_info.xml for post-processing. + all_options=("--loglevel" "INFO") + all_options+=("--log" "none") + all_options+=("--report" "none") + all_options+=("--output" "${ARCHIVE_DIR}/output_info.xml") + all_options+=("${ARCHIVE_DIR}/output.xml") + rebot "${all_options[@]}" + popd || die "Change directory operation failed." } @@ -406,117 +607,49 @@ function select_tags () { # - TAGS - Array of processed tag boolean expressions. # NIC SELECTION - # All topologies NICs - available=$(grep -hoPR "model: \K.*" "${TOPOLOGIES_DIR}"/* | sort -u) - # Selected topology NICs - reserved=$(grep -hoPR "model: \K.*" "${WORKING_TOPOLOGY}" | sort -u) - # All topologies NICs - Selected topology NICs + start_pattern='^ TG:' + end_pattern='^ \? \?[A-Za-z0-9]\+:' + # Remove the TG section from topology file + sed_command="/${start_pattern}/,/${end_pattern}/d" + # All topologies DUT NICs + available=$(sed "${sed_command}" "${TOPOLOGIES_DIR}"/* \ + | grep -hoP "model: \K.*" | sort -u) + # Selected topology DUT NICs + reserved=$(sed "${sed_command}" "${WORKING_TOPOLOGY}" \ + | grep -hoP "model: \K.*" | sort -u) + # All topologies DUT NICs - Selected topology DUT NICs exclude_nics=($(comm -13 <(echo "${reserved}") <(echo "${available}"))) + # Select default NIC + case "${TEST_CODE}" in + *"3n-tsh"*) + DEFAULT_NIC='nic_intel-x520-da2' + ;; + *) + DEFAULT_NIC='nic_intel-x710' + ;; + esac + case "${TEST_CODE}" in # Select specific performance tests based on jenkins job type variable. 
*"ndrpdr-weekly"* ) - test_tag_array=("ndrpdrAND64bAND1c" - "ndrpdrAND78bAND1c") + readarray -t test_tag_array < "${BASH_FUNCTION_DIR}/mlr-weekly.txt" + ;; + *"mrr-daily"* ) + readarray -t test_tag_array < "${BASH_FUNCTION_DIR}/mrr-daily.txt" ;; - *"mrr-daily"* | *"mrr-weekly"* ) - test_tag_array=(# vic - "mrrANDnic_cisco-vic-1227AND64b" - "mrrANDnic_cisco-vic-1385AND64b" - # memif - "mrrANDmemifANDethAND64b" - "mrrANDmemifANDethANDimix" - # crypto - "mrrANDipsecAND64b" - # ip4 base - "mrrANDip4baseAND64b" - # ip4 scale FIB 2M - "mrrANDip4fwdANDfib_2mAND64b" - # ip4 scale FIB 200k - "mrrANDip4fwdANDfib_200kANDnic_intel-*710AND64b" - # ip4 scale FIB 20k - "mrrANDip4fwdANDfib_20kANDnic_intel-*710AND64b" - # ip4 scale ACL - "mrrANDip4fwdANDacl1AND10k_flowsAND64b" - "mrrANDip4fwdANDacl50AND10k_flowsAND64b" - # ip4 scale NAT44 - "mrrANDip4fwdANDnat44ANDbaseAND64b" - "mrrANDip4fwdANDnat44ANDsrc_user_4000AND64b" - # ip4 features - "mrrANDip4fwdANDfeatureANDnic_intel-*710AND64b" - # TODO: Remove when tags in - # tests/vpp/perf/ip4/*-ipolicemarkbase-*.robot - # are fixed - "mrrANDip4fwdANDpolice_markANDnic_intel-*710AND64b" - # ip4 tunnels - "mrrANDip4fwdANDencapANDip6unrlayANDip4ovrlayANDnic_intel-x520-da2AND64b" - "mrrANDip4fwdANDencapANDnic_intel-*710AND64b" - "mrrANDl2ovrlayANDencapANDnic_intel-*710AND64b" - # ip6 base - "mrrANDip6baseANDethAND78b" - # ip6 features - "mrrANDip6fwdANDfeatureANDnic_intel-*710AND78b" - # ip6 scale FIB 2M - "mrrANDip6fwdANDfib_2mANDnic_intel-*710AND78b" - # ip6 scale FIB 200k - "mrrANDip6fwdANDfib_200kANDnic_intel-*710AND78b" - # ip6 scale FIB 20k - "mrrANDip6fwdANDfib_20kANDnic_intel-*710AND78b" - # ip6 tunnels - "mrrANDip6fwdANDencapANDnic_intel-x520-da2AND78b" - # l2xc base - "mrrANDl2xcfwdANDbaseAND64b" - # l2xc scale ACL - "mrrANDl2xcANDacl1AND10k_flowsAND64b" - "mrrANDl2xcANDacl50AND10k_flowsAND64b" - # l2xc scale FIB 2M - "mrrANDl2xcANDfib_2mAND64b" - # l2xc scale FIB 200k - "mrrANDl2xcANDfib_200kANDnic_intel-*710AND64b" - # l2xc scale FIB 20k - "mrrANDl2xcANDfib_20kANDnic_intel-*710AND64b" - # l2bd base - "mrrANDl2bdmaclrnANDbaseAND64b" - # l2bd scale ACL - "mrrANDl2bdmaclrnANDacl1AND10k_flowsAND64b" - "mrrANDl2bdmaclrnANDacl50AND10k_flowsAND64b" - # l2bd scale FIB 2M - "mrrANDl2bdmaclrnANDfib_1mAND64b" - # l2bd scale FIB 200k - "mrrANDl2bdmaclrnANDfib_100kANDnic_intel-*710AND64b" - # l2bd scale FIB 20k - "mrrANDl2bdmaclrnANDfib_10kANDnic_intel-*710AND64b" - # l2 patch base - "mrrANDl2patchAND64b" - # srv6 - "mrrANDsrv6ANDnic_intel-x520-da2AND78b" - # vts - "mrrANDvtsANDnic_intel-x520-da2AND114b" - # vm vhost l2xc base - "mrrANDvhostANDl2xcfwdANDbaseAND64b" - "mrrANDvhostANDl2xcfwdANDbaseANDimix" - # vm vhost l2bd base - "mrrANDvhostANDl2bdmaclrnANDbaseAND64b" - "mrrANDvhostANDl2bdmaclrnANDbaseANDimix" - # vm vhost ip4 base - "mrrANDvhostANDip4fwdANDbaseAND64b" - "mrrANDvhostANDip4fwdANDbaseANDimix" - # Exclude - "!mrrANDip6baseANDdot1qAND78b" - "!vhost_256ANDnic_intel-x520-da2" - "!vhostANDnic_intel-xl710" - "!cfs_opt" - "!lbond_dpdk") + *"mrr-weekly"* ) + readarray -t test_tag_array < "${BASH_FUNCTION_DIR}/mrr-weekly.txt" ;; * ) if [[ -z "${TEST_TAG_STRING-}" ]]; then # If nothing is specified, we will run pre-selected tests by # following tags. 
-                test_tag_array=("mrrANDnic_intel-x710AND1cAND64bANDip4base"
-                                "mrrANDnic_intel-x710AND1cAND78bANDip6base"
-                                "mrrANDnic_intel-x710AND1cAND64bANDl2bdbase"
-                                "mrrANDnic_intel-x710AND1cAND64bANDl2xcbase"
-                                "!dot1q")
+                test_tag_array=("mrrAND${DEFAULT_NIC}AND1cAND64bANDip4base"
+                                "mrrAND${DEFAULT_NIC}AND1cAND78bANDip6base"
+                                "mrrAND${DEFAULT_NIC}AND1cAND64bANDl2bdbase"
+                                "mrrAND${DEFAULT_NIC}AND1cAND64bANDl2xcbase"
+                                "!dot1q" "!drv_avf")
             else
                 # If trigger contains tags, split them into array.
                 test_tag_array=(${TEST_TAG_STRING//:/ })
@@ -528,12 +661,22 @@ function select_tags () {
     case "${TEST_CODE}" in
         *"3n-hsw"*)
             test_tag_array+=("!drv_avf")
+            test_tag_array+=("!ipsechwNOTnic_intel-xl710")
             ;;
         *"2n-skx"*)
             test_tag_array+=("!ipsechw")
             ;;
         *"3n-skx"*)
             test_tag_array+=("!ipsechw")
+            # Not enough nic_intel-xxv710 to support double link tests.
+            test_tag_array+=("!3_node_double_link_topoANDnic_intel-xxv710")
+            ;;
+        *"3n-tsh"*)
+            test_tag_array+=("!ipsechw")
+            test_tag_array+=("!memif")
+            test_tag_array+=("!srv6_proxy")
+            test_tag_array+=("!vhost")
+            test_tag_array+=("!vts")
             ;;
         *)
             # Default to 3n-hsw due to compatibility.
@@ -549,10 +692,64 @@ function select_tags () {
     # We will prefix with perftest to prevent running other tests
     # (e.g. Functional).
     prefix="perftestAND"
+    set +x
     if [[ "${TEST_CODE}" == "vpp-"* ]]; then
         # Automatic prefixing for VPP jobs to limit the NIC used and
         # traffic evaluation to MRR.
-        prefix="${prefix}mrrANDnic_intel-x710AND"
+        if [[ "${TEST_TAG_STRING-}" == *"nic_"* ]]; then
+            prefix="${prefix}mrrAND"
+        else
+            prefix="${prefix}mrrAND${DEFAULT_NIC}AND"
+        fi
+    fi
+    for tag in "${test_tag_array[@]}"; do
+        if [[ "${tag}" == "!"* ]]; then
+            # Exclude tags are not prefixed.
+            TAGS+=("${tag}")
+        elif [[ "${tag}" != "" && "${tag}" != "#"* ]]; then
+            # Empty and comment lines are skipped.
+            # Other lines are normal tags, they are to be prefixed.
+            TAGS+=("${prefix}${tag}")
+        fi
+    done
+    set -x
+}
+
+
+function select_vpp_device_tags () {
+
+    set -exuo pipefail
+
+    # Variables read:
+    # - TEST_CODE - String affecting test selection, usually jenkins job name.
+    # - TEST_TAG_STRING - String selecting tags, from gerrit comment.
+    #   Can be unset.
+    # Variables set:
+    # - TAGS - Array of processed tag boolean expressions.
+
+    case "${TEST_CODE}" in
+        # Select specific device tests based on jenkins job type variable.
+        * )
+            if [[ -z "${TEST_TAG_STRING-}" ]]; then
+                # If nothing is specified, we will run pre-selected tests by
+                # following tags. Items of array will be concatenated by OR
+                # in Robot Framework.
+                test_tag_array=()
+            else
+                # If trigger contains tags, split them into array.
+                test_tag_array=(${TEST_TAG_STRING//:/ })
+            fi
+            ;;
+    esac
+
+    TAGS=()
+
+    # We will prefix with devicetest to prevent running other tests
+    # (e.g. Functional).
+    prefix="devicetestAND"
+    if [[ "${TEST_CODE}" == "vpp-"* ]]; then
+        # Automatic prefixing for VPP jobs to limit testing.
+        prefix="${prefix}"
     fi
     for tag in "${test_tag_array[@]}"; do
         if [[ ${tag} == "!"* ]]; then
@@ -564,6 +761,34 @@ function select_tags () {
     done
 }
 
+function select_os () {
+
+    set -exuo pipefail
+
+    # Variables read:
+    # - OS - os or distro for selecting container image.
+    # Variables set:
+    # - VPP_VER_FILE - Name of file in CSIT dir containing vpp stable version.
+    # - IMAGE_VER_FILE - Name of file in CSIT dir containing the image name.
+    # - PKG_SUFFIX - Suffix of OS package file name, "rpm" or "deb".
+ + case "${OS}" in + "ubuntu"*) + IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU" + VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_BIONIC" + PKG_SUFFIX="deb" + ;; + "centos"*) + IMAGE_VER_FILE="VPP_DEVICE_IMAGE_CENTOS" + VPP_VER_FILE="VPP_STABLE_VER_CENTOS" + PKG_SUFFIX="rpm" + ;; + *) + die "Unable to identify distro or os from ${OS}" + ;; + esac +} + function select_topology () { @@ -573,7 +798,7 @@ function select_topology () { # - NODENESS - Node multiplicity of testbed, either "2n" or "3n". # - FLAVOR - Node flavor string, currently either "hsw" or "skx". # - CSIT_DIR - Path to existing root of local CSIT git repository. - # - TOPOLOGIES_DIR - Path to existing directory with available tpologies. + # - TOPOLOGIES_DIR - Path to existing directory with available topologies. # Variables set: # - TOPOLOGIES - Array of paths to suitable topology yaml files. # - TOPOLOGIES_TAGS - Tag expression selecting tests for the topology. @@ -582,28 +807,28 @@ function select_topology () { case_text="${NODENESS}_${FLAVOR}" case "${case_text}" in - "3n_hsw") - TOPOLOGIES=( - "${TOPOLOGIES_DIR}/lf_3n_hsw_testbed1.yaml" - "${TOPOLOGIES_DIR}/lf_3n_hsw_testbed2.yaml" - "${TOPOLOGIES_DIR}/lf_3n_hsw_testbed3.yaml" - ) - TOPOLOGIES_TAGS="3_node_single_link_topo" + "1n_vbox") + TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template ) + TOPOLOGIES_TAGS="2_node_single_link_topo" + ;; + "1n_skx") + TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template ) + TOPOLOGIES_TAGS="2_node_single_link_topo" ;; "2n_skx") - TOPOLOGIES=( - "${TOPOLOGIES_DIR}/lf_2n_skx_testbed21.yaml" - #"${TOPOLOGIES_DIR}/lf_2n_skx_testbed22.yaml" - #"${TOPOLOGIES_DIR}/lf_2n_skx_testbed23.yaml" - "${TOPOLOGIES_DIR}/lf_2n_skx_testbed24.yaml" - ) + TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_skx*.yaml ) TOPOLOGIES_TAGS="2_node_*_link_topo" ;; "3n_skx") - TOPOLOGIES=( - "${TOPOLOGIES_DIR}/lf_3n_skx_testbed31.yaml" - "${TOPOLOGIES_DIR}/lf_3n_skx_testbed32.yaml" - ) + TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_skx*.yaml ) + TOPOLOGIES_TAGS="3_node_*_link_topo" + ;; + "3n_hsw") + TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_hsw*.yaml ) + TOPOLOGIES_TAGS="3_node_single_link_topo" + ;; + "3n_tsh") + TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_tsh*.yaml ) TOPOLOGIES_TAGS="3_node_*_link_topo" ;; *)