2 # Copyright (c) 2018 Cisco and/or its affiliates.
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at:
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
19 # Prints the message to standard error.
24 # Prints the message to standard error and exits with the specified error code.
33 # Displays help message.
34 die 1 "Usage: `basename $0` csit-[dpdk|vpp|ligato]-[2n-skx|3n-skx|3n-hsw]"
37 function cancel_all () {
38     # Trap function to get into consistent state.
# Installed later via 'trap "cancel_all ${WORKING_TOPOLOGY}" EXIT' once a
# testbed is reserved. $1 is the topology YAML file of the reserved testbed.
# Step 1: wipe the testbed back to a clean state.
39 python ${SCRIPT_DIR}/resources/tools/scripts/topo_cleanup.py -t $1 || {
40 die 1 "Failure during execution of topology cleanup script!"
# Step 2: cancel (-c) the reservation so other jobs can claim the testbed.
42 python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -c -t $1 || {
43 die 1 "Failure during execution of topology un-reservation script!"
48 # Space separated list of available testbeds, described by topology files
# One bash array per testbed flavor; each entry is a topology YAML file.
49 TOPOLOGIES_3N_HSW=(topologies/available/lf_3n_hsw_testbed1.yaml
50 topologies/available/lf_3n_hsw_testbed2.yaml
51 topologies/available/lf_3n_hsw_testbed3.yaml)
52 TOPOLOGIES_2N_SKX=(topologies/available/lf_2n_skx_testbed21.yaml
53 topologies/available/lf_2n_skx_testbed24.yaml)
54 TOPOLOGIES_3N_SKX=(topologies/available/lf_3n_skx_testbed31.yaml
55 topologies/available/lf_3n_skx_testbed32.yaml)
# Absolute directory this script lives in; also used as PYTHONPATH so the
# resources/ python tooling is importable.
57 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
58 export PYTHONPATH=${SCRIPT_DIR}
# Well-known paths: reservation lock dir, downloaded artifacts, test outputs.
60 RESERVATION_DIR="/tmp/reservation_dir"
61 DOWNLOAD_DIR="${SCRIPT_DIR}/download_dir"
62 ARCHIVE_DIR="${SCRIPT_DIR}/archive"
64 mkdir -p ${DOWNLOAD_DIR} || {
65 die 1 "Failed to create download dir!"
67 mkdir -p ${ARCHIVE_DIR} || {
68 die 1 "Failed to create archive dir!"
# Jenkins sets JOB_NAME; '${JOB_NAME-}' defaults to empty when unset so the
# script can also run outside Jenkins (the empty case is handled below).
72 TEST_CODE=${JOB_NAME-}
73 if [[ -z ${TEST_CODE} ]]; then
# Pick topology set + Robot tag glob from the job name. NOTE(review): the
# matching arms (presumably *2n-skx* / *3n-skx*) are not visible in this view
# of the file — confirm against the full script.
81 TOPOLOGIES=${TOPOLOGIES_2N_SKX[@]}
82 TOPOLOGIES_TAGS="2_node_*_link_topo"
85 TOPOLOGIES=${TOPOLOGIES_3N_SKX[@]}
86 TOPOLOGIES_TAGS="3_node_*_link_topo"
89 # Fallback to 3-node Haswell by default (backward compatibility)
90 TOPOLOGIES=${TOPOLOGIES_3N_HSW[@]}
91 TOPOLOGIES_TAGS="3_node_*_link_topo"
# Sanity check: at least one candidate testbed must have been selected.
95 if [[ -z "${TOPOLOGIES}" ]]; then
96 die 1 "No applicable topology found!"
109 # Use downloaded packages with specific version
# Timed jobs (daily/weekly/timed) test the newest VPP; other CI jobs pin the
# versions recorded in DPDK_STABLE_VER / VPP_STABLE_VER_UBUNTU files.
110 if [[ "$TEST_CODE" == *daily* ]] || \
111 [[ "$TEST_CODE" == *weekly* ]] || \
112 [[ "$TEST_CODE" == *timed* ]];
114 echo Downloading latest VPP packages from NEXUS...
115 bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
117 die 1 "Failed to get VPP packages!"
120 echo Downloading VPP packages of specific version from NEXUS...
# Pinned versions live in plain-text files next to this script.
121 DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
122 VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
# --skip-install: only download the .deb files; installation happens on the DUTs.
123 bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
124 --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER} || {
125 die 1 "Failed to get VPP packages!"
130 # Use local built packages.
# Patch-verification runs: the .deb files were built in the parent workspace.
131 mv ../${DUT}*.deb ${DOWNLOAD_DIR}/
134 die 1 "Unable to identify job type from: ${TEST_CODE}!"
143 # Use downloaded packages with specific version
# Same acquisition logic as the vpp branch above, repeated for this DUT type
# (presumably the ligato branch, which also needs VPP packages — TODO confirm).
144 if [[ "$TEST_CODE" == *daily* ]] || \
145 [[ "$TEST_CODE" == *weekly* ]] || \
146 [[ "$TEST_CODE" == *timed* ]];
148 echo Downloading latest VPP packages from NEXUS...
149 bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
151 die 1 "Failed to get VPP packages!"
154 echo Downloading VPP packages of specific version from NEXUS...
155 DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
156 VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
157 bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
158 --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER} || {
159 die 1 "Failed to get VPP packages!"
164 # Use locally built packages.
165 mv ../${DUT}*.deb ${DOWNLOAD_DIR}/
168 die 1 "Unable to identify job type from: ${TEST_CODE}!"
171 # Extract VPP API to specific folder
# dpkg -x unpacks the deb contents (no install) so the API JSON files can be
# copied into the agent container later.
172 dpkg -x ${DOWNLOAD_DIR}/vpp_*.deb /tmp/vpp || {
173 die 1 "Failed to extract ${DUT} package!"
# Build a prod_vpp_agent docker image containing the freshly downloaded VPP
# packages plus the pinned ligato vpp-agent, then stage the exported image
# tarball in DOWNLOAD_DIR for the testbeds.
176 LIGATO_REPO_URL="https://github.com/ligato/"
177 VPP_AGENT_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_AGENT_STABLE_VER)
178 DOCKER_DEB="docker-ce_18.03.0~ce-0~ubuntu_amd64.deb"
180 # Clone & checkout stable vnf-agent
182 git clone -b ${VPP_AGENT_STABLE_VER} --single-branch \
183 ${LIGATO_REPO_URL}/vpp-agent vpp-agent || {
184 die 1 "Failed to run: git clone ${LIGATO_REPO_URL}/vpp-agent!"
# Install docker-ce on this build machine (specific pinned .deb for xenial).
189 wget -q https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/${DOCKER_DEB} || {
190 die 1 "Failed to download Docker package!"
193 sudo dpkg -i ${DOCKER_DEB} || {
194 die 1 "Failed to install Docker!"
197 # Pull ligato/dev_vpp_agent docker image and re-tag as local
198 sudo docker pull ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER} || {
199 die 1 "Failed to pull Docker image!"
202 sudo docker tag ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER}\
203 dev_vpp_agent:latest || {
204 die 1 "Failed to tag Docker image!"
207 # Start dev_vpp_agent container as daemon
208 sudo docker run --rm -itd --name agentcnt dev_vpp_agent bash || {
209 die 1 "Failed to run Docker image!"
212 # Copy latest vpp api into running container
213 sudo docker cp /tmp/vpp/usr/share/vpp/api agentcnt:/usr/share/vpp || {
214 die 1 "Failed to copy files Docker image!"
# Copy every downloaded artifact (VPP .debs) into the agent's build-root.
217 for f in ${DOWNLOAD_DIR}/*; do
218 sudo docker cp $f agentcnt:/opt/vpp-agent/dev/vpp/build-root/ || {
219 die 1 "Failed to copy files Docker image!"
223 # Recompile vpp-agent
224 sudo docker exec -i agentcnt \
225 script -qec '. ~/.bashrc; cd /go/src/github.com/ligato/vpp-agent && make generate && make install' || {
226 die 1 "Failed to build vpp-agent in Docker image!"
228 # Save container state
# NOTE(review): `docker ps -q` lists ALL running containers; this assumes
# agentcnt is the only one running on this machine — confirm.
229 sudo docker commit `sudo docker ps -q` dev_vpp_agent:latest || {
230 die 1 "Failed to commit state of Docker image!"
233 # Build prod_vpp_agent docker image
235 sudo docker build --tag prod_vpp_agent --no-cache . || {
236 die 1 "Failed to build Docker image!"
238 # Export Docker image
239 sudo docker save prod_vpp_agent | gzip > prod_vpp_agent.tar.gz || {
240 die 1 "Failed to save Docker image!"
# Resolve to an absolute path; tr strips the trailing newline from readlink.
242 DOCKER_IMAGE="$( readlink -f prod_vpp_agent.tar.gz | tr '\n' ' ' )"
# Replace the raw vpp debs with the single exported image tarball.
243 rm -r ${DOWNLOAD_DIR}/vpp*
244 mv ${DOCKER_IMAGE} ${DOWNLOAD_DIR}/
249 DPDK_REPO='https://fast.dpdk.org/rel/'
250 # Use downloaded packages with specific version
# Timed jobs scrape the repo index for the newest tarball; everything else
# uses the pinned 18.05 release.
251 if [[ "$TEST_CODE" == *daily* ]] || \
252 [[ "$TEST_CODE" == *weekly* ]] || \
253 [[ "$TEST_CODE" == *timed* ]];
255 echo "Downloading latest DPDK packages from repo..."
# Parse the directory listing: drop 2015 releases, keep dpdk-*.xz names,
# take the last (newest) entry.
256 DPDK_STABLE_VER=$(wget --no-check-certificate --quiet -O - ${DPDK_REPO} | \
257 grep -v '2015' | grep -Eo 'dpdk-[^\"]+xz' | tail -1)
259 echo "Downloading DPDK packages of specific version from repo..."
260 DPDK_STABLE_VER='dpdk-18.05.tar.xz'
# Skip the download when the tarball is already present locally.
262 if [[ ! -f ${DPDK_STABLE_VER} ]]; then
263 wget --no-check-certificate ${DPDK_REPO}${DPDK_STABLE_VER} || {
264 die 1 "Failed to get DPDK package from ${DPDK_REPO}!"
269 die 1 "Unable to identify DUT type from: ${TEST_CODE}!"
# Refuse to continue with an empty download dir — nothing to deploy/test.
274 if [[ ! "$(ls -A ${DOWNLOAD_DIR})" ]]; then
275 die 1 "No artifacts downloaded!"
278 # ENVIRONMENT PREPARATION
# Build an isolated python environment with the framework's requirements;
# --system-site-packages keeps access to globally installed modules.
281 pip install virtualenv || {
282 die 1 "Failed to install virtual env!"
284 virtualenv --system-site-packages env || {
285 die 1 "Failed to create virtual env!"
287 source env/bin/activate || {
288 die 1 "Failed to activate virtual env!"
290 pip install -r requirements.txt || {
291 die 1 "Failed to install requirements to virtual env!"
294 # We iterate over available topologies and wait until we reserve topology.
# Unquoted ${TOPOLOGIES} relies on word-splitting to iterate the
# space-separated list built earlier.
296 for TOPOLOGY in ${TOPOLOGIES};
298 python ${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py -t ${TOPOLOGY}
299 if [ $? -eq 0 ]; then
300 WORKING_TOPOLOGY=${TOPOLOGY}
301 echo "Reserved: ${WORKING_TOPOLOGY}"
302 # On script exit we clean testbed.
# From here on, any exit path (success or die) releases the testbed.
303 trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
308 if [ -n "${WORKING_TOPOLOGY}" ]; then
309 # Exit the infinite while loop if we made a reservation.
313 # Wait ~3minutes before next try.
# Random 0-19s jitter on top of 180s to de-synchronize competing jobs.
# NOTE(review): '$[ ]' arithmetic is deprecated; '$(( ))' is the modern form.
314 SLEEP_TIME=$[ ( $RANDOM % 20 ) + 180 ]s
315 echo "Sleeping ${SLEEP_TIME}"
319 # Clean testbed before execution.
320 python ${SCRIPT_DIR}/resources/tools/scripts/topo_cleanup.py -t ${WORKING_TOPOLOGY} || {
321 die 1 "Failed to cleanup topologies!"
# Base Robot Framework invocation: archive outputs, full trace logging,
# pass the reserved topology file, limit to this DUT's perf suite.
325 PYBOT_ARGS="--outputdir ${ARCHIVE_DIR} --loglevel TRACE --variable TOPOLOGY_PATH:${WORKING_TOPOLOGY} --suite tests.${DUT}.perf"
328 # All topologies NICs
# grep -P '\K' keeps only the text after "model: " (the NIC model names).
329 TOPOLOGIES_NICS=($(grep -hoPR "model: \K.*" topologies/available/* | sort -u))
330 # Selected topology NICs
331 TOPOLOGY_NICS=($(grep -hoPR "model: \K.*" ${WORKING_TOPOLOGY} | sort -u))
332 # All topologies NICs - Selected topology NICs
# comm -13 emits lines only in the second (all-NICs) input, i.e. NIC models
# the reserved testbed does NOT have; tests for those get excluded below.
333 EXCLUDE_NICS=($(comm -13 <(printf '%s\n' "${TOPOLOGY_NICS[@]}") <(printf '%s\n' "${TOPOLOGIES_NICS[@]}")))
336 # Select specific performance tests based on jenkins job type variable.
# NOTE(review): the branch conditions selecting between these tag sets are not
# visible in this view — presumably job-name matches (ndrpdr vs mrr jobs).
338 TEST_TAG_ARRAY=(ndrpdrANDnic_intel-x520-da2AND1c
339 ndrpdrANDnic_intel-x520-da2AND2c
346 TEST_TAG_ARRAY=(mrrAND64bAND1c
352 mrrANDimixAND1cANDvhost
353 mrrANDimixAND2cANDvhost
354 mrrANDimixAND4cANDvhost
355 mrrANDimixAND1cANDmemif
356 mrrANDimixAND2cANDmemif
357 mrrANDimixAND4cANDmemif)
# TEST_TAG_STRING comes from the gerrit trigger comment (when present).
360 if [[ -z "$TEST_TAG_STRING" ]]; then
361 # If nothing is specified, we will run pre-selected tests by
362 # following tags. Items of array will be concatenated by OR in Robot
364 TEST_TAG_ARRAY=(mrrANDnic_intel-x710AND1cAND64bANDip4base
365 mrrANDnic_intel-x710AND1cAND78bANDip6base
366 mrrANDnic_intel-x710AND1cAND64bANDl2bdbase)
368 # If trigger contains tags, split them into array.
369 TEST_TAG_ARRAY=(${TEST_TAG_STRING//:/ })
374 # We will add excluded NICs.
# Prefix each absent NIC model with "!NIC_" => a Robot exclude tag.
375 TEST_TAG_ARRAY+=("${EXCLUDE_NICS[@]/#/!NIC_}")
379 # We will prefix with perftest to prevent running other tests (e.g. Functional).
381 if [[ ${TEST_CODE} == vpp-* ]]; then
382 # Automatic prefixing for VPP jobs to limit the NIC used and
383 # traffic evaluation to MRR.
384 prefix="${prefix}mrrANDnic_intel-x710AND"
# Apply the prefix to every include tag; '!'-tags pass through untouched.
386 for TAG in "${TEST_TAG_ARRAY[@]}"; do
387 if [[ ${TAG} == "!"* ]]; then
388 # Exclude tags are not prefixed.
391 TAGS+=("$prefix${TAG}")
395 # Catenate TAG selections
# Turn each tag into a pybot option: '!'-tags become --exclude (with the '!'
# stripped), others become --include ANDed with the topology tag glob.
397 for TAG in "${TAGS[@]}"; do
398 if [[ ${TAG} == "!"* ]]; then
399 EXPANDED_TAGS+=(" --exclude ${TAG#$"!"} ")
401 EXPANDED_TAGS+=(" --include ${TOPOLOGIES_TAGS}AND${TAG} ")
# Run the suite; the EXIT trap still releases the testbed afterwards.
406 pybot ${PYBOT_ARGS}${EXPANDED_TAGS[@]} tests/
# NOTE(review): RETURN_STATUS=$? would suffice; $(echo $?) forks a subshell.
407 RETURN_STATUS=$(echo $?)
409 # We will create additional archive if workspace variable is set. This way if
410 # script is running in jenkins all will be automatically archived to logs.fd.io.
# '${WORKSPACE-}' defaults to empty outside Jenkins, so the copy is skipped.
411 if [[ -n ${WORKSPACE-} ]]; then
412 cp -r ${ARCHIVE_DIR}/ $WORKSPACE/archives/
# Propagate pybot's exit status as the job result.
415 exit ${RETURN_STATUS}