1807 report: updated virtual topologies for all functional tests.
[csit.git] / bootstrap-verify-perf.sh
1 #!/usr/bin/env bash
2 # Copyright (c) 2018 Cisco and/or its affiliates.
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at:
6 #
7 #     http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 set -xo pipefail
16
17 # FUNCTIONS
function warn () {
    # Print all arguments, space separated, to standard error.
    # printf is used instead of echo so that messages that look like
    # echo options (e.g. "-n") or contain backslashes are printed
    # verbatim.
    printf '%s\n' "$*" >&2
}
22
function die () {
    # Print the message to standard error and exit with the error code
    # specified by the first argument.
    # Arguments:
    #   ${1} - Exit status to terminate with.
    #   ${@} - Message to print (all remaining arguments).
    local status="$1"
    shift
    warn "$@"
    exit "$status"
}
31
function help () {
    # Display the usage message on standard error and exit with status 1.
    die 1 "Usage: $(basename "$0") csit-[dpdk|vpp|ligato]-[2n-skx|3n-skx|3n-hsw]"
}
36
function cancel_all () {
    # Trap function to get the testbed back into a consistent state:
    # clean it up, then release the reservation.
    # Arguments:
    #   ${1} - Path to the topology file of the testbed to release.
    python "${SCRIPT_DIR}/resources/tools/scripts/topo_cleanup.py" -t "$1" || {
        die 1 "Failure during execution of topology cleanup script!"
    }
    python "${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py" -c -t "$1" || {
        die 1 "Failure during execution of topology un-reservation script!"
    }
}
46
# VARIABLES
# Arrays of available testbeds per topology type, described by topology
# files (paths relative to the repository root).
TOPOLOGIES_3N_HSW=(topologies/available/lf_3n_hsw_testbed1.yaml
                   topologies/available/lf_3n_hsw_testbed2.yaml
                   topologies/available/lf_3n_hsw_testbed3.yaml)
TOPOLOGIES_2N_SKX=(topologies/available/lf_2n_skx_testbed21.yaml
                   topologies/available/lf_2n_skx_testbed24.yaml)
TOPOLOGIES_3N_SKX=(topologies/available/lf_3n_skx_testbed31.yaml
                   topologies/available/lf_3n_skx_testbed32.yaml)

# Absolute directory this script lives in; root for all relative paths.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export PYTHONPATH="${SCRIPT_DIR}"

# NOTE(review): RESERVATION_DIR is not referenced anywhere else in this
# script — confirm whether a sourced tool relies on it before removing.
RESERVATION_DIR="/tmp/reservation_dir"
DOWNLOAD_DIR="${SCRIPT_DIR}/download_dir"
ARCHIVE_DIR="${SCRIPT_DIR}/archive"

mkdir -p "${DOWNLOAD_DIR}" || {
    die 1 "Failed to create download dir!"
}
mkdir -p "${ARCHIVE_DIR}" || {
    die 1 "Failed to create archive dir!"
}
70
# Get test code: Jenkins exports JOB_NAME; when it is absent or empty
# (manual invocation), consume the first command line argument instead.
if [[ -n "${JOB_NAME-}" ]]; then
    TEST_CODE="${JOB_NAME}"
else
    TEST_CODE="${1}"
    shift
fi
77
# TOPOLOGY SELECTION
# Pick the testbed pool matching the job name; default to 3n-hsw.
# BUGFIX: the previous scalar assignment (TOPOLOGIES=$TOPOLOGIES_3N_HSW)
# copied only the FIRST element of the array, so only one testbed was
# ever considered for reservation; copy the whole array instead.
case "$TEST_CODE" in
    *2n-skx*)
        TOPOLOGIES=("${TOPOLOGIES_2N_SKX[@]}")
        TOPOLOGIES_TAGS="2_node_*_link_topo"
        ;;
    *3n-skx*)
        TOPOLOGIES=("${TOPOLOGIES_3N_SKX[@]}")
        TOPOLOGIES_TAGS="3_node_*_link_topo"
        ;;
    *)
        # Fallback to 3-node Haswell by default (backward compatibility)
        TOPOLOGIES=("${TOPOLOGIES_3N_HSW[@]}")
        TOPOLOGIES_TAGS="3_node_*_link_topo"
        ;;
esac

if [[ "${#TOPOLOGIES[@]}" -eq 0 ]]; then
    die 1 "No applicable topology found!"
fi
98
# Stage the device-under-test artifacts into DOWNLOAD_DIR and set DUT
# according to the job name.  Work happens inside the download dir.
cd ${DOWNLOAD_DIR}
case "$TEST_CODE" in
    *hc2vpp*)
        DUT="hc2vpp"
        ;;
    *vpp*)
        DUT="vpp"

        case "$TEST_CODE" in
            csit-vpp-*)
                # CSIT-triggered job: fetch VPP packages from Nexus.
                # daily/weekly/timed jobs track the latest build; all
                # other jobs use the pinned versions from the repo root.
                if [[ "$TEST_CODE" == *daily* ]] || \
                   [[ "$TEST_CODE" == *weekly* ]] || \
                   [[ "$TEST_CODE" == *timed* ]];
                then
                    echo Downloading latest VPP packages from NEXUS...
                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
                        --skip-install || {
                        die 1 "Failed to get VPP packages!"
                    }
                else
                    echo Downloading VPP packages of specific version from NEXUS...
                    # Pinned versions are tracked in plain files at the repo root.
                    DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
                    VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
                        --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER} || {
                        die 1 "Failed to get VPP packages!"
                    }
                fi
                ;;
            vpp-csit-*)
                # VPP-triggered job: use locally built packages placed
                # one level up by the parent vpp build job.
                mv ../${DUT}*.deb ${DOWNLOAD_DIR}/
                ;;
            *)
                die 1 "Unable to identify job type from: ${TEST_CODE}!"
                ;;
        esac
        ;;
    *ligato*)
        DUT="kubernetes"

        # Fetch VPP packages first (same logic as the *vpp* branch), then
        # bake them into a prod_vpp_agent Docker image below.
        case "$TEST_CODE" in
            csit-*)
                # Use downloaded packages with specific version
                if [[ "$TEST_CODE" == *daily* ]] || \
                   [[ "$TEST_CODE" == *weekly* ]] || \
                   [[ "$TEST_CODE" == *timed* ]];
                then
                    echo Downloading latest VPP packages from NEXUS...
                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
                        --skip-install || {
                        die 1 "Failed to get VPP packages!"
                    }
                else
                    echo Downloading VPP packages of specific version from NEXUS...
                    DPDK_STABLE_VER=$(cat ${SCRIPT_DIR}/DPDK_STABLE_VER)
                    VPP_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_STABLE_VER_UBUNTU)
                    bash ${SCRIPT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh \
                        --skip-install --vpp ${VPP_STABLE_VER} --dkms ${DPDK_STABLE_VER} || {
                        die 1 "Failed to get VPP packages!"
                    }
                fi
                ;;
            vpp-csit-*)
                # Use locally built packages.
                mv ../${DUT}*.deb ${DOWNLOAD_DIR}/
                ;;
            *)
                die 1 "Unable to identify job type from: ${TEST_CODE}!"
                ;;
        esac
        # Extract VPP API to specific folder (needed inside the agent
        # container below).
        dpkg -x ${DOWNLOAD_DIR}/vpp_*.deb /tmp/vpp || {
            die 1 "Failed to extract ${DUT} package!"
        }

        LIGATO_REPO_URL="https://github.com/ligato/"
        VPP_AGENT_STABLE_VER=$(cat ${SCRIPT_DIR}/VPP_AGENT_STABLE_VER)
        DOCKER_DEB="docker-ce_18.03.0~ce-0~ubuntu_amd64.deb"

        # Clone & checkout stable vnf-agent.  cwd is DOWNLOAD_DIR, so
        # ../.. is the parent of SCRIPT_DIR; the clone lands next to the
        # CSIT checkout.
        cd ../..
        git clone -b ${VPP_AGENT_STABLE_VER} --single-branch \
            ${LIGATO_REPO_URL}/vpp-agent vpp-agent || {
            die 1 "Failed to run: git clone ${LIGATO_REPO_URL}/vpp-agent!"
        }
        cd vpp-agent

        # Install Docker
        wget -q https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/${DOCKER_DEB} || {
            die 1 "Failed to download Docker package!"
        }

        sudo dpkg -i ${DOCKER_DEB} || {
            die 1 "Failed to install Docker!"
        }

        # Pull ligato/dev_vpp_agent docker image and re-tag as local
        sudo docker pull ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER} || {
            die 1 "Failed to pull Docker image!"
        }

        sudo docker tag ligato/dev-vpp-agent:${VPP_AGENT_STABLE_VER}\
            dev_vpp_agent:latest || {
            die 1 "Failed to tag Docker image!"
        }

        # Start dev_vpp_agent container as daemon
        sudo docker run --rm -itd --name agentcnt dev_vpp_agent bash || {
            die 1 "Failed to run Docker image!"
        }

        # Copy latest vpp api into running container
        sudo docker cp /tmp/vpp/usr/share/vpp/api agentcnt:/usr/share/vpp || {
            die 1 "Failed to copy files Docker image!"
        }

        # Copy the downloaded VPP packages into the container build root.
        for f in ${DOWNLOAD_DIR}/*; do
            sudo docker cp $f agentcnt:/opt/vpp-agent/dev/vpp/build-root/ || {
                die 1 "Failed to copy files Docker image!"
            }
        done

        # Recompile vpp-agent against the freshly copied VPP API.
        sudo docker exec -i agentcnt \
            script -qec '. ~/.bashrc; cd /go/src/github.com/ligato/vpp-agent && make generate && make install' || {
            die 1 "Failed to build vpp-agent in Docker image!"
        }
        # Save container state
        sudo docker commit `sudo docker ps -q` dev_vpp_agent:latest || {
            die 1 "Failed to commit state of Docker image!"
        }

        # Build prod_vpp_agent docker image
        cd docker/prod/ &&\
            sudo docker build --tag prod_vpp_agent --no-cache . || {
                die 1 "Failed to build Docker image!"
            }
        # Export Docker image as the artifact delivered to the testbed.
        sudo docker save prod_vpp_agent | gzip > prod_vpp_agent.tar.gz || {
            die 1 "Failed to save Docker image!"
        }
        DOCKER_IMAGE="$( readlink -f prod_vpp_agent.tar.gz | tr '\n' ' ' )"
        # Replace the raw VPP packages with the image tarball.
        rm -r ${DOWNLOAD_DIR}/vpp*
        mv ${DOCKER_IMAGE} ${DOWNLOAD_DIR}/
        ;;
    *dpdk*)
        DUT="dpdk"

        DPDK_REPO='https://fast.dpdk.org/rel/'
        # daily/weekly/timed jobs take the newest tarball from the repo
        # index; other jobs use a fixed release.
        if [[ "$TEST_CODE" == *daily* ]] || \
           [[ "$TEST_CODE" == *weekly* ]] || \
           [[ "$TEST_CODE" == *timed* ]];
        then
            echo "Downloading latest DPDK packages from repo..."
            # Scrape the repo index for tarball names, dropping 2015
            # releases; the last (newest) match wins.
            DPDK_STABLE_VER=$(wget --no-check-certificate --quiet -O - ${DPDK_REPO} | \
                grep -v '2015' | grep -Eo 'dpdk-[^\"]+xz' | tail -1)
        else
            echo "Downloading DPDK packages of specific version from repo..."
            DPDK_STABLE_VER='dpdk-18.05.tar.xz'
        fi
        # Skip the download when the tarball is already present.
        if [[ ! -f ${DPDK_STABLE_VER} ]]; then
            wget --no-check-certificate ${DPDK_REPO}${DPDK_STABLE_VER} || {
                die 1 "Failed to get DPDK package from ${DPDK_REPO}!"
            }
        fi
        ;;
    *)
        die 1 "Unable to identify DUT type from: ${TEST_CODE}!"
        ;;
esac
cd ${SCRIPT_DIR}

# Fail early if nothing ended up in the download directory.
if [[ ! "$(ls -A ${DOWNLOAD_DIR})" ]]; then
    die 1 "No artifacts downloaded!"
fi
277
# ENVIRONMENT PREPARATION
# Build a fresh Python virtualenv (re-using system site packages) and
# install the CSIT requirements into it.
rm -rf env

if ! pip install virtualenv; then
    die 1 "Failed to install virtual env!"
fi
if ! virtualenv --system-site-packages env; then
    die 1 "Failed to create virtual env!"
fi
if ! source env/bin/activate; then
    die 1 "Failed to activate virtual env!"
fi
if ! pip install -r requirements.txt; then
    die 1 "Failed to install requirements to virtual env!"
fi
293
# We iterate over available topologies and wait until we reserve one.
while :; do
    for TOPOLOGY in "${TOPOLOGIES[@]}"; do
        # Reservation script returns 0 on success; test it directly
        # instead of the fragile "$?" pattern.
        if python "${SCRIPT_DIR}/resources/tools/scripts/topo_reservation.py" -t "${TOPOLOGY}"; then
            WORKING_TOPOLOGY="${TOPOLOGY}"
            echo "Reserved: ${WORKING_TOPOLOGY}"
            # On script exit we clean up and release the testbed.
            trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
            break
        fi
    done

    if [ -n "${WORKING_TOPOLOGY}" ]; then
        # Exit the infinite while loop if we made a reservation.
        break
    fi

    # Wait ~3 minutes before the next try, with up to 20s of jitter so
    # concurrent jobs do not retry in lockstep.  (Replaces the
    # deprecated $[ ... ] arithmetic form.)
    SLEEP_TIME=$(( ( RANDOM % 20 ) + 180 ))s
    echo "Sleeping ${SLEEP_TIME}"
    sleep ${SLEEP_TIME}
done
318
# Clean testbed before execution.
python "${SCRIPT_DIR}/resources/tools/scripts/topo_cleanup.py" -t "${WORKING_TOPOLOGY}" || {
    die 1 "Failed to cleanup topologies!"
}

# CSIT EXECUTION
# NOTE: kept as a single string on purpose; it is word-split (expanded
# unquoted) at the pybot invocation below.
PYBOT_ARGS="--outputdir ${ARCHIVE_DIR} --loglevel TRACE --variable TOPOLOGY_PATH:${WORKING_TOPOLOGY} --suite tests.${DUT}.perf"
326
# NIC SELECTION
# Compute the set of NIC models that exist somewhere in the lab but NOT
# in the reserved testbed, so tests tagged for those NICs can be
# excluded later.
# All topologies NICs
TOPOLOGIES_NICS=($(grep -hoPR "model: \K.*" topologies/available/* | sort -u))
# Selected topology NICs
TOPOLOGY_NICS=($(grep -hoPR "model: \K.*" ${WORKING_TOPOLOGY} | sort -u))
# All topologies NICs - Selected topology NICs (set difference via comm;
# both inputs are sorted above as comm requires).
EXCLUDE_NICS=($(comm -13 <(printf '%s\n' "${TOPOLOGY_NICS[@]}") <(printf '%s\n' "${TOPOLOGIES_NICS[@]}")))
334
# Select the Robot Framework tag expressions to run, based on job type.
case "$TEST_CODE" in
    # Select specific performance tests based on jenkins job type variable.
    *ndrpdr-weekly* )
        # Weekly job: NDR/PDR throughput search on selected NIC/core
        # combinations plus IPsec suites.
        TAGS=(ndrpdrANDnic_intel-x520-da2AND1c
              ndrpdrANDnic_intel-x520-da2AND2c
              ndrpdrAND1cANDipsec
              ndrpdrAND2cANDipsec)
        ;;
    *ndrpdr-timed* )
        # Timed NDR/PDR job: no extra tag filtering beyond topology tags.
        ;;
    *mrr-daily* )
       # Daily MRR job: all frame sizes and core counts; imix only for
       # vhost and memif suites.
       TAGS=(mrrAND64bAND1c
             mrrAND64bAND2c
             mrrAND64bAND4c
             mrrAND78bAND1c
             mrrAND78bAND2c
             mrrAND78bAND4c
             mrrANDimixAND1cANDvhost
             mrrANDimixAND2cANDvhost
             mrrANDimixAND4cANDvhost
             mrrANDimixAND1cANDmemif
             mrrANDimixAND2cANDmemif
             mrrANDimixAND4cANDmemif)
        ;;
    * )
        if [[ -z "$TEST_TAG_STRING" ]]; then
            # If nothing is specified, we will run pre-selected tests by
            # following tags. Items of array will be concatenated by OR in Robot
            # Framework.
            TEST_TAG_ARRAY=(mrrANDnic_intel-x710AND1cAND64bANDip4base
                            mrrANDnic_intel-x710AND1cAND78bANDip6base
                            mrrANDnic_intel-x710AND1cAND64bANDl2bdbase)
        else
            # If trigger contains tags, split them into array.
            # NOTE: expansion is intentionally unquoted so the
            # colon-to-space substitution word-splits into elements.
            TEST_TAG_ARRAY=(${TEST_TAG_STRING//:/ })
            # We will add excluded NICs (each prefixed with "!NIC_").
            TEST_TAG_ARRAY+=("${EXCLUDE_NICS[@]/#/!NIC_}")
        fi

        TAGS=()

        # We will prefix with perftest to prevent running other tests
        # (e.g. Functional).
        prefix="perftestAND"
        if [[ ${TEST_CODE} == vpp-* ]]; then
            # Automatic prefixing for VPP jobs to limit the NIC used and
            # traffic evaluation to MRR.
            prefix="${prefix}mrrANDnic_intel-x710AND"
        fi
        for TAG in "${TEST_TAG_ARRAY[@]}"; do
            if [[ ${TAG} == "!"* ]]; then
                # Exclude tags are not prefixed.
                TAGS+=("${TAG}")
            else
                TAGS+=("$prefix${TAG}")
            fi
        done
        ;;
esac
394
# Catenate TAG selections into pybot --include/--exclude options.
# Elements intentionally keep surrounding spaces; they are word-split
# when the array is expanded unquoted at the pybot invocation.
EXPANDED_TAGS=()
for TAG in "${TAGS[@]}"; do
    if [[ ${TAG} == "!"* ]]; then
        # A leading "!" marks an exclude tag; strip it.  (Plain pattern
        # "!" replaces the needlessly obscure $"!" locale-string form.)
        EXPANDED_TAGS+=(" --exclude ${TAG#!} ")
    else
        # Include tags are constrained to the selected topology type.
        EXPANDED_TAGS+=(" --include ${TOPOLOGIES_TAGS}AND${TAG} ")
    fi
done
404
# Execute the test.
# PYBOT_ARGS and EXPANDED_TAGS are expanded unquoted on purpose: every
# option and value must become a separate word for pybot.
pybot ${PYBOT_ARGS}${EXPANDED_TAGS[@]} tests/
# Capture the status directly; the former $(echo $?) spawned a useless
# subshell just to copy the value.
RETURN_STATUS=$?

# We will create additional archive if workspace variable is set. This way if
# script is running in jenkins all will be automatically archived to logs.fd.io.
if [[ -n ${WORKSPACE-} ]]; then
    cp -r "${ARCHIVE_DIR}/" "${WORKSPACE}/archives/"
fi

exit "${RETURN_STATUS}"