+# Probe each candidate VIRL host over ssh and keep only those whose status
+# file reports the expected production status.
+VIRL_PROD_SERVERS=()
+for index in "${!VIRL_SERVERS[@]}"; do
+    # stderr is folded into the captured status, so any ssh failure text
+    # simply fails the comparison below instead of leaking to the console.
+    virl_server_status=$(ssh ${SSH_OPTIONS} "${VIRL_USERNAME}@${VIRL_SERVERS[$index]}" cat ${VIRL_SERVER_STATUS_FILE} 2>&1)
+    echo "VIRL HOST ${VIRL_SERVERS[$index]} status is \"$virl_server_status\""
+    if [ "$virl_server_status" == "$VIRL_SERVER_EXPECTED_STATUS" ]
+    then
+        # Candidate is in good status. Add to array (quoted: SC2086).
+        VIRL_PROD_SERVERS+=("${VIRL_SERVERS[$index]}")
+    fi
+done
+
+# Replace the candidate list with the verified production servers and abort
+# with a distinctive exit code when none are usable.
+VIRL_SERVERS=("${VIRL_PROD_SERVERS[@]}")
+echo "VIRL servers in production: ${VIRL_SERVERS[@]}"
+num_hosts=${#VIRL_SERVERS[@]}
+# Numeric comparison: use -eq rather than the string operator == inside [ ].
+if [ "${num_hosts}" -eq 0 ]
+then
+    echo "No more VIRL candidate hosts available, failing."
+    exit 127
+fi
+
+# Get the LOAD of each server based on number of active simulations (testcases)
+VIRL_SERVER_LOAD=()
+for index in "${!VIRL_SERVERS[@]}"; do
+    # The whole pipeline is quoted so it runs remotely on the VIRL host;
+    # the ssh target is quoted locally (SC2086).
+    VIRL_SERVER_LOAD[${index}]=$(ssh ${SSH_OPTIONS} "${VIRL_USERNAME}@${VIRL_SERVERS[$index]}" "list-testcases | grep session | wc -l")
+done
+
+# Pick for each TEST_GROUP least loaded server
+VIRL_SERVER=()
+for index in "${!TEST_GROUPS[@]}"; do
+ # Emit one "load NR" pair per server (NR is the 1-based position from awk),
+ # numeric-sort by load and take the winner's position from the first line.
+ # NOTE(review): on equal loads sort's whole-line fallback compares the
+ # "load NR" text lexically, so with 10+ servers position "10" beats "2" —
+ # presumably acceptable; confirm if strict round-robin on ties is wanted.
+ least_load_server_idx=$(echo "${VIRL_SERVER_LOAD[*]}" | tr -s ' ' '\n' | awk '{print($0" "NR)}' | sort -g -k1,1 | head -1 | cut -f2 -d' ')
+ # Convert the 1-based pipeline position to a 0-based bash array subscript.
+ least_load_server=${VIRL_SERVERS[$least_load_server_idx-1]}
+ VIRL_SERVER+=($least_load_server)
+ # Adjusting load as we are not going run simulation immediately
+ VIRL_SERVER_LOAD[$least_load_server_idx-1]=$((VIRL_SERVER_LOAD[$least_load_server_idx-1]+1))
+done
+
+echo "Selected VIRL servers: ${VIRL_SERVER[@]}"
+
+# Temporarily download VPP and DPDK packages from nexus.fd.io
+if [ "${#}" -ne "0" ]; then
+ arr=(${@})
+ echo ${arr[0]}
+ SKIP_PATCH="skip_patchORskip_vpp_patch"
+ # Download DPDK parts not included in dpdk plugin of vpp build
+ for ARTIFACT in ${DPDK_ARTIFACTS}; do
+ wget -q "${VPP_REPO_URL}/${ARTIFACT}/${DPDK_STABLE_VER}/${ARTIFACT}-${DPDK_STABLE_VER}${VPP_CLASSIFIER}.${PACKAGE}" || exit
+ done
+else
+ rm -f *.${PACKAGE}
+ for ARTIFACT in ${DPDK_ARTIFACTS}; do
+ wget -q "${VPP_REPO_URL}/${ARTIFACT}/${DPDK_STABLE_VER}/${ARTIFACT}-${DPDK_STABLE_VER}${VPP_CLASSIFIER}.${PACKAGE}" || exit
+ done
+ for ARTIFACT in ${VPP_ARTIFACTS}; do
+ wget -q "${VPP_REPO_URL}/${ARTIFACT}/${VPP_STABLE_VER}/${ARTIFACT}-${VPP_STABLE_VER}${VPP_CLASSIFIER}.${PACKAGE}" || exit
+ done
+fi
+
+# Collect the downloaded package file names via glob.
+VPP_PKGS=(*.$PACKAGE)
+echo "${VPP_PKGS[@]}"
+VIRL_DIR_LOC="/tmp"
+# Quoted element-wise copy (the unquoted form would re-split file names
+# containing IFS characters and re-expand globs).
+VPP_PKGS_FULL=("${VPP_PKGS[@]}")
+
+# Prepend directory location at remote host to package file list
+for index in "${!VPP_PKGS_FULL[@]}"; do
+    VPP_PKGS_FULL[${index}]=${VIRL_DIR_LOC}/${VPP_PKGS_FULL[${index}]}
+done
+
+echo "Updated file names: " "${VPP_PKGS_FULL[@]}"
+
+# Dump the private key to the console log for debugging of ssh failures.
+cat ${VIRL_PKEY}
+
+# Copy the files to VIRL hosts
+DONE=""
+for index in "${!VIRL_SERVER[@]}"; do
+ # Do not copy files in case they have already been copied to the VIRL host
+ [[ "${DONE[@]}" =~ "${VIRL_SERVER[${index}]}" ]] && copy=0 || copy=1
+
+ if [ "${copy}" -eq "0" ]; then
+ echo "VPP packages have already been copied to the VIRL host ${VIRL_SERVER[${index}]}"
+ else
+ scp ${SSH_OPTIONS} *.${PACKAGE} \
+ ${VIRL_USERNAME}@${VIRL_SERVER[${index}]}:${VIRL_DIR_LOC}/
+
+ result=$?
+ if [ "${result}" -ne "0" ]; then
+ echo "Failed to copy VPP packages to VIRL host ${VIRL_SERVER[${index}]}"
+ echo ${result}
+ exit ${result}
+ else
+ echo "VPP packages successfully copied to the VIRL host ${VIRL_SERVER[${index}]}"
+ fi
+ DONE+=(${VIRL_SERVER[${index}]})
+ fi
+done
+
+# Start a simulation on VIRL server
+
+function stop_virl_simulation {
+ for index in "${!VIRL_SERVER[@]}"; do
+ ssh ${SSH_OPTIONS} ${VIRL_USERNAME}@${VIRL_SERVER[${index}]}\
+ "stop-testcase ${VIRL_SID[${index}]}"
+ done
+}
+
+# Upon script exit, cleanup the simulation execution
+trap stop_virl_simulation EXIT
+
+# Start one simulation per selected VIRL server and fetch its topology file.
+for index in "${!VIRL_SERVER[@]}"; do
+ echo "Starting simulation nr. ${index} on VIRL server ${VIRL_SERVER[${index}]}"
+ # Get given VIRL server limits for max. number of VMs and IPs
+ # (get_max_ip_nr / get_max_sim_nr are helpers defined elsewhere —
+ # presumably each prints a single integer; confirm.)
+ max_ips=$(get_max_ip_nr ${VIRL_SERVER[${index}]})
+ max_ips_from_sims=$(($(get_max_sim_nr ${VIRL_SERVER[${index}]})*IPS_PER_SIMULATION))
+ # Set quota to lower value
+ IP_QUOTA=$([ $max_ips -le $max_ips_from_sims ] && echo "$max_ips" || echo "$max_ips_from_sims")
+ # Start the simulation
+ # The command substitution's exit status propagates through the assignment,
+ # so $? below reflects the remote start-testcase / ssh result.
+ VIRL_SID[${index}]=$(ssh ${SSH_OPTIONS} \
+ ${VIRL_USERNAME}@${VIRL_SERVER[${index}]} \
+ "start-testcase -vv --quota ${IP_QUOTA} --copy ${VIRL_TOPOLOGY} \
+ --release ${VIRL_RELEASE} ${VPP_PKGS_FULL[@]}")
+ retval=$?
+ if [ ${retval} -ne "0" ]; then
+ echo "VIRL simulation start failed on ${VIRL_SERVER[${index}]}"
+ exit ${retval}
+ fi
+ # RHS is deliberately unquoted so [[ =~ ]] treats it as a regex: the
+ # reported session ID must look like "session-XXXXXX".
+ if [[ ! "${VIRL_SID[${index}]}" =~ session-[a-zA-Z0-9_]{6} ]]; then
+ echo "No VIRL session ID reported."
+ exit 127
+ fi
+ echo "VIRL simulation nr. ${index} started on ${VIRL_SERVER[${index}]}"
+
+ # Dump the generated topology into the console log (ssh_do is a helper
+ # defined elsewhere — presumably a logged ssh wrapper; confirm).
+ ssh_do ${VIRL_USERNAME}@${VIRL_SERVER[${index}]}\
+ cat /scratch/${VIRL_SID[${index}]}/topology.yaml
+
+ # Download the topology file from VIRL session and rename it
+ scp ${SSH_OPTIONS} \
+ ${VIRL_USERNAME}@${VIRL_SERVER[${index}]}:/scratch/${VIRL_SID[${index}]}/topology.yaml \
+ topologies/enabled/topology${index}.yaml
+
+ retval=$?
+ if [ ${retval} -ne "0" ]; then
+ echo "Failed to copy topology file from VIRL simulation nr. ${index} on VIRL server ${VIRL_SERVER[${index}]}"
+ exit ${retval}
+ fi
+done
+
+echo ${VIRL_SID[@]}
+
+# Build an isolated python environment for pybot/pykwalify; fail fast instead
+# of letting a missing env surface later as confusing pybot errors.
+virtualenv --system-site-packages env || { echo "virtualenv creation failed." >&2; exit 1; }
+. env/bin/activate
+
+echo pip install
+pip install -r ${SCRIPT_DIR}/requirements.txt || { echo "pip install failed." >&2; exit 1; }
+
+# Validate every downloaded topology file against the schemas. A validation
+# failure is reported but deliberately does not abort the test run.
+for index in "${!VIRL_SERVER[@]}"; do
+    if ! pykwalify -s ${SCRIPT_DIR}/resources/topology_schemas/3_node_topology.sch.yaml \
+            -s ${SCRIPT_DIR}/resources/topology_schemas/topology.sch.yaml \
+            -d ${SCRIPT_DIR}/topologies/enabled/topology${index}.yaml \
+            -vvv; then
+        echo "Topology${index} schema validation failed."
+        echo "However, the tests will start."
+    fi
+done
+
+# Run one test set (a comma-separated suite group) under pybot.
+# Globals read: TEST_GROUPS, SUITE_PATH, SCRIPT_DIR, LOG_PATH,
+#               SHARED_MEMORY_PATH, SKIP_PATCH
+# Arguments:    $1 - test set number (index into TEST_GROUPS)
+# Outputs:      timestamped log in ${LOG_PATH}/test_run${nr}.log; pybot exit
+#               code written to ${SHARED_MEMORY_PATH}/rc_test_run${nr}
+# Intended to be launched in the background, one instance per VIRL server.
+function run_test_set() {
+ set +x
+ OLDIFS=$IFS
+ # TEST_GROUPS entries are comma-separated suite lists; split on commas only.
+ IFS=","
+ nr=$(echo $1)
+ rm -f ${LOG_PATH}/test_run${nr}.log
+ # Funnel all further stdout/stderr of this function through a process
+ # substitution that prefixes every line with a timestamp and appends it to
+ # this set's log file.
+ exec &> >(while read line; do echo "$(date +'%H:%M:%S') $line" \
+ >> ${LOG_PATH}/test_run${nr}.log; done;)
+ suite_str=""
+ for suite in ${TEST_GROUPS[${nr}]}; do
+ suite_str="${suite_str} --suite ${SUITE_PATH}.${suite}"
+ done
+ IFS=$OLDIFS
+
+ # Echo the exact pybot command line into the log before executing it.
+ # NOTE(review): if SKIP_PATCH is unset, "--exclude ${SKIP_PATCH}" collapses
+ # to a bare --exclude that swallows the next option — confirm SKIP_PATCH is
+ # always defined before this function runs.
+ echo "PYTHONPATH=`pwd` pybot -L TRACE -W 136\
+ -v TOPOLOGY_PATH:${SCRIPT_DIR}/topologies/enabled/topology${nr}.yaml \
+ ${suite_str} \
+ --include vm_envAND3_node_single_link_topo \
+ --include vm_envAND3_node_double_link_topo \
+ --exclude PERFTEST \
+ --exclude ${SKIP_PATCH} \
+ --noncritical EXPECTED_FAILING \
+ --output ${LOG_PATH}/log_test_set_run${nr} \
+ tests/"
+
+ PYTHONPATH=`pwd` pybot -L TRACE -W 136\
+ -v TOPOLOGY_PATH:${SCRIPT_DIR}/topologies/enabled/topology${nr}.yaml \
+ ${suite_str} \
+ --include vm_envAND3_node_single_link_topo \
+ --include vm_envAND3_node_double_link_topo \
+ --exclude PERFTEST \
+ --exclude ${SKIP_PATCH} \
+ --noncritical EXPECTED_FAILING \
+ --output ${LOG_PATH}/log_test_set_run${nr} \
+ tests/
+
+ # Persist the pybot exit code where the parent shell can collect it.
+ local_run_rc=$?
+ echo ${local_run_rc} > ${SHARED_MEMORY_PATH}/rc_test_run${nr}
+ set -x
+}
+
+set +x
+# Fan out: launch one background run_test_set() instance per selected VIRL
+# server and map each child PID to its test-set number so completions can be
+# reported as PIDs disappear.
+for index in "${!VIRL_SERVER[@]}"; do
+    run_test_set ${index} &
+    echo "Sent to background: Test_set${index} (pid=$!)"
+    pids[$!]=${index}
+done
+
+echo
+echo -n "Waiting..."
+
+# Watch the stable of background processes.
+# If a pid goes away, remove it from the array.
+while [ -n "${pids[*]}" ]; do
+    # Poll roughly every 10 seconds, printing one dot per second as a
+    # heartbeat ({0..9} is a builtin brace range; seq forks a process).
+    for i in {0..9}; do
+        sleep 1
+        echo -n "."
+    done
+    for pid in "${!pids[@]}"; do
+        if ! ps "$pid" >/dev/null; then
+            echo -e "\n"
+            echo "Test_set${pids[$pid]} with PID $pid finished."
+            # Quoted subscript so the bracket expression cannot glob (SC2184).
+            unset "pids[$pid]"
+        fi
+    done
+    # Leave immediately once the last job is reaped (skips the final sleep
+    # the while condition alone would incur).
+    if [ -z "${!pids[*]}" ]; then
+        break
+    fi
+    echo -n -e "\nStill waiting for test set(s): ${pids[*]} ..."
+done
+
+echo
+echo "All test set runs finished."
+echo
+
+set -x
+
+RC=0
+for index in "${!VIRL_SERVER[@]}"; do
+ echo "Test_set${index} log:"
+ cat ${LOG_PATH}/test_run${index}.log
+ RC_PARTIAL_RUN=$(cat ${SHARED_MEMORY_PATH}/rc_test_run${index})
+ RC=$((RC+RC_PARTIAL_RUN))
+ rm -f ${SHARED_MEMORY_PATH}/rc_test_run${index}
+ rm -f ${LOG_PATH}/test_run${index}.log
+ echo
+done
+
+# Log the final result
+# RC is the sum of all per-set pybot exit codes; zero means every critical
+# test passed. Tracing is silenced only around the banner output.
+if [ "${RC}" -eq "0" ]; then
+    set +x
+    echo
+    echo "========================================================================================================================================"
+    echo "Final result of all test loops: | PASS |"
+    echo "All critical tests have passed."
+    echo "========================================================================================================================================"
+    echo
+    set -x
+else
+    # Singular vs. plural phrasing for the failure count.
+    if [ "${RC}" -eq "1" ]; then HLP_STR="test has"; else HLP_STR="tests have"; fi
+    set +x
+    echo
+    echo "========================================================================================================================================"
+    echo "Final result of all test loops: | FAIL |"
+    echo "${RC} critical ${HLP_STR} failed."
+    echo "========================================================================================================================================"
+    echo
+    set -x
+fi
+
+echo Post-processing test data...
+
+partial_logs=""
+for index in "${!VIRL_SERVER[@]}"; do
+ partial_logs="${partial_logs} ${LOG_PATH}/log_test_set_run${index}.xml"
+done
+
+# Rebot output post-processing
+rebot --noncritical EXPECTED_FAILING \
+ --output output.xml ${partial_logs}