Use Ubuntu-20.04 for vpp-csit-* job executor images
author Dave Wallace <dwallacelf@gmail.com>
Thu, 11 Feb 2021 21:44:59 +0000 (16:44 -0500)
committer Dave Wallace <dwallacelf@gmail.com>
Sat, 20 Mar 2021 23:29:27 +0000 (19:29 -0400)
- refactor vpp-csit job names to conform to the vpp
  job nomenclature
- add ubuntu-20.04 for CSIT image builder install
  requirements
- fix executor class verification
- add option to build_executor_docker_image.sh to
  dump the generated Dockerfile without building
  image(s) (see usage sketch below)
- refactor Dockerfile generation to support
  executor classes
- add support for csit_shim and csit_dut docker
  image generation from csit ansible repo
- refactor docker installation
- refactor locale support
- remove old static csit Dockerfiles
- remove unused jenkins labels
- use csit-builder-{os}-prod-{executor_arch} label for
  vpp csit device test
- run tox in csit_pip_cache() to include pylint
  requirements in the python cache and avoid
  downloading from PyPI during job execution
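
Usage sketch for the new -d option (a minimal example; the 'csit_dut'
class and 'ubuntu-20.04' OS name are illustrative values, run the
script with -h for the authoritative class and OS name lists):

    # Dump the generated 'csit_dut' Dockerfile for ubuntu-20.04 to stdout
    # without building or pushing any image (requires root or membership
    # in the 'docker' group).
    docker/scripts/build_executor_docker_image.sh -c csit_dut -d ubuntu-20.04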

Change-Id: I5f4ffc587c6b8c43cb874dfd8c615ce56d3b97a4
Signed-off-by: Dave Wallace <dwallacelf@gmail.com>
29 files changed:
docker/README [deleted file]
docker/csit/csit-shim/Dockerfile [deleted file]
docker/csit/csit-shim/files/badkeypub [deleted file]
docker/csit/csit-shim/files/sshconfig [deleted file]
docker/csit/csit-shim/files/wrapdocker [deleted file]
docker/csit/csit-sut/Dockerfile [deleted file]
docker/csit/csit-sut/supervisord.conf [deleted file]
docker/scripts/README.md
docker/scripts/build_executor_docker_image.sh
docker/scripts/dbld_csit_install_packages.sh
docker/scripts/dbld_dump_build_logs.sh
docker/scripts/dbld_install_docker.sh [deleted file]
docker/scripts/dbld_lfit_requirements.sh
docker/scripts/dbld_vpp_install_packages.sh
docker/scripts/lib_apt.sh
docker/scripts/lib_common.sh
docker/scripts/lib_csit.sh
docker/scripts/lib_dnf.sh
docker/scripts/lib_vpp.sh
docker/scripts/lib_yum.sh
docker/scripts/update_dockerhub_prod_tags.sh
jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804-l.yaml [deleted file]
jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804-us.yaml [deleted file]
jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804arm-s.yaml [deleted file]
jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804arm-us.yaml [deleted file]
jenkins-config/clouds/nomad/FDIONOMAD/vpp-csit-arm-ubuntu18.yaml [deleted file]
jenkins-config/clouds/nomad/FDIONOMAD/vpp-csit-device.yaml [deleted file]
jenkins-config/clouds/nomad/FDIONOMAD/vpp-csit-ubuntu18.yaml [deleted file]
jjb/vpp/vpp.yaml

diff --git a/docker/README b/docker/README
deleted file mode 100644 (file)
index 71e7115..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-This directory contains the docker image definitions for executor images
-which run CI jobs on the FD.io Nomad cluster.
-
-The files in the following repositories were originally created by Ed Kern
-for the Nomad container proof of concept build infrastructure for FD.io
-projects. The original source files and commit history can be found in the
-respective repositories at
-
-  csit/csit-shim:  https://github.com/snergfdio/csit-shim
-  csit/csit-sut:   https://github.com/snergfdio/csit-sut
diff --git a/docker/csit/csit-shim/Dockerfile b/docker/csit/csit-shim/Dockerfile
deleted file mode 100644 (file)
index 288c49b..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-FROM ubuntu:18.04
-MAINTAINER Ed Kern <ejk@cisco.com>
-LABEL Description="CSIT shim container"
-LABEL Vendor="cisco.com"
-LABEL Version="1.2"
-
-# Setup the environment
-ENV DEBIAN_FRONTEND=noninteractive
-ENV NOTVISIBLE "in users profile"
-RUN echo "export VISIBLE=now" >> /etc/profile
-
-ADD files/wrapdocker /usr/local/bin/wrapdocker
-RUN chmod +x /usr/local/bin/wrapdocker
-
-# Install packages and Docker
-RUN apt-get -q update \
- && apt-get install -y -qq \
-        bash \
-        curl \
-        iproute2 \
-        locales \
-        ssh \
-        sudo \
-        tzdata \
-        uuid-runtime \
- && curl -fsSL https://get.docker.com | sh \
- && rm -rf /var/lib/apt/lists/*
-
-# Configure locales
-RUN locale-gen en_US
-
-RUN mkdir /var/run/sshd
-RUN echo 'root:Csit1234' | chpasswd
-RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
-
-# SSH login fix. Otherwise user is kicked off after login
-RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
-
-# Need volume for sidecar docker launches
-VOLUME /var/lib/docker
-
-# SSH to listen on port 6022 in shim
-RUN echo 'Port 6022' >>/etc/ssh/sshd_config
-RUN echo 'Port 6023' >>/etc/ssh/sshd_config
-ADD files/badkeypub /root/.ssh/authorized_keys
-ADD files/sshconfig /root/.ssh/config
-
-# Start sshd by default
-EXPOSE 22
-CMD ["/usr/sbin/sshd", "-D"]
diff --git a/docker/csit/csit-shim/files/badkeypub b/docker/csit/csit-shim/files/badkeypub
deleted file mode 100644 (file)
index 5742684..0000000
+++ /dev/null
@@ -1 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyUNd/iRk5Ajw4ZBB0gXyjzecEzQHh/MctgvHGJjasqJDkwYyZBrunUorOZ3n82W8VGdd5+eNINCWOM/ERjuaHjnutfade+ocPgZRdk+kEgTvetDVNWIgBd0PMVcnp57jJfx7CZVqTNgGeVQ8OJ2RbJGeOb/EKApQI74IPkAfc0PSieSw5gC0eqEOHb39Awgp0ycrzsUHF/OEicfCmo+6vvrMGenDe7frKUoTKYMWs7l3DOyFC8NaOxhGD3J1Ne5u3A/r4w6mN1HVI0rFwIcoms+t0B4lb2ODWKZiZikQdn8/eqwsmbSEZZsWN3FkshgjPS83+dNqVwB6pPY5Yqte7 ejk@bhima.local
diff --git a/docker/csit/csit-shim/files/sshconfig b/docker/csit/csit-shim/files/sshconfig
deleted file mode 100644 (file)
index 85ad6f7..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-Host 172.17.0.*
-       StrictHostKeyChecking no
-       UserKnownHostsFile=/dev/null
diff --git a/docker/csit/csit-shim/files/wrapdocker b/docker/csit/csit-shim/files/wrapdocker
deleted file mode 100644 (file)
index 2ca579f..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/bin/bash
-
-# Ensure that all nodes in /dev/mapper correspond to mapped devices currently loaded by the device-mapper kernel driver
-dmsetup mknodes
-
-# First, make sure that cgroups are mounted correctly.
-CGROUP=/sys/fs/cgroup
-: {LOG:=stdio}
-
-[ -d $CGROUP ] ||
-    mkdir $CGROUP
-
-mountpoint -q $CGROUP ||
-    mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
-        echo "Could not make a tmpfs mount. Did you use --privileged?"
-        exit 1
-    }
-
-if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
-then
-    mount -t securityfs none /sys/kernel/security || {
-        echo "Could not mount /sys/kernel/security."
-        echo "AppArmor detection and --privileged mode might break."
-    }
-fi
-
-# Mount the cgroup hierarchies exactly as they are in the parent system.
-for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
-do
-        [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
-        mountpoint -q $CGROUP/$SUBSYS ||
-                mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
-
-        # The two following sections address a bug which manifests itself
-        # by a cryptic "lxc-start: no ns_cgroup option specified" when
-        # trying to start containers withina container.
-        # The bug seems to appear when the cgroup hierarchies are not
-        # mounted on the exact same directories in the host, and in the
-        # container.
-
-        # Named, control-less cgroups are mounted with "-o name=foo"
-        # (and appear as such under /proc/<pid>/cgroup) but are usually
-        # mounted on a directory named "foo" (without the "name=" prefix).
-        # Systemd and OpenRC (and possibly others) both create such a
-        # cgroup. To avoid the aforementioned bug, we symlink "foo" to
-        # "name=foo". This shouldn't have any adverse effect.
-        echo $SUBSYS | grep -q ^name= && {
-                NAME=$(echo $SUBSYS | sed s/^name=//)
-                ln -s $SUBSYS $CGROUP/$NAME
-        }
-
-        # Likewise, on at least one system, it has been reported that
-        # systemd would mount the CPU and CPU accounting controllers
-        # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
-        # but on a directory called "cpu,cpuacct" (note the inversion
-        # in the order of the groups). This tries to work around it.
-        [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
-done
-
-# Note: as I write those lines, the LXC userland tools cannot setup
-# a "sub-container" properly if the "devices" cgroup is not in its
-# own hierarchy. Let's detect this and issue a warning.
-grep -q :devices: /proc/1/cgroup ||
-    echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
-grep -qw devices /proc/1/cgroup ||
-    echo "WARNING: it looks like the 'devices' cgroup is not mounted."
-
-# Now, close extraneous file descriptors.
-pushd /proc/self/fd >/dev/null
-for FD in *
-do
-    case "$FD" in
-    # Keep stdin/stdout/stderr
-    [012])
-        ;;
-    # Nuke everything else
-    *)
-        eval exec "$FD>&-"
-        ;;
-    esac
-done
-popd >/dev/null
-
-
-# If a pidfile is still around (for example after a container restart),
-# delete it so that docker can start.
-rm -rf /var/run/docker.pid
-
-# If we were given a PORT environment variable, start as a simple daemon;
-# otherwise, spawn a shell as well
-if [ "$PORT" ]
-then
-    exec dockerd -H 0.0.0.0:$PORT -H unix:///var/run/docker.sock \
-        $DOCKER_DAEMON_ARGS
-else
-    if [ "$LOG" == "file" ]
-    then
-        dockerd $DOCKER_DAEMON_ARGS &>/var/log/docker.log &
-    else
-        dockerd $DOCKER_DAEMON_ARGS &
-    fi
-    (( timeout = 60 + SECONDS ))
-    until docker info >/dev/null 2>&1
-    do
-        if (( SECONDS >= timeout )); then
-            echo 'Timed out trying to connect to internal docker host.' >&2
-            break
-        fi
-        sleep 1
-    done
-    [[ $1 ]] && exec "$@"
-    exec bash --login
-fi
diff --git a/docker/csit/csit-sut/Dockerfile b/docker/csit/csit-sut/Dockerfile
deleted file mode 100644 (file)
index ba212dc..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM ubuntu:18.04
-MAINTAINER csit-dev <csit-dev@lists.fd.io>
-LABEL Description="CSIT vpp-device ubuntu 18.04 SUT image"
-LABEL Version="0.7"
-
-# Setup the environment
-ENV DEBIAN_FRONTEND=noninteractive
-ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8'
-ENV NOTVISIBLE "in users profile"
-ENV VPP_PYTHON_PREFIX=/var/cache/vpp/python
-
-# Install packages and Docker
-RUN apt-get -q update \
- && apt-get install -y -qq \
-        # general tools
-        apt-transport-https \
-        bridge-utils \
-        cloud-init \
-        curl \
-        gdb \
-        locales \
-        net-tools \
-        openssh-server \
-        pciutils \
-        rsyslog \
-        software-properties-common \
-        ssh \
-        sudo \
-        supervisor \
-        tar \
-        vim \
-        wget \
-        # csit requirements
-        cmake \
-        dkms \
-        gfortran \
-        libblas-dev \
-        libffi-dev \
-        liblapack-dev \
-        libpcap-dev \
-        libssl-dev \
-        python-all \
-        python-apt \
-        python-cffi \
-        python-cffi-backend \
-        python-dev \
-        python-enum34 \
-        python-pip \
-        python-setuptools \
-        python-virtualenv \
-        python3-all \
-        python3-apt \
-        python3-cffi \
-        python3-cffi-backend \
-        python3-dev \
-        python3-pip \
-        python3-setuptools \
-        python3-virtualenv \
-        qemu-system \
-        socat \
-        strongswan \
-        unzip \
-        tcpdump \
-        zlib1g-dev \
-        # vpp requirements
-        ca-certificates \
-        libapr1 \
-        libmbedcrypto1 \
-        libmbedtls10 \
-        libmbedx509-0 \
-        libnuma1 \
-        sshpass \
- && curl -L https://packagecloud.io/fdio/master/gpgkey | sudo apt-key add - \
- && curl -s https://packagecloud.io/install/repositories/fdio/master/script.deb.sh | sudo bash \
- # temp hack due to build.sh
- && apt-get install -y -qq vpp-ext-deps \
- && curl -fsSL https://get.docker.com | sh \
- && rm -rf /var/lib/apt/lists/*
-
-# Configure locales
-RUN locale-gen en_US.UTF-8 \
- && dpkg-reconfigure locales
-
-# Fix permissions
-RUN chown root:syslog /var/log \
- && chmod 755 /etc/default
-
-# Create directory structure
-RUN mkdir -p /tmp/dumps \
- && mkdir -p /var/cache/vpp/python \
- && mkdir -p /var/run/sshd
-
-# CSIT PIP pre-cache
-RUN pip3 install \
-        ecdsa==0.13.3 \
-        paramiko==2.6.0 \
-        pycrypto==2.6.1 \
-        pypcap==1.2.3 \
-        PyYAML==5.1.1 \
-        requests==2.22.0 \
-        robotframework==3.1.2 \
-        scapy==2.4.3 \
-        scp==0.13.2 \
-        ansible==2.7.8 \
-        dill==0.2.8.2 \
-        numpy==1.17.3 \
-        hdrhistogram==0.6.1 \
-        pandas==0.25.3 \
-        plotly==4.1.1 \
-        PTable==0.9.2 \
-        Sphinx==2.2.1 \
-        sphinx-rtd-theme==0.4.0 \
-        sphinxcontrib-programoutput==0.15 \
-        sphinxcontrib-robotdoc==0.11.0 \
-        alabaster==0.7.12 \
-        Babel==2.7.0 \
-        bcrypt==3.1.7 \
-        certifi==2019.9.11 \
-        cffi==1.13.2 \
-        chardet==3.0.4 \
-        cryptography==2.8 \
-        docutils==0.15.2 \
-        future==0.18.2 \
-        idna==2.8 \
-        imagesize==1.1.0 \
-        Jinja2==2.10.3 \
-        MarkupSafe==1.1.1 \
-        packaging==19.2 \
-        pbr==5.4.3 \
-        pycparser==2.19 \
-        Pygments==2.4.2 \
-        PyNaCl==1.3.0 \
-        pyparsing==2.4.4 \
-        python-dateutil==2.8.1 \
-        pytz==2019.3 \
-        retrying==1.3.3 \
-        six==1.13.0 \
-        snowballstemmer==2.0.0 \
-        sphinxcontrib-applehelp==1.0.1 \
-        sphinxcontrib-devhelp==1.0.1 \
-        sphinxcontrib-htmlhelp==1.0.2 \
-        sphinxcontrib-jsmath==1.0.1 \
-        sphinxcontrib-qthelp==1.0.2 \
-        sphinxcontrib-serializinghtml==1.1.3 \
-        urllib3==1.25.6
-
-# ARM workaround
-RUN pip3 install scipy==1.1.0
-
-# SSH settings
-RUN echo 'root:Csit1234' | chpasswd \
- && sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config \
- && sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd \
- && echo "export VISIBLE=now" >> /etc/profile
-
-EXPOSE 2222
-
-COPY supervisord.conf /etc/supervisor/supervisord.conf
-
-CMD ["sh", "-c", "rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api; /usr/bin/supervisord -c /etc/supervisor/supervisord.conf; /usr/sbin/sshd -D -p 2222"]
diff --git a/docker/csit/csit-sut/supervisord.conf b/docker/csit/csit-sut/supervisord.conf
deleted file mode 100644 (file)
index 4a6fe96..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-[unix_http_server]
-file = /tmp/supervisor.sock
-chmod = 0777
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl = unix:///tmp/supervisor.sock
-
-[supervisord]
-pidfile = /tmp/supervisord.pid
-identifier = supervisor
-directory = /tmp
-logfile = /tmp/supervisord.log
-loglevel = debug
-nodaemon = false
-
-[program:vpp]
-command = /usr/bin/vpp -c /etc/vpp/startup.conf
-autostart = false
-autorestart = true
-redirect_stderr = true
-priority = 1
diff --git a/docker/scripts/README.md b/docker/scripts/README.md
index 1a83a24..2c98aa9 100644 (file)
@@ -262,7 +262,6 @@ script will be written to automate the process.
 - fdiotools/builder-debian10
 - fdiotools/builder-ubuntu1804
 - fdiotools/builder-ubuntu2004
-- fdiotools/csit-ubuntu1804
 - fdiotools/csit_dut-ubuntu1804
 - fdiotools/csit_shim-ubuntu1804
 
@@ -291,7 +290,6 @@ Jenkins-Nomad Label.
 
 - class
 -- builder
--- csit
 -- csit_dut
 -- csit_shim
 
diff --git a/docker/scripts/build_executor_docker_image.sh b/docker/scripts/build_executor_docker_image.sh
index 2eac97d..17118be 100755 (executable)
@@ -1,6 +1,6 @@
 #! /bin/bash
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 set -euxo pipefail
 
 # Log all output to stdout & stderr to a log file
-logname="/tmp/$(basename $0).$(date +%Y_%m_%d_%H%M%S).log"
+export DOCKER_DATE=${DOCKER_DATE:-"$(date -u +%Y_%m_%d_%H%M%S_UTC)"}
+logname="/tmp/$(basename $0).${DOCKER_DATE}.log"
 echo -e "\n*** Logging output to $logname ***\n\n"
 exec > >(tee -a $logname) 2>&1
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
-. $CIMAN_DOCKER_SCRIPTS/lib_vpp.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_csit.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_apt.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_yum.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_dnf.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_vpp.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_csit.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_apt.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_yum.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_dnf.sh"
 
 all_os_names=""
 ci_tag=""
 ci_image=""
 os_names=""
 push_to_docker_hub=""
+dump_dockerfile=""
 
 usage() {
     set +x
@@ -40,6 +42,7 @@ usage() {
     echo "  -a            Run all OS's supported on class $EXECUTOR_CLASS & arch $OS_ARCH"
     echo "  -c <class>    Default is '$EXECUTOR_DEFAULT_CLASS'"
     executor_list_classes
+    echo "  -d            Generate Dockerfile, dump it to stdout, and exit"
     echo "  -p            Push docker images to Docker Hub"
     echo "  -r <role>     Add a role based tag (e.g. sandbox-x86_64):"
     executor_list_roles
@@ -47,10 +50,10 @@ usage() {
     exit 1
 }
 
-must_be_run_as_root
-while getopts ":ahpc:r:" opt; do
+must_be_run_as_root_or_docker_group
+while getopts ":ac:dhpr:" opt; do
     case "$opt" in
-        a)  all_os_names=1 ;;
+        a)  all_os_names="1" ;;
         c) if executor_verify_class "$OPTARG" ; then
                EXECUTOR_CLASS="$OPTARG"
                EXECUTOR_CLASS_ARCH="$EXECUTOR_CLASS-$OS_ARCH"
@@ -58,8 +61,9 @@ while getopts ":ahpc:r:" opt; do
                echo "ERROR: Invalid executor class '$OPTARG'!"
                usage
            fi ;;
+        d) dump_dockerfile="1"; set +x ;;
         h) usage ;;
-        p) push_to_docker_hub=1 ;;
+        p) push_to_docker_hub="1" ;;
         r) if executor_verify_role "$OPTARG" ; then
                ci_tag="${OPTARG}-$OS_ARCH"
            else
@@ -87,6 +91,16 @@ if [ -z "$os_names" ] ; then
     echo "ERROR: Missing executor OS name(s) for class '$EXECUTOR_CLASS'!"
     usage
 fi
+for executor_os_name in $os_names ; do
+    if ! executor_verify_os_name "$executor_os_name" ; then
+        set_opts="$-"
+        set +x # disable trace output
+        echo "ERROR: Invalid executor OS name for class '$EXECUTOR_CLASS': $executor_os_name!"
+        executor_list_os_names
+        echo
+        exit 1
+    fi
+done
 
 # Build the specified docker images
 docker_build_setup_ciman
@@ -99,49 +113,47 @@ for executor_os_name in $os_names ; do
     repository="fdiotools/${EXECUTOR_CLASS}-${os_name//.}"
     executor_docker_image="$repository:$DOCKER_TAG"
 
-    if ! executor_verify_os_name "$executor_os_name" ; then
-        set_opts=$-
-        grep -q x <<< $set_opts && set +x # disable undefined variable check
-        echo "WARNING: Invalid executor OS name for class '$EXECUTOR_CLASS': $executor_os_name!"
-        executor_list_os_names
-        echo
-        grep -q x <<< $set_opts && set -x # re-enable undefined variable check
-        continue
-    fi
     case "$executor_os_name" in
         ubuntu*)
-            generate_apt_dockerfile $executor_os_name $docker_from_image \
-                                    $executor_docker_image ;;
+            generate_apt_dockerfile "$EXECUTOR_CLASS" "$executor_os_name" \
+                                    "$docker_from_image" "$executor_docker_image" ;;
         debian*)
-            generate_apt_dockerfile $executor_os_name $docker_from_image \
-                                    $executor_docker_image ;;
+            generate_apt_dockerfile "$EXECUTOR_CLASS" "$executor_os_name" \
+                                    "$docker_from_image" "$executor_docker_image" ;;
         centos-7)
-            generate_yum_dockerfile $executor_os_name $docker_from_image \
-                                    $executor_docker_image ;;
+            generate_yum_dockerfile "$EXECUTOR_CLASS" "$executor_os_name" \
+                                    "$docker_from_image" "$executor_docker_image" ;;
         centos-8)
-            generate_dnf_dockerfile $executor_os_name $docker_from_image \
-                                    $executor_docker_image ;;
+            generate_dnf_dockerfile "$EXECUTOR_CLASS" "$executor_os_name" \
+                                    "$docker_from_image" "$executor_docker_image" ;;
         *)
-            echo "ERROR: Don't know how to generate dockerfile for $executor_os_name!"
+            echo "ERROR: Don't know how to generate dockerfile for OS $executor_os_name!"
             usage ;;
     esac
 
-    docker build -t $executor_docker_image $DOCKER_BUILD_DIR
-    rm -f $DOCKERFILE
-    if [ -n "$ci_tag" ] ; then
-        ci_image="$repository:$ci_tag"
-        echo -e "\nAdding docker tag $ci_image to $executor_docker_image"
-        docker tag $executor_docker_image $ci_image
-    fi
-    if [ -n "$push_to_docker_hub" ] ; then
-        echo -e "\nPushing $executor_docker_image to Docker Hub..."
-        docker login
-        docker push $executor_docker_image
-        if [ -n "$ci_image" ] ; then
-            echo -e "\nPushing $ci_image to Docker Hub..."
-            docker push $ci_image
+    if [ -n "$dump_dockerfile" ] ; then
+        line="==========================================================================="
+        echo -e "\nDockerfile for '$EXECUTOR_CLASS' executor docker image on OS '$executor_os_name':\n$line"
+        cat "$DOCKERFILE"
+        echo -e "$line\n"
+    else
+        docker build -t "$executor_docker_image" "$DOCKER_BUILD_DIR"
+        rm -f "$DOCKERFILE"
+        if [ -n "$ci_tag" ] ; then
+            ci_image="$repository:$ci_tag"
+            echo -e "\nAdding docker tag $ci_image to $executor_docker_image"
+            docker tag "$executor_docker_image" "$ci_image"
+        fi
+        if [ -n "$push_to_docker_hub" ] ; then
+            echo -e "\nPushing $executor_docker_image to Docker Hub..."
+            docker login
+            docker push "$executor_docker_image"
+            if [ -n "$ci_image" ] ; then
+                echo -e "\nPushing $ci_image to Docker Hub..."
+                docker push "$ci_image"
+            fi
         fi
     fi
 done
 
-echo -e "\n$(basename $BASH_SOURCE) COMPLETE!\nHave a great day! :D"
+echo -e "\n$(basename $BASH_SOURCE) COMPLETE\nHave a great day! :D"
diff --git a/docker/scripts/dbld_csit_install_packages.sh b/docker/scripts/dbld_csit_install_packages.sh
index d179b78..d8ce597 100755 (executable)
@@ -1,6 +1,6 @@
 #! /bin/bash
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 set -euxo pipefail
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
-. $CIMAN_DOCKER_SCRIPTS/lib_csit.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_vpp.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_csit.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_vpp.sh"
 
-must_be_run_as_root
 must_be_run_in_docker_build
 
-case "$OS_NAME" in
-    ubuntu-18.04)
-        supported_os="true" ;;
-    *)
-        supported_os="" ;;
-esac
-if [ -z "$supported_os" ] ; then
-    echo "CSIT is not supported on $OS_NAME. Skipping CSIT package install..."
+echo_log
+
+if ! csit_supported_executor_class "$FDIOTOOLS_EXECUTOR_CLASS" ; then
+    echo_log "CSIT is not supported on executor class '$FDIOTOOLS_EXECUTOR_CLASS'. Skipping $(basename $0)..."
+    exit 0
+elif ! csit_supported_os "$OS_NAME" ; then
+    echo_log "CSIT is not supported on OS '$OS_NAME'. Skipping $(basename $0)..."
     exit 0
+else
+    echo_log "Starting  $(basename $0)"
 fi
 
-echo_log
-echo_log "Starting  $(basename $0)"
-
 do_git_config csit
 for vpp_branch in ${VPP_BRANCHES[$OS_NAME]} ; do
     # Returns checked out branch in csit_branch
-    csit_checkout_branch_for_vpp $vpp_branch
+    csit_checkout_branch_for_vpp "$vpp_branch"
 
     # Install csit OS packages
-    csit_install_packages $csit_branch
+    csit_install_packages "$csit_branch"
 
     # Install/cache python packages
-    csit_pip_cache $csit_branch
+    csit_pip_cache "$csit_branch"
 done
 
 echo_log -e "Completed $(basename $0)!\n\n=========="
diff --git a/docker/scripts/dbld_dump_build_logs.sh b/docker/scripts/dbld_dump_build_logs.sh
index 212e095..443d538 100755 (executable)
@@ -1,6 +1,6 @@
 #! /bin/bash
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -17,14 +17,13 @@ set -euxo pipefail
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
 export CIMAN_ROOT=${CIMAN_ROOT:-"$(dirname $(dirname $CIMAN_DOCKER_SCRIPTS))"}
-. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"
 
-must_be_run_as_root
 must_be_run_in_docker_build
 
 dump_build_logs() {
-    local set_opts=$-
-    grep -q e <<< $set_opts && set +e # disable exit on errors
+    local set_opts="$-"
+    set +e # disable exit on errors
 
     # Find errors
     local found="$(grep -nisH error $DOCKER_BUILD_LOG_DIR/*-bld.log)"
@@ -42,13 +41,13 @@ dump_build_logs() {
         echo -e "\nNo warnings found in build logs\n"
     fi
 
-    grep -q e <<< $set_opts && set -e # re-enable exit on errors
+    grep -q e <<< "$set_opts" && set -e # re-enable exit on errors
 }
 
 dump_cache_files() {
     local cache_files_log="$DOCKER_BUILD_LOG_DIR/cached_files.json"
     tree -a --timefmt "+%Y-%m-%d %H:%M:%S" --prune /root
-    tree -afJ --timefmt "+%Y-%m-%d %H:%M:%S" --prune -o $cache_files_log /root
+    tree -afJ --timefmt "+%Y-%m-%d %H:%M:%S" --prune -o "$cache_files_log" /root
 }
 
 dump_cache_files
diff --git a/docker/scripts/dbld_install_docker.sh b/docker/scripts/dbld_install_docker.sh
deleted file mode 100755 (executable)
index de10283..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-#! /bin/bash
-
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euxo pipefail
-
-export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
-. $CIMAN_DOCKER_SCRIPTS/lib_apt.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_yum.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_dnf.sh
-
-must_be_run_as_root
-must_be_run_in_docker_build
-
-echo_log
-echo_log "Starting  $(basename $0)"
-
-case "$DOCKERFILE_FROM" in
-    *ubuntu*)
-        write_apt_ubuntu_docker_gpg_keyfile
-        apt_install_docker_os_package_dependancies
-        apt_install_docker $DOCKER_APT_UBUNTU_DOCKER_GPGFILE ;;
-    *debian*)
-        write_apt_debian_docker_gpg_keyfile
-        apt_install_docker_os_package_dependancies
-        apt_install_docker $DOCKER_APT_DEBIAN_DOCKER_GPGFILE ;;
-    *centos:7)
-        yum_install_docker_os_package_dependancies
-        yum_install_docker ;;
-    *centos:8)
-        dnf_install_docker_os_package_dependancies
-        dnf_install_docker ;;
-esac
-
-echo_log -e "Completed $(basename $0)!\n\n=========="
diff --git a/docker/scripts/dbld_lfit_requirements.sh b/docker/scripts/dbld_lfit_requirements.sh
index 4b02e24..7067d99 100755 (executable)
 set -euxo pipefail
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
-. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"
 
-must_be_run_as_root
 must_be_run_in_docker_build
 
 # Add packagecloud files
-cat <<EOF > /root/.packagecloud
+cat <<EOF >/root/.packagecloud
 {"url":"https://packagecloud.io","token":"\$token"}
 EOF
 cat <<EOF >/root/packagecloud_api
@@ -33,7 +32,7 @@ EOF
 
 # Copy lf-env.sh for LF Releng scripts
 lf_env_sh="/root/lf-env.sh"
-cp $DOCKER_CIMAN_ROOT/global-jjb/jenkins-init-scripts/lf-env.sh $lf_env_sh
+cp "$DOCKER_CIMAN_ROOT/global-jjb/jenkins-init-scripts/lf-env.sh" "$lf_env_sh"
 chmod 644 "$lf_env_sh"
 cat <<EOF >>"$lf_env_sh"
 
@@ -43,16 +42,16 @@ cat <<EOF >>"$lf_env_sh"
 unset -f lf-activate-venv
 lf-activate-venv() {
     echo "\${FUNCNAME[0]}(): INFO: Adding $LF_VENV/bin to PATH"
-    PATH="$LF_VENV/bin:$PATH"
+    PATH="\$LF_VENV/bin:\$PATH"
     return 0
 }
 EOF
 
 # Install lftools & boto3 for log / artifact upload.
 python3 -m pip install boto3
-mkdir -p $LF_VENV
+mkdir -p "$LF_VENV"
 OLD_PATH="$PATH"
-python3 -m venv $LF_VENV
+python3 -m venv "$LF_VENV"
 PATH="$LF_VENV/bin:$PATH"
 python3 -m pip install --upgrade pip
 python3 -m pip install --upgrade --upgrade-strategy eager lftools
diff --git a/docker/scripts/dbld_vpp_install_packages.sh b/docker/scripts/dbld_vpp_install_packages.sh
index f18ef75..e0d1f27 100755 (executable)
@@ -1,6 +1,6 @@
 #! /bin/bash
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 set -euxo pipefail
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
-. $CIMAN_DOCKER_SCRIPTS/lib_vpp.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_apt.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_yum.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_dnf.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_vpp.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_apt.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_yum.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_dnf.sh"
 
-must_be_run_as_root
 must_be_run_in_docker_build
 
 echo_log
-echo_log "Starting  $(basename $0)"
+if ! vpp_supported_executor_class "$FDIOTOOLS_EXECUTOR_CLASS" ; then
+    echo_log "VPP is not supported on executor class '$FDIOTOOLS_EXECUTOR_CLASS'. Skipping $(basename $0)..."
+    exit 0
+else
+    echo_log "Starting  $(basename $0)"
+fi
 
 do_git_config vpp
 for branch in ${VPP_BRANCHES[$OS_NAME]} ; do
-    do_git_branch $branch
+    do_git_branch "$branch"
 
     # Install OS packages
-    make_vpp "install-dep" $branch
-    make_vpp "centos-pyyaml" $branch # VPP Makefile tests for centos versions
+    make_vpp "install-dep" "$branch"
+    make_vpp "centos-pyyaml" "$branch" # VPP Makefile tests for centos versions
     if [ "$OS_ID" = "ubuntu" ] ; then
         # TODO: fix VPP stable/2005 bug in sphinx-make.sh
         #       which fails on 'yum install python3-venv'
         #       that does not exist.
         # 'Make docs jobs are only run on ubuntu executors
         #  so only run for ubuntu build executors until fixed.
-        make_vpp "docs-venv" $branch
+        make_vpp "docs-venv" "$branch"
     elif [ "$OS_NAME" = "debian-9" ] ; then
         apt_override_cmake_install_with_pip3_version
     fi
 
     # Download, build, and cache external deps packages
-    make_vpp "install-ext-deps" $branch
+    make_vpp "install-ext-deps" "$branch"
     set +e
     vpp_ext_dir="$DOCKER_VPP_DIR/build/external"
     [ -d "$vpp_ext_dir/downloads" ] \
-        && rsync -ac $vpp_ext_dir/downloads/. $DOCKER_DOWNLOADS_DIR
+        && rsync -ac "$vpp_ext_dir/downloads/." "$DOCKER_DOWNLOADS_DIR"
     [ -n "$(ls $vpp_ext_dir/*.deb)" ] \
-        && rsync -ac $vpp_ext_dir/*.deb $DOCKER_DOWNLOADS_DIR
+        && rsync -ac "$vpp_ext_dir/*.deb" "$DOCKER_DOWNLOADS_DIR"
     [ -n "$(ls $vpp_ext_dir/*.rpm)" ] \
-        && rsync -ac $vpp_ext_dir/*.rpm $DOCKER_DOWNLOADS_DIR
+        && rsync -ac "$vpp_ext_dir/*.rpm" "$DOCKER_DOWNLOADS_DIR"
     set -e
 
     # Install/cache python packages
     if [ "$OS_ID" = "ubuntu" ] ; then
-        make_vpp_test "test-dep" $branch
-        make_vpp_test "doc" $branch
-        make_vpp test-wipe $branch
-        make_vpp "bootstrap-doxygen" $branch
+        make_vpp_test "test-dep" "$branch"
+        make_vpp_test "doc" "$branch"
+        make_vpp test-wipe "$branch"
+        make_vpp "bootstrap-doxygen" "$branch"
     fi
 
     # Dump packages installed
     case "$DOCKERFILE_FROM" in
         *ubuntu*)
-            dump_apt_package_list $branch ;;
+            dump_apt_package_list "$branch" ;;
         *debian*)
-            dump_apt_package_list $branch ;;
+            dump_apt_package_list "$branch" ;;
         *centos:7)
-            dump_yum_package_list $branch ;;
+            dump_yum_package_list "$branch" ;;
         *centos:8)
-            dump_dnf_package_list $branch ;;
+            dump_dnf_package_list "$branch" ;;
     esac
 done
 
diff --git a/docker/scripts/lib_apt.sh b/docker/scripts/lib_apt.sh
index 40b5211..8994a78 100644 (file)
@@ -1,7 +1,7 @@
 # lib_apt.sh - Docker build script apt library.
 #              For import only.
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -21,7 +21,8 @@ fi
 alias lib_apt_imported=true
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
-. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_csit.sh"
 
 dump_apt_package_list() {
     branchname="$(echo $branch | sed -e 's,/,_,')"
@@ -34,16 +35,6 @@ apt_install_packages() {
             --allow-change-held-packages $@
 }
 
-apt_install_docker_os_package_dependancies() {
-    # Assumes 'apt-get update -q' has aready been run.
-    apt_install_packages \
-        apt-transport-https \
-        ca-certificates \
-        curl \
-        gnupg-agent \
-        software-properties-common
-}
-
 # Used for older OS distro's which are incompatible
 # with modern distro cmake vesrion
 apt_override_cmake_install_with_pip3_version() {
@@ -51,45 +42,20 @@ apt_override_cmake_install_with_pip3_version() {
     local os_cmake_ver="$($os_cmake --version | head -1)"
     local pip3_cmake="/usr/local/bin/cmake"
 
-    python3 -m pip install --disable-pip-version-check cmake || true
+    python3 -m pip --disable-pip-version-check install cmake || true
     local pip3_cmake_ver="$($pip3_cmake --version | head -1)"
     echo_log "Overriding $OS_NAME '$os_cmake_ver' with '$pip3_cmake_ver'!"
-    sudo apt-get remove -y cmake --autoremove || true
+    apt-get remove -y cmake --autoremove || true
     update-alternatives --quiet --remove-all cmake || true
-    update-alternatives --quiet --install $os_cmake cmake $pip3_cmake 100
+    update-alternatives --quiet --install "$os_cmake" cmake "$pip3_cmake" 100
     echo_log "Default cmake ($(which cmake)) version: '$(cmake --version | head -1)'!"
 }
 
-apt_install_docker() {
-    local apt_docker_gpg_key_file=$1
-    apt-key add $apt_docker_gpg_key_file
-    add-apt-repository "deb [arch=$DEB_ARCH] \
-    https://download.docker.com/linux/$OS_ID \
-    $(lsb_release -cs) stable"
-    apt-get update -q
-    apt_install_packages -y -qq docker-ce docker-ce-cli containerd.io
-    rm -rf $DOCKER_GPG_KEY_DIR
-}
-
-generate_apt_dockerfile() {
-    local executor_os_name=$1
-    local from_image=$2
-    local executor_image=$3
-    local vpp_install_skip_sysctl_envvar="";
+generate_apt_dockerfile_common() {
+    local executor_class="$1"
+    local executor_image="$2"
 
-    if grep -q debian-9  <<<$executor_os_name ; then
-        # Workaround to VPP package installation failure on debian-9
-        vpp_install_skip_sysctl_envvar="ENV VPP_INSTALL_SKIP_SYSCTL=1"
-    fi
-    cat <<EOF  >$DOCKERIGNOREFILE
-**/__pycache__
-*.pyc
-EOF
-    cat <<EOF >$DOCKERFILE
-FROM $from_image AS executor-image
-LABEL Description="FD.io CI executor docker image for $executor_os_name/$OS_ARCH"
-LABEL Vendor="fd.io"
-LABEL Version="$DOCKER_TAG"
+    cat <<EOF >>"$DOCKERFILE"
 
 # Create download dir to cache external tarballs
 WORKDIR $DOCKER_DOWNLOADS_DIR
@@ -100,250 +66,330 @@ WORKDIR $DOCKER_BUILD_DIR
 COPY . .
 
 # Build Environment Variables
-ENV DEBIAN_FRONTEND=noninteractive
-ENV FDIOTOOLS_IMAGE=$executor_image
-ENV LC_ALL=C.UTF-8
+ENV DEBIAN_FRONTEND="noninteractive"
+ENV FDIOTOOLS_IMAGE="$executor_image"
+ENV FDIOTOOLS_EXECUTOR_CLASS="$executor_class"
 ENV CIMAN_ROOT="$DOCKER_CIMAN_ROOT"
-ENV PATH=$PATH:$DOCKER_CIMAN_ROOT/docker/scripts
-ENV LF_VENV="/root/lf-venv"
+ENV PATH="\$PATH:$DOCKER_CIMAN_ROOT/docker/scripts"
+
+# Configure locales
+RUN apt-get update -qq \\
+  && apt-get install -y \\
+        apt-utils \\
+        locales \\
+  && sed -i 's/# \(en_US\.UTF-8 .*\)/\1/' /etc/locale.gen \\
+  && locale-gen en_US.UTF-8 \\
+  && dpkg-reconfigure --frontend=noninteractive locales \\
+  && update-locale LANG=en_US.UTF-8 \\
+  && TZ=Etc/UTC && ln -snf /usr/share/zoneinfo/\$TZ /etc/localtime && echo \$TZ > /etc/timezone \\
+  && rm -r /var/lib/apt/lists/*
+ENV LANG="en_US.UTF-8" LANGUAGE="en_US" LC_ALL="en_US.UTF-8"
 
 # Install baseline packages (minimum build & utils).
 #
 # ci-management global-jjb requirements:
-#       facter
-#       python3-pip
-#       python3-venv
-#   for lftools:
-#       xmlstarlet
-#       libxml2-dev
-#       libxslt-dev
+#        facter
+#        python3-pip
+#        python3-venv
+#    for lftools:
+#        xmlstarlet
+#        libxml2-dev
+#        libxslt-dev
 #   from packer/provision/baseline.sh:
-#       unzip
-#       xz-utils
-#       git
-#       git-review
-#       libxml2-dev
-#       libxml-xpath-perl
-#       libxslt-dev
-#       make
-#       wget
-#       jq
+#        unzip
+#        xz-utils
+#        git
+#        git-review
+#        libxml2-dev
+#        libxml-xpath-perl
+#        libxslt-dev
+#        make
+#        wget
+#        jq
 #
 # Python build from source requirements:
-#       build-essential
+#        build-essential
 #
-# TODO: Fix broken project requirement install targets
+# TODO:  Fix broken project requirement install targets
+#        graphviz         for 'make bootstrap-doxygen' (VPP)
+#        doxygen          for 'make doxygen' (VPP)
+#        enchant          for 'make docs' (VPP)
+#        libffi-dev       for python cffi install (Ubuntu20.04/VPP/aarch64)
+#        liblapack-dev    for python numpy/scipy (CSIT/aarch64)
+#        libopenblas-dev  for python numpy/scipy (CSIT/aarch64)
+#        libpcap-dev      for python pypcap install (CSIT)
+#        sshpass          for CSIT jobs
 #
-#   graphviz         for 'make bootstrap-doxygen' (VPP)
-#   doxygen          for 'make doxygen' (VPP)
-#   enchant          for 'make docs' (VPP)
-#   libffi-dev       for python cffi install (Ubuntu20.04/VPP/aarch64)
-#   liblapack-dev    for python numpy/scipy (CSIT/aarch64)
-#   libopenblas-dev  for python numpy/scipy (CSIT/aarch64)
-#   libpcap-dev      for python pypcap install (CSIT)
-#   sshpass          for CSIT jobs
+#        From .../csit/resources/tools/presentation/run_report_*.sh:
+#        libxml2
+#        libxml2-dev
+#        libxslt-dev
+#        build-essential
+#        zlib1g-dev
+#        unzip
+#        xvrb
+#        texlive-latex-recommended
+#        texlive-fonts-recommended
+#        texlive-fonts-extra
+#        texlive-latex-extra
+#        latexmk
+#        wkhtmltopdf
+#        inkscape
 #
-RUN apt-get update -q \\
-    && apt-get install -y -qq \\
-        apt-utils \\
-        default-jdk \\
-        default-jre \\
-        doxygen \\
-        enchant \\
-        emacs \\
-        facter \\
-        gawk \\
-        gdb \\
-        gfortran \\
-        git \\
-        git-review \\
-        graphviz \\
-        iproute2 \\
-        iputils-clockdiff \\
-        iputils-ping \\
-        iputils-tracepath \\
-        jq \\
-        libffi-dev \\
-        liblapack-dev \\
-        libopenblas-dev \\
-        libpcap-dev \\
-        libxml2-dev \\
-        libxml-xpath-perl \\
-        libxslt-dev \\
-        make \\
-        python3-pip \\
-        python3-venv \\
-        rsync \\
-        ruby-dev \\
-        sshpass \\
-        sudo \\
-        traceroute \\
-        tree \\
-        vim \\
-        wget \\
-        xmlstarlet \\
-        xz-utils \\
-    && rm -r /var/lib/apt/lists/*
+RUN apt-get update -qq \\
+  && apt-get install -y \\
+             apt-transport-https \\
+             curl \\
+             ca-certificates \\
+             default-jdk \\
+             default-jre \\
+             dnsutils \\
+             doxygen \\
+             enchant \\
+             emacs \\
+             facter \\
+             gawk \\
+             gdb \\
+             gfortran \\
+             git \\
+             git-review \\
+             gnupg-agent \\
+             graphviz \\
+             inkscape \\
+             iproute2 \\
+             iputils-clockdiff \\
+             iputils-ping \\
+             iputils-tracepath \\
+             jq \\
+             latexmk \\
+             libffi-dev \\
+             liblapack-dev \\
+             libopenblas-dev \\
+             libpcap-dev \\
+             libxml2 \\
+             libxml2-dev \\
+             libxml-xpath-perl \\
+             libxslt-dev \\
+             make \\
+             python3-pip \\
+             python3-venv \\
+             rsync \\
+             ruby-dev \\
+             software-properties-common \\
+             sshpass \\
+             sudo \\
+             texlive-fonts-extra \\
+             texlive-fonts-recommended \\
+             texlive-latex-extra \\
+             texlive-latex-recommended \\
+             traceroute \\
+             tree \\
+             unzip \\
+             vim \\
+             wget \\
+             wkhtmltopdf \\
+             xmlstarlet \\
+             xvfb \\
+             xz-utils \\
+             zlib1g-dev \\
+  && curl -L https://packagecloud.io/fdio/master/gpgkey | apt-key add - \\
+  && curl -s https://packagecloud.io/install/repositories/fdio/master/script.deb.sh | bash \\
+  && curl -fsSL https://get.docker.com | sh \\
+  && rm -r /var/lib/apt/lists/*
 
 # Install packages for all project branches
 #
-RUN apt-get update -q \\
-    && dbld_install_docker.sh \\
-    && dbld_vpp_install_packages.sh \\
-    && dbld_csit_install_packages.sh \\
-    && dbld_lfit_requirements.sh \\
-    && rm -r /var/lib/apt/lists/*
+RUN apt-get update -qq \\
+  && dbld_vpp_install_packages.sh \\
+  && dbld_csit_install_packages.sh \\
+  && rm -r /var/lib/apt/lists/*
+EOF
+}
+
+generate_apt_dockerfile_clean() {
+    cat <<EOF >>"$DOCKERFILE"
+
+# Clean up copy-in build tree
+RUN dbld_dump_build_logs.sh \\
+  && rm -rf "/tmp/*" "$DOCKER_BUILD_FILES_DIR"
+EOF
+}
+
+# Generate 'builder' class apt dockerfile
+builder_generate_apt_dockerfile() {
+    local executor_class="$1"
+    local executor_os_name="$2"
+    local executor_image="$3"
+    local vpp_install_skip_sysctl_envvar="";
+
+    if grep -q "debian-9"  <<< "$executor_os_name" ; then
+        # Workaround to VPP package installation failure on debian-9
+        vpp_install_skip_sysctl_envvar="ENV VPP_INSTALL_SKIP_SYSCTL=1"
+    fi
+    generate_apt_dockerfile_common $executor_class $executor_image
+    csit_builder_generate_docker_build_files
+    cat <<EOF >>"$DOCKERFILE"
+
+# Install LF-IT requirements
+ENV LF_VENV="/root/lf-venv"
+RUN apt-get update -qq \\
+  && dbld_lfit_requirements.sh \\
+  && rm -r /var/lib/apt/lists/*
+
+# Install packagecloud requirements
+RUN gem install rake package_cloud \\
+  && curl -s https://packagecloud.io/install/repositories/fdio/master/script.deb.sh | bash
+
+# Install CSIT ssh requirements
+# TODO: Verify why badkey is required & figure out how to avoid it.
+COPY files/badkey /root/.ssh/id_rsa
+COPY files/sshconfig /root/.ssh/config
 
 # CI Runtime Environment
 WORKDIR /
 $vpp_install_skip_sysctl_envvar
-ENV VPP_ZOMBIE_NOCHECK=1
-ENV DOCKER_TEST=1
+ENV VPP_ZOMBIE_NOCHECK="1"
 # TODO: Mount ccache volume into docker container, then remove this.
-ENV CCACHE_DISABLE=1
-RUN gem install rake package_cloud \\
-    && curl -s https://packagecloud.io/install/repositories/fdio/master/script.deb.sh | sudo bash
+ENV CCACHE_DISABLE="1"
+EOF
+    generate_apt_dockerfile_clean
+}
 
-# Clean up copy-in build tree
-RUN dbld_dump_build_logs.sh \\
-    && rm -rf /tmp/*
+# Generate 'csit_dut' class apt dockerfile
+csit_dut_generate_apt_dockerfile() {
+    local executor_class="$1"
+    local executor_os_name="$2"
+    local executor_image="$3"
+
+    csit_dut_generate_docker_build_files
+    generate_apt_dockerfile_common "$executor_class" "$executor_image"
+    cat <<EOF >>"$DOCKERFILE"
+
+# Install csit_dut specific packages
+RUN apt-get update -qq \\
+  && apt-get install -y \\
+             net-tools \\
+             openssh-server \\
+             pciutils \\
+             rsyslog \\
+             supervisor \\
+  && rm -r /var/lib/apt/lists/*
+
+# Fix permissions
+RUN chown root:syslog /var/log \\
+  && chmod 755 /etc/default
+
+# Create directory structure
+RUN mkdir -p /var/run/sshd
+
+# SSH settings
+RUN echo 'root:Csit1234' | chpasswd \\
+  && sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config \\
+  && sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+
+EXPOSE 2222
+
+COPY files/supervisord.conf /etc/supervisor/supervisord.conf
+
+CMD ["sh", "-c", "rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api; /usr/bin/supervisord -c /etc/supervisor/supervisord.conf; /usr/sbin/sshd -D -p 2222"]
 EOF
+    generate_apt_dockerfile_clean
 }
 
-write_apt_ubuntu_docker_gpg_keyfile() {
-    # To update docker gpg key
-    # curl -fsSL https://download.docker.com/linux/ubuntu/gpg
-    cat <<EOF >$DOCKER_APT_UBUNTU_DOCKER_GPGFILE
------BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
-lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
-38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
-L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
-UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
-cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
-ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
-vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
-G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
-XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
-q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
-tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
-BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
-v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
-tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
-jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
-6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
-XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
-FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
-g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
-ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
-9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
-G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
-FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
-EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
-M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
-Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
-w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
-z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
-eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
-VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
-1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
-zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
-pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
-ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
-BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
-1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
-YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
-mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
-KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
-JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
-cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
-6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
-U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
-VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
-irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
-SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
-QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
-9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
-24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
-dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
-Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
-H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
-/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
-M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
-xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
-jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
-YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
-=0YYh
------END PGP PUBLIC KEY BLOCK-----
+# Generate 'csit_shim' class apt dockerfile
+csit_shim_generate_apt_dockerfile() {
+    local executor_class="$1"
+    local executor_os_name="$2"
+    local executor_image="$3"
+
+    csit_shim_generate_docker_build_files
+    cat <<EOF >>"$DOCKERFILE"
+
+# Copy-in temporary build tree containing
+# ci-management, vpp, & csit git repos
+WORKDIR $DOCKER_BUILD_DIR
+COPY . .
+
+# Build Environment Variables
+ENV DEBIAN_FRONTEND="noninteractive"
+ENV FDIOTOOLS_IMAGE="$executor_image"
+ENV FDIOTOOLS_EXECUTOR_CLASS="$executor_class"
+ENV CIMAN_ROOT="$DOCKER_CIMAN_ROOT"
+ENV PATH="\$PATH:$DOCKER_CIMAN_ROOT/docker/scripts"
+
+# Configure locales & timezone
+RUN apt-get update -qq \\
+  && apt-get install -y \\
+             apt-utils \\
+             locales \\
+  && sed -i 's/# \(en_US\.UTF-8 .*\)/\1/' /etc/locale.gen \\
+  && locale-gen en_US.UTF-8 \\
+  && dpkg-reconfigure --frontend=noninteractive locales \\
+  && update-locale LANG=en_US.UTF-8 \\
+  && TZ=Etc/UTC && ln -snf /usr/share/zoneinfo/\$TZ /etc/localtime && echo \$TZ > /etc/timezone \\
+  && rm -r /var/lib/apt/lists/*
+ENV LANG=en_US.UTF-8 LANGUAGE=en_US LC_ALL=en_US.UTF-8
+
+COPY files/wrapdocker /usr/local/bin/wrapdocker
+RUN chmod +x /usr/local/bin/wrapdocker
+
+# Install packages and Docker
+RUN apt-get update -qq \\
+  && apt-get install -y  \\
+             bash \\
+             curl \\
+             iproute2 \\
+             locales \\
+             ssh \\
+             sudo \\
+             tzdata \\
+             uuid-runtime \\
+  && curl -fsSL https://get.docker.com | sh \\
+  && rm -rf /var/lib/apt/lists/*
+
+RUN mkdir /var/run/sshd
+RUN echo 'root:Csit1234' | chpasswd
+RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+# SSH login fix. Otherwise user is kicked off after login
+RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+
+# Need volume for sidecar docker launches
+VOLUME /var/lib/docker
+
+# SSH to listen on port 6022 in shim
+RUN echo 'Port 6022' >>/etc/ssh/sshd_config
+RUN echo 'Port 6023' >>/etc/ssh/sshd_config
+
+# TODO: Verify why badkeypub is required & figure out how to avoid it.
+COPY files/badkeypub /root/.ssh/authorized_keys
+COPY files/sshconfig /root/.ssh/config
+
+# Clean up copy-in build tree
+RUN rm -rf /tmp/* $DOCKER_BUILD_FILES_DIR
+
+# Start sshd by default
+EXPOSE 22
+CMD ["/usr/sbin/sshd", "-D"]
 EOF
 }
 
-write_apt_debian_docker_gpg_keyfile() {
-    # To update docker gpg key
-    # curl -fsSL https://download.docker.com/linux/debian/gpg
-    cat <<EOF >$DOCKER_APT_DEBIAN_DOCKER_GPGFILE
------BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
-lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
-38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
-L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
-UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
-cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
-ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
-vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
-G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
-XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
-q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
-tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
-BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
-v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
-tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
-jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
-6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
-XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
-FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
-g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
-ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
-9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
-G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
-FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
-EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
-M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
-Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
-w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
-z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
-eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
-VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
-1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
-zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
-pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
-ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
-BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
-1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
-YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
-mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
-KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
-JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
-cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
-6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
-U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
-VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
-irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
-SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
-QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
-9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
-24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
-dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
-Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
-H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
-/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
-M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
-xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
-jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
-YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
-=0YYh
------END PGP PUBLIC KEY BLOCK-----
+generate_apt_dockerfile() {
+    local executor_class="$1"
+    local executor_os_name="$2"
+    local from_image="$3"
+    local executor_image="$4"
+
+    cat <<EOF  >"$DOCKERIGNOREFILE"
+**/__pycache__
+*.pyc
+EOF
+    cat <<EOF  >"$DOCKERFILE"
+FROM $from_image AS ${executor_class}-executor-image
+LABEL Description="FD.io CI '$executor_class' executor docker image for $executor_os_name/$OS_ARCH"
+LABEL Vendor="fd.io"
+LABEL Version="$DOCKER_TAG"
 EOF
+    ${executor_class}_generate_apt_dockerfile "$executor_class" \
+        "$executor_os_name" "$executor_image"
 }
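
As a hedged illustration of the per-class dispatch used above (the OS, base image, and image name below are assumed placeholders, not values taken from this change):

    executor_class="builder"
    generate_apt_dockerfile "$executor_class" "ubuntu-20.04" \
        "ubuntu:20.04" "fdiotools/builder-ubuntu2004:test-x86_64"
    # writes $DOCKERIGNOREFILE and the FROM/LABEL header, then calls
    # builder_generate_apt_dockerfile "builder" "ubuntu-20.04" "fdiotools/builder-ubuntu2004:test-x86_64"
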
index 39f439f..3146684 100644 (file)
@@ -1,7 +1,7 @@
 # lib_common.sh - Docker build script common library.
 #                 For import only.
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 if [ -n "$(alias lib_common_imported 2> /dev/null)" ] ; then
     return 0
 fi
-alias lib_common_imported=true
+alias lib_common_imported="true"
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
 export CIMAN_ROOT="$(dirname $(dirname $CIMAN_DOCKER_SCRIPTS))"
 
-must_be_run_as_root() {
-    set_opts=$-
-    grep -q e <<< $set_opts && set +e # disable exit on errors
+must_be_run_as_root_or_docker_group() {
+    set_opts="$-"
+    set +e # disable exit on errors
 
-    # test if the user is root
-    if [ "${EUID:-$(id -u)}" -eq "0" ] ; then
-        grep -q e <<< $set_opts && set -e # re-enable exit on errors
+    # test if the user is root or id is in the 'docker' group
+    if [ "${EUID:-$(id -u)}" -eq "0" ] || grep -q "docker" <<< "$(id)" ; then
+        grep -q e <<< "$set_opts" && set -e # re-enable exit on errors
     else
         set +x
-        echo -e "\nERROR: Must be run as root!"
+        echo -e "\nERROR: Must be run as root or '$USER' must be in the group 'docker'!"
         if [ -n "$(declare -f usage)" ] ; then
             usage
         fi
-        grep -q e <<< $set_opts && set -e # re-enable exit on errors
+        grep -q e <<< "$set_opts" && set -e # re-enable exit on errors
         exit 1
     fi
 }
@@ -54,7 +54,7 @@ echo_log() {
         if [ -z "$(alias running_in_docker_build 2> /dev/null)" ] ; then
             echo
         else
-            echo | tee -a $FDIOTOOLS_IMAGE_BUILD_LOG 1>&2
+            echo | tee -a "$FDIOTOOLS_IMAGE_BUILD_LOG" 1>&2
         fi
         return 0
     fi
@@ -69,14 +69,14 @@ echo_log() {
     if [ -z "$(alias running_in_docker_build 2> /dev/null)" ] ; then
         echo ${echo_opts}"####> $@"
     else
-        echo ${echo_opts}"####> $(date): $@" | tee -a $FDIOTOOLS_IMAGE_BUILD_LOG 1>&2
+        echo ${echo_opts}"####> $(date -u): $@" | tee -a $FDIOTOOLS_IMAGE_BUILD_LOG 1>&2
     fi
 }
 
 dump_echo_log() {
     [ -z "$(alias running_in_docker_build 2> /dev/null)" ] && return 0
-    echo -e "\n\n####> $(date) Build log ($FDIOTOOLS_IMAGE_BUILD_LOG):"
-    cat $FDIOTOOLS_IMAGE_BUILD_LOG
+    echo -e "\n\n####> $(date -u) Build log ($FDIOTOOLS_IMAGE_BUILD_LOG):"
+    cat "$FDIOTOOLS_IMAGE_BUILD_LOG"
 }
 
 do_git_config() {
@@ -84,7 +84,7 @@ do_git_config() {
         echo_log "ERROR: do_git_config(): Invalid number of arguments ($#)!"
         return 1
     fi
-    cd $DOCKER_BUILD_DIR/$1
+    cd "$DOCKER_BUILD_DIR/$1"
 
     # Add user to git config so git commands don't fail
     local git_config_list="$(git config -l)"
@@ -101,21 +101,20 @@ do_git_branch() {
 
     echo_log "  Checking out '$branch' in $(pwd)"
     if [ -n "$(git branch | grep $branch)" ] ; then
-        git checkout $branch
+        git checkout "$branch"
     else
-        git checkout -b $branch --track origin/$branch
+        git checkout -b "$branch" --track "origin/$branch"
     fi
     git pull -q
     echo_log -e "  'git log --oneline | head':\n----- %< -----\n$(git log --oneline | head)\n----- %< -----"
 }
 
 clean_git_repo() {
-    local curr_dir=$(pwd)
-    cd $1
+    pushd "$1" >& /dev/null
     git clean -qfdx
     git checkout -q master
     git pull -q
-    cd $curr_dir
+    popd >& /dev/null
 }
 
 remove_pyc_files_and_pycache_dirs() {
@@ -124,11 +123,6 @@ remove_pyc_files_and_pycache_dirs() {
          -print -exec rm -rf {} \; 2>/dev/null || true
 }
 
-# Well-known filename variables
-export APT_DEBIAN_DOCKER_GPGFILE="docker.linux.debian.gpg"
-export APT_UBUNTU_DOCKER_GPGFILE="docker.linux.ubuntu.gpg"
-export YUM_CENTOS_DOCKER_GPGFILE="docker.linux.centos.gpg"
-
 # OS type variables
 # TODO: Investigate if sourcing /etc/os-release and using env vars from it
 #       works across all OS variants.  If so, clean up copy-pasta...
@@ -157,26 +151,24 @@ esac
 #       an untested docker image into production.
 export EXECUTOR_ROLES="sandbox test"
 export EXECUTOR_DEFAULT_CLASS="builder"
-export EXECUTOR_CLASS="$EXECUTOR_DEFAULT_CLASS"
+export EXECUTOR_CLASS=${EXECUTOR_CLASS:-"$EXECUTOR_DEFAULT_CLASS"}
 export EXECUTOR_CLASS_ARCH="$EXECUTOR_DEFAULT_CLASS-$OS_ARCH"
-export EXECUTOR_CLASSES="$EXECUTOR_DEFAULT_CLASS csit csit_dut csit_shim"
+export EXECUTOR_CLASSES="$EXECUTOR_DEFAULT_CLASS csit_dut csit_shim"
 export EXECUTOR_ARCHS="aarch64 x86_64"
 declare -A EXECUTOR_CLASS_ARCH_OS_NAMES
 EXECUTOR_CLASS_ARCH_OS_NAMES["builder-aarch64"]="centos-8 ubuntu-18.04 ubuntu-20.04"
 EXECUTOR_CLASS_ARCH_OS_NAMES["builder-x86_64"]="centos-7 centos-8 debian-9 debian-10 ubuntu-18.04 ubuntu-20.04"
-EXECUTOR_CLASS_ARCH_OS_NAMES["csit-aarch64"]="ubuntu-18.04"
-EXECUTOR_CLASS_ARCH_OS_NAMES["csit-x86_64"]="ubuntu-18.04"
-EXECUTOR_CLASS_ARCH_OS_NAMES["csit_dut-aarch64"]="ubuntu-18.04"
-EXECUTOR_CLASS_ARCH_OS_NAMES["csit_dut-x86_64"]="ubuntu-18.04"
-EXECUTOR_CLASS_ARCH_OS_NAMES["csit_shim-aarch64"]="ubuntu-18.04"
-EXECUTOR_CLASS_ARCH_OS_NAMES["csit_shim-x86_64"]="ubuntu-18.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit_dut-aarch64"]="ubuntu-18.04 ubuntu-20.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit_dut-x86_64"]="ubuntu-18.04 ubuntu-20.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit_shim-aarch64"]="ubuntu-18.04 ubuntu-20.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit_shim-x86_64"]="ubuntu-18.04 ubuntu-20.04"
 export EXECUTOR_CLASS_ARCH_OS_NAMES
 
 executor_list_roles() {
-    local set_opts=$-
-    grep -q u <<< $set_opts && set +u # disable undefined variable check
+    local set_opts="$-"
+    set +u # disable undefined variable check
     local indent=${1:-"     "}
-    grep -q u <<< $set_opts && set -u # re-enable undefined variable check
+    grep -q u <<< "$set_opts" && set -u # re-enable undefined variable check
 
     for role in $EXECUTOR_ROLES ; do
         echo -e "${indent}$role"
@@ -193,10 +185,10 @@ executor_verify_role() {
 }
 
 executor_list_classes() {
-    local set_opts=$-
-    grep -q u <<< $set_opts && set +u # disable undefined variable check
+    local set_opts="$-"
+    set +u # disable undefined variable check
     local indent=${1:-"     "}
-    grep -q u <<< $set_opts && set -u # re-enable undefined variable check
+    grep -q u <<< "$set_opts" && set -u # re-enable undefined variable check
 
     for class in $EXECUTOR_CLASSES ; do
         echo -e "${indent}$class"
@@ -213,10 +205,10 @@ executor_verify_class() {
 }
 
 executor_list_os_names() {
-    local set_opts=$-
-    grep -q u <<< $set_opts && set +u # disable undefined variable check
+    local set_opts="$-"
+    set +u # disable undefined variable check
     local indent=${1:-"     "}
-    grep -q u <<< $set_opts && set -u # re-enable undefined variable check
+    grep -q u <<< "$set_opts" && set -u # re-enable undefined variable check
 
     echo
     echo "Valid executor OS names for class '$EXECUTOR_CLASS':"
@@ -235,40 +227,43 @@ executor_verify_os_name() {
 }
 
 # Docker variables
+export DOCKER_DATE=${DOCKER_DATE:-"$(date -u +%Y_%m_%d_%H%M%S_UTC)"}
 export DOCKER_BUILD_DIR="/scratch/docker-build"
 export DOCKER_CIMAN_ROOT="$DOCKER_BUILD_DIR/ci-management"
 export DOCKERFILE="$DOCKER_BUILD_DIR/Dockerfile"
 export DOCKERIGNOREFILE="$DOCKER_BUILD_DIR/.dockerignore"
 export DOCKERFILE_FROM=${DOCKERFILE_FROM:="${OS_ID}:${OS_VERSION_ID}"}
-export DOCKER_TAG="$(date +%Y_%m_%d_%H%M%S)-$OS_ARCH"
+export DOCKER_TAG="$DOCKER_DATE-$OS_ARCH"
 export DOCKER_VPP_DIR="$DOCKER_BUILD_DIR/vpp"
 export DOCKER_CSIT_DIR="$DOCKER_BUILD_DIR/csit"
-export DOCKER_GPG_KEY_DIR="$DOCKER_BUILD_DIR/gpg-key"
-export DOCKER_APT_UBUNTU_DOCKER_GPGFILE="$DOCKER_GPG_KEY_DIR/$APT_UBUNTU_DOCKER_GPGFILE"
-export DOCKER_APT_DEBIAN_DOCKER_GPGFILE="$DOCKER_GPG_KEY_DIR/$APT_DEBIAN_DOCKER_GPGFILE"
 export DOCKER_DOWNLOADS_DIR="/root/Downloads"
+export DOCKER_BUILD_FILES_DIR="$DOCKER_BUILD_DIR/files"
 
 docker_build_setup_ciman() {
     if [ "$(dirname $CIMAN_ROOT)" != "$DOCKER_BUILD_DIR" ] ; then
         echo_log "Updating $CIMAN_ROOT git submodules..."
-        pushd $CIMAN_ROOT
+        pushd "$CIMAN_ROOT"
         git submodule update --init --recursive
         popd
         if [ -d "$DOCKER_BUILD_DIR" ] ; then
             echo_log "Removing existing DOCKER_BUILD_DIR: $DOCKER_BUILD_DIR..."
-            rm -rf $DOCKER_BUILD_DIR
+            local sudo_cmd=""
+            if [ "$(whoami)" != "$(stat -c %U $DOCKER_BUILD_DIR)" ] ; then
+                sudo_cmd="sudo"
+            fi
+            ${sudo_cmd} rm -rf "$DOCKER_BUILD_DIR"
         fi
         echo_log "Syncing $CIMAN_ROOT into $DOCKER_CIMAN_ROOT..."
-        mkdir -p $DOCKER_BUILD_DIR $DOCKER_GPG_KEY_DIR
-        rsync -a $CIMAN_ROOT/. $DOCKER_CIMAN_ROOT
+        mkdir -p "$DOCKER_BUILD_DIR"
+        rsync -a "$CIMAN_ROOT/." "$DOCKER_CIMAN_ROOT"
     else
-        mkdir -p $DOCKER_BUILD_DIR $DOCKER_GPG_KEY_DIR
+        mkdir -p "$DOCKER_BUILD_DIR"
     fi
 }
 
 # Variables used in docker build environment
-set_opts=$-
-grep -q u <<< $set_opts && set +u # disable undefined variable check
+set_opts="$-"
+set +u # disable undefined variable check
 if [ -n "$FDIOTOOLS_IMAGE" ] ; then
     alias running_in_docker_build=true
     export DOCKER_BUILD_LOG_DIR="$DOCKER_BUILD_DIR/logs"
@@ -276,4 +271,4 @@ if [ -n "$FDIOTOOLS_IMAGE" ] ; then
     export FDIOTOOLS_IMAGE_BUILD_LOG="$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME.log"
     mkdir -p $DOCKER_BUILD_LOG_DIR
 fi
-grep -q u <<< $set_opts && set -u # re-enable undefined variable check
+grep -q u <<< "$set_opts" && set -u # re-enable undefined variable check
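
For example (illustrative timestamp only), the UTC-based tag scheme introduced above yields:

    # date -u +%Y_%m_%d_%H%M%S_UTC on 2021-03-20 at 23:15:00 UTC, x86_64 host:
    DOCKER_DATE=2021_03_20_231500_UTC
    DOCKER_TAG=2021_03_20_231500_UTC-x86_64
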
index a903ab0..b0133ec 100644 (file)
@@ -1,7 +1,7 @@
 # lib_csit.sh - Docker build script CSIT library.
 #               For import only.
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -21,20 +21,39 @@ fi
 alias lib_csit_imported=true
 
 export CIMAN_DOCKER_SCRIPTS="${CIMAN_DOCKER_SCRIPTS:-$(dirname $BASH_SOURCE)}"
-. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_apt.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_yum.sh
-. $CIMAN_DOCKER_SCRIPTS/lib_dnf.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_apt.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_yum.sh"
+. "$CIMAN_DOCKER_SCRIPTS/lib_dnf.sh"
+
+CSIT_SUPPORTED_EXECUTOR_CLASSES="builder csit_dut"
+csit_supported_executor_class() {
+    if ! grep -q "${1:-}" <<< "$CSIT_SUPPORTED_EXECUTOR_CLASSES" ; then
+        return 1
+    fi
+    return 0
+}
+
+csit_supported_os() {
+    case "$1" in
+        # TODO: Remove ubuntu-18.04 once CSIT has completed transition
+        #       to ubuntu-20.04
+        ubuntu-18.04) return 0 ;;
+        ubuntu-20.04) return 0 ;;
+                   *) ;;
+    esac
+    return 1
+}
 
 csit_checkout_branch_for_vpp() {
-    local vpp_branch=$1
+    local vpp_branch="$1"
     local csit_dir="$DOCKER_CSIT_DIR"
     local csit_bash_function_dir="$csit_dir/resources/libraries/bash/function"
 
     # import checkout_csit_for_vpp() if not defined
     set +e && [ -z "$(declare -f checkout_csit_for_vpp)" ] \
-        && source $csit_bash_function_dir/branch.sh
-    CSIT_DIR=$(csit_dir) checkout_csit_for_vpp $vpp_branch
+        && source "$csit_bash_function_dir/branch.sh"
+    CSIT_DIR="$csit_dir" checkout_csit_for_vpp "$vpp_branch"
 
     csit_branch="$(git branch | grep -e '^*' | mawk '{print $2}')"
 }
@@ -61,34 +80,40 @@ csit_install_packages() {
         python3 -m pip install pyyaml
     fi
 
-    # NOTE: the CSIT baseline os is implicitly pinned to ubuntu 18.04
-    #       so only gather packages from ansible for that OS.
-    if [ "$OS_NAME" = "ubuntu-18.04" ] ; then
-        # Not in double quotes to let bash remove newline characters
-        local exclude_roles="-e calibration -e kernel -e mellanox -e nomad"
-        [ "$OS_ARCH" = "aarch64" ] && exclude_roles="$exclude_roles -e iperf"
-        local yaml_files=$(grep -r packages_by $csit_ansible_dir | cut -d: -f1 | sort -u | grep -v $exclude_roles)
-        packages=$(dbld_csit_find_ansible_packages.py --$OS_ID --$OS_ARCH $yaml_files)
-
-        if [ -n "$packages" ] ; then
-            case "$OS_NAME" in
-                ubuntu*)
-                    apt_install_packages $packages
-                    ;;
-                debian*)
-                    apt_install_packages $packages
-                    ;;
-                centos-7)
-                    yum_install_packages $packages
-                    ;;
-                centos-8)
-                    dnf_install_packages $packages
-                    ;;
-                *)
-                    echo "Unsupported OS ($OS_ID): CSIT packages NOT INSTALLED!"
-                    ;;
-            esac
-        fi
+    local exclude_roles="-e calibration -e kernel -e mellanox -e nomad -e consul"
+    [ "$OS_ARCH" = "aarch64" ] && exclude_roles="$exclude_roles -e iperf"
+
+    # Not in double quotes to let bash remove newline characters
+    local yaml_files="$(grep -r packages_by $csit_ansible_dir | cut -d: -f1 | sort -u | grep -v $exclude_roles)"
+    packages="$(dbld_csit_find_ansible_packages.py --$OS_ID --$OS_ARCH $yaml_files)"
+    packages="${packages/bionic /}"
+    packages="${packages/focal /}"
+
+    # TODO: Fix Ubuntu-18.04 specific package names that fail on Ubuntu-20.04
+    #       (remove when CSIT is updated)
+    if [ "$OS_NAME" = "ubuntu-20.04" ] ; then
+        packages="${packages/libmbedcrypto1/libmbedcrypto3}"
+        packages="${packages/libmbedtls10/libmbedtls12}"
+        packages="$(echo ${packages//python\-/python3\-} | tr ' ' '\n' | sort -u | xargs)"
+    fi
+    if [ -n "$packages" ] ; then
+        case "$OS_NAME" in
+            ubuntu*)
+                apt_install_packages $packages
+                ;;
+            debian*)
+                apt_install_packages $packages
+                ;;
+            centos-7)
+                yum_install_packages $packages
+                ;;
+            centos-8)
+                dnf_install_packages $packages
+                ;;
+            *)
+                echo "Unsupported OS ($OS_ID): CSIT packages NOT INSTALLED!"
+                ;;
+        esac
     fi
 }
 
@@ -97,71 +122,282 @@ csit_pip_cache() {
     local VENV_OPTS=""
     # ensure PS1 is defined (used by virtualenv activate script)
     PS1=${PS1:-"#"}
-    local csit_dir="$DOCKER_CSIT_DIR"
-    local csit_bash_function_dir="$csit_dir/resources/libraries/bash/function"
+    CSIT_DIR="$DOCKER_CSIT_DIR"
 
-    if [ -f "$csit_dir/VPP_REPO_URL" ] \
-           && [ -f "$csit_dir/requirements.txt" ]; then
+    if [ -f "$CSIT_DIR/VPP_REPO_URL" ] \
+           && [ -f "$CSIT_DIR/requirements.txt" ]; then
 
+        local csit_bash_function_dir="$CSIT_DIR/resources/libraries/bash/function"
         local branchname="$(echo $branch | sed -e 's,/,_,')"
         local bld_log="$DOCKER_BUILD_LOG_DIR"
         bld_log="${bld_log}/$FDIOTOOLS_IMAGENAME-$branchname-csit_pip_cache-bld.log"
-
-        export PYTHONPATH=$csit_dir
-        git clean -qfdx
+        local pip_cmd="python3 -m pip --disable-pip-version-check"
+        export PYTHONPATH=$CSIT_DIR
 
         description="Install CSIT python packages from $branch branch"
         echo_log "    Starting  $description..."
-        [ -n "$(declare -f deactivate)" ] && deactivate
-        local PIP=pip
-        local setup_framework=$csit_dir/resources/libraries/python/SetupFramework.py
-        if [ -n "$(grep pip3 $setup_framework)" ]; then
-            PIP=pip3
-            VENV_OPTS="-p python3"
-        fi
-        rm -rf $PYTHONPATH/env
-        virtualenv $VENV_OPTS $PYTHONPATH/env
-        . $PYTHONPATH/env/bin/activate
-        if [ "$OS_ARCH" = "aarch64" ] ; then
-            local numpy_ver="$(grep numpy $PYTHONPATH/requirements.txt)"
-            [ -n "$numpy_ver" ] && $PIP install --upgrade $numpy_ver 2>&1 \
-                | tee -a $bld_log
-        fi
-        $PIP install --upgrade -r $PYTHONPATH/requirements.txt 2>&1 \
-            | tee -a $bld_log
-        $PIP install --upgrade -r $PYTHONPATH/tox-requirements.txt 2>&1 \
-            | tee -a $bld_log
-        if [ "$OS_ARCH" = "x86_64" ] ; then
-            local PRESENTATION_DIR="$PYTHONPATH/resources/tools/presentation"
-            # TODO: Remove condition when 19.08 is deprecated.
-            if [ -n "$(grep -r python3 $PRESENTATION_DIR)" ] && [ "$PIP" = "pip3" ] ; then
-                $PIP install --upgrade -r $PRESENTATION_DIR/requirements.txt 2>&1 \
-                    | tee -a $bld_log
-            else
-                echo_log "Skipping 'pip install $PRESENTATION_DIR/requirements.txt' in branch $branch!"
-            fi
-        fi
+        git clean -qfdx
+        rm -rf "$PYTHONPATH/env"
 
-        deactivate
-        rm -rf $PYTHONPATH/env
+        # TODO: Update CSIT release branches to avoid build breakage
+        #       Fixes https://github.com/pypa/pip/issues/8260
+        $pip_cmd install pip==21.0.1
+        #       rls2009_lts-* branches missing cherry-pick of
+        #       https://gerrit.fd.io/r/c/csit/+/31338
+        sed -i 's/scipy==1.1.0/scipy==1.5.4/' "$PYTHONPATH/requirements.txt"
 
         # Virtualenv version is pinned in common.sh in newer csit branches.
         # (note: xargs removes leading/trailing spaces)
-        install_virtualenv="$(grep 'virtualenv' $csit_bash_function_dir/common.sh | grep pip | grep install | cut -d'|' -f1 | xargs)"
+        local common_sh="$csit_bash_function_dir/common.sh"
+        install_virtualenv="$(grep 'virtualenv' $common_sh | grep pip | grep install | cut -d'|' -f1 | xargs)"
         $install_virtualenv
+        virtualenv --no-download --python=$(which python3) "$CSIT_DIR/env"
+        source "$CSIT_DIR/env/bin/activate"
+
+        if [ "$OS_ARCH" = "aarch64" ] ; then
+            local numpy_ver="$(grep numpy $PYTHONPATH/requirements.txt)"
+            [ -n "$numpy_ver" ] && $pip_cmd install $numpy_ver 2>&1 | \
+                tee -a $bld_log
+        fi
 
+        # Install csit python requirements
+        $pip_cmd install -r "$CSIT_DIR/requirements.txt" 2>&1 | \
+            tee -a "$bld_log"
+        # Install tox python requirements
+        $pip_cmd install -r "$CSIT_DIR/tox-requirements.txt" 2>&1 | \
+            tee -a "$bld_log"
+        # Run tox which installs pylint requirements
+        pushd "$CSIT_DIR" >& /dev/null
+        tox || true
+        popd >& /dev/null
+
+        # Clean up virtualenv directories
+        deactivate
         git checkout -q -- .
+        git clean -qfdx
         echo_log "    Completed $description!"
     else
-        echo_log "ERROR: Missing or invalid CSIT_DIR: '$csit_dir'!"
+        echo_log "ERROR: Missing or invalid CSIT_DIR: '$CSIT_DIR'!"
         return 1
     fi
 }
 
 docker_build_setup_csit() {
-    if [ ! -d "$DOCKER_CSIT_DIR" ] ; then
-        echo_log "Cloning CSIT into $DOCKER_CSIT_DIR..."
-        git clone -q https://gerrit.fd.io/r/csit $DOCKER_CSIT_DIR
+    if csit_supported_executor_class "$EXECUTOR_CLASS" ; then
+        if [ ! -d "$DOCKER_CSIT_DIR" ] ; then
+            echo_log "Cloning CSIT into $DOCKER_CSIT_DIR..."
+            git clone -q https://gerrit.fd.io/r/csit "$DOCKER_CSIT_DIR"
+        fi
+        clean_git_repo "$DOCKER_CSIT_DIR"
     fi
-    clean_git_repo $DOCKER_CSIT_DIR
+}
+
+csit_dut_generate_docker_build_files() {
+    local build_files_dir="$DOCKER_BUILD_FILES_DIR"
+
+    mkdir -p "$build_files_dir"
+    cat <<EOF >"$build_files_dir/supervisord.conf"
+[unix_http_server]
+file = /tmp/supervisor.sock
+chmod = 0777
+
+[rpcinterface:supervisor]
+supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
+
+[supervisorctl]
+serverurl = unix:///tmp/supervisor.sock
+
+[supervisord]
+pidfile = /tmp/supervisord.pid
+identifier = supervisor
+directory = /tmp
+logfile = /tmp/supervisord.log
+loglevel = debug
+nodaemon = false
+
+[program:vpp]
+command = /usr/bin/vpp -c /etc/vpp/startup.conf
+autostart = false
+autorestart = true
+redirect_stderr = true
+priority = 1
+EOF
+}
+
+csit_builder_generate_docker_build_files() {
+    local build_files_dir="$DOCKER_BUILD_FILES_DIR"
+    local dashes="-----"
+    local dbeg="${dashes}BEGIN"
+    local dend="${dashes}END"
+    local pvt="PRIVATE"
+    local kd="KEY$dashes"
+
+    # TODO: Verify why badkey is required & figure out how to avoid it.
+    mkdir -p "$build_files_dir"
+    cat <<EOF >"$build_files_dir/badkey"
+$dbeg RSA $pvt $kd
+MIIEowIBAAKCAQEAslDXf4kZOQI8OGQQdIF8o83nBM0B4fzHLYLxxiY2rKiQ5MGM
+mQa7p1KKzmd5/NlvFRnXefnjSDQljjPxEY7mh457rX2nXvqHD4GUXZPpBIE73rQ1
+TViIAXdDzFXJ6ee4yX8ewmVakzYBnlUPDidkWyRnjm/xCgKUCO+CD5AH3ND0onks
+OYAtHqhDh29/QMIKdMnK87FBxfzhInHwpqPur76zBnpw3u36ylKEymDFrO5dwzsh
+QvDWjsYRg9ydTXubtwP6+MOpjdR1SNKxcCHKJrPrdAeJW9jg1imYmYpEHZ/P3qsL
+Jm0hGWbFjdxZLIYIz0vN/nTalcAeqT2OWKrXuwIDAQABAoIBAQCcj1g2FOR9ZlYD
+WPANqucJVy4/y9OcXHlwnyiyRjj47WOSRdGxRfUa2uEeikHT3ACo8TB8WwfQDGDw
+8u/075e+az5xvAJo5OQSnD3sz4Hmv6UWSvkFuPZo+xMe5C/M2/QljiQuoBifaeqP
+3rTCQ5ncYCFAMU7b8BmTot551Ybhu2jCbDMHU7nFHEFOvYinkwfVcaqkrVDUuH+D
+c3NkAEH9Jz2MEYA2Va4uqFpGt5lfGiED2kMenwPa8eS5LS5HJsxkfMHGlaHXHFUb
+D+dG/qJtSslVxdzVPgEGvzswo6TgtY1nZTQcB8U63rktFg38B7QGtOkvswAYzxyk
+HdMIiU3RAoGBAOdIEQRcAThj9eiIFywtBgLBOSg4SoOnvELLr6lgUg2+ICmx06LQ
+yaai1QRdOWw1VwZ6apNCD00kaUhBu+ou93yLSDnR2uYftkylhcnVuhDyIeNyb81V
+hV2z0WuNv3aKBFlBxaq391S7WW1XxhpAAagm8fZZur73wV390EVd/hZJAoGBAMVf
+negT2bg5PVKWvsiEU6eZ00W97tlEDLclkiZawXNnM2/c+2x1Tks6Yf1E/j2FFTB4
+r0fesbwN346hCejtq5Bup5YEdFA3KtwT5UyeQQLFGYlCtRmBtOd10wkRS93D0tpX
+iIqkf43Gpx6iFdvBWY5A7N+ZmojCy9zpL5TJ4G3jAoGADOGEoRuGrd9TWMoLkFhJ
+l2mvhz/rVn3HDGlPtT06FK3cGLZgtRavxGoZNw8CHbayzBeRS/ZH5+H5Qx72GkrX
+WcZgFWhMqrhlbMtjMiSHIl556LL86xCyRs+3ACh6211AdMAnBCUOz1dH2cEjtV6P
+ORBCNZg1wGEIEfYK3XIorpECgYBubXfQj8KhUs0fdx3Y3Ehdni/ZdlG7F1qx4YBq
+mx5e7d+Wd6Hn5Z3fcxO9+yrvypS3YN5YrJzuZSiuCSWdP9RcY7y5r1ZQRv1g0nTZ
+MDWZUiNea4cddTd8xKxFB3tV4SkIZi8LustuzDVWa0Mlh4EOmP6uf6c5WxtqRsEL
+UwORFwKBgEjZsfmZGBurjOtSrcsteulOB0D2nOqPVRWXmbSNJT/l73DkEllvVyA/
+wdW39nyFrA2Qw1K2F+l8DkzMd/WEjmioSWCsvTkXlvrqPfByKg01zCbYy/mhRW7d
+7sQrPOIl8ygsc3JrxmvzibdWmng1MehvpAM1ogWeTUa1lsDTNJ/6
+$dend RSA $pvt $kd
+EOF
+    chmod 600 "$build_files_dir/badkey"
+    cat <<EOF >"$build_files_dir/sshconfig"
+Host 172.17.0.*
+        StrictHostKeyChecking no
+        UserKnownHostsFile=/dev/null
+EOF
+}
+
+csit_shim_generate_docker_build_files() {
+    local build_files_dir="$DOCKER_BUILD_FILES_DIR"
+    # TODO: Verify why badkey is required & figure out how to avoid it.
+    local badkey='AAAAB3NzaC1yc2EAAAADAQABAAABAQCyUNd/iRk5Ajw4ZBB0gXyjzecEzQHh/MctgvHGJjasqJDkwYyZBrunUorOZ3n82W8VGdd5+eNINCWOM/ERjuaHjnutfade+ocPgZRdk+kEgTvetDVNWIgBd0PMVcnp57jJfx7CZVqTNgGeVQ8OJ2RbJGeOb/EKApQI74IPkAfc0PSieSw5gC0eqEOHb39Awgp0ycrzsUHF/OEicfCmo+6vvrMGenDe7frKUoTKYMWs7l3DOyFC8NaOxhGD3J1Ne5u3A/r4w6mN1HVI0rFwIcoms+t0B4lb2ODWKZiZikQdn8/eqwsmbSEZZsWN3FkshgjPS83+dNqVwB6pPY5Yqte7'
+
+    mkdir -p "$build_files_dir"
+    # TODO: Verify why badkeypub is required & figure out how to avoid it.
+    echo "ssh-rsa $badkey ejk@bhima.local" >"$build_files_dir/badkeypub"
+
+    cat <<EOF >"$build_files_dir/sshconfig"
+Host 172.17.0.*
+        StrictHostKeyChecking no
+        UserKnownHostsFile=/dev/null
+EOF
+    cat <<EOF >"$build_files_dir/wrapdocker"
+#!/bin/bash
+
+# Ensure that all nodes in /dev/mapper correspond to mapped devices currently loaded by the device-mapper kernel driver
+dmsetup mknodes
+
+# First, make sure that cgroups are mounted correctly.
+CGROUP=/sys/fs/cgroup
+: \${LOG:=stdio}
+
+[ -d \$CGROUP ] ||
+    mkdir \$CGROUP
+
+mountpoint -q \$CGROUP ||
+    mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup \$CGROUP || {
+        echo "Could not make a tmpfs mount. Did you use --privileged?"
+        exit 1
+    }
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
+then
+    mount -t securityfs none /sys/kernel/security || {
+        echo "Could not mount /sys/kernel/security."
+        echo "AppArmor detection and --privileged mode might break."
+    }
+fi
+
+# Mount the cgroup hierarchies exactly as they are in the parent system.
+for SUBSYS in \$(cut -d: -f2 /proc/1/cgroup)
+do
+        [ -d \$CGROUP/\$SUBSYS ] || mkdir \$CGROUP/\$SUBSYS
+        mountpoint -q \$CGROUP/\$SUBSYS ||
+                mount -n -t cgroup -o \$SUBSYS cgroup \$CGROUP/\$SUBSYS
+
+        # The two following sections address a bug which manifests itself
+        # by a cryptic "lxc-start: no ns_cgroup option specified" when
+        # trying to start containers within a container.
+        # The bug seems to appear when the cgroup hierarchies are not
+        # mounted on the exact same directories in the host, and in the
+        # container.
+
+        # Named, control-less cgroups are mounted with "-o name=foo"
+        # (and appear as such under /proc/<pid>/cgroup) but are usually
+        # mounted on a directory named "foo" (without the "name=" prefix).
+        # Systemd and OpenRC (and possibly others) both create such a
+        # cgroup. To avoid the aforementioned bug, we symlink "foo" to
+        # "name=foo". This shouldn't have any adverse effect.
+        echo \$SUBSYS | grep -q ^name= && {
+                NAME=\$(echo \$SUBSYS | sed s/^name=//)
+                ln -s \$SUBSYS \$CGROUP/\$NAME
+        }
+
+        # Likewise, on at least one system, it has been reported that
+        # systemd would mount the CPU and CPU accounting controllers
+        # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+        # but on a directory called "cpu,cpuacct" (note the inversion
+        # in the order of the groups). This tries to work around it.
+        [ \$SUBSYS = cpuacct,cpu ] && ln -s \$SUBSYS \$CGROUP/cpu,cpuacct
+done
+
+# Note: as I write those lines, the LXC userland tools cannot set up
+# a "sub-container" properly if the "devices" cgroup is not in its
+# own hierarchy. Let's detect this and issue a warning.
+grep -q :devices: /proc/1/cgroup ||
+    echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
+grep -qw devices /proc/1/cgroup ||
+    echo "WARNING: it looks like the 'devices' cgroup is not mounted."
+
+# Now, close extraneous file descriptors.
+pushd /proc/self/fd >/dev/null
+for FD in *
+do
+    case "\$FD" in
+    # Keep stdin/stdout/stderr
+    [012])
+        ;;
+    # Nuke everything else
+    *)
+        eval exec "\$FD>&-"
+        ;;
+    esac
+done
+popd >/dev/null
+
+
+# If a pidfile is still around (for example after a container restart),
+# delete it so that docker can start.
+rm -rf /var/run/docker.pid
+
+# If we were given a PORT environment variable, start as a simple daemon;
+# otherwise, spawn a shell as well
+if [ "\$PORT" ]
+then
+    exec dockerd -H 0.0.0.0:\$PORT -H unix:///var/run/docker.sock \
+        \$DOCKER_DAEMON_ARGS
+else
+    if [ "\$LOG" == "file" ]
+    then
+        dockerd \$DOCKER_DAEMON_ARGS &>/var/log/docker.log &
+    else
+        dockerd \$DOCKER_DAEMON_ARGS &
+    fi
+    (( timeout = 60 + SECONDS ))
+    until docker info >/dev/null 2>&1
+    do
+        if (( SECONDS >= timeout )); then
+            echo 'Timed out trying to connect to internal docker host.' >&2
+            break
+        fi
+        sleep 1
+    done
+    [[ \$1 ]] && exec "\$@"
+    exec bash --login
+fi
+EOF
 }
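
A small worked example (made-up package list) of the ubuntu-20.04 package-name rewriting performed in csit_install_packages() above:

    packages="libmbedcrypto1 libmbedtls10 python-dev python-virtualenv"
    packages="${packages/libmbedcrypto1/libmbedcrypto3}"
    packages="${packages/libmbedtls10/libmbedtls12}"
    packages="$(echo ${packages//python\-/python3\-} | tr ' ' '\n' | sort -u | xargs)"
    echo "$packages"  # libmbedcrypto3 libmbedtls12 python3-dev python3-virtualenv
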
index 4882180..c5bb5c4 100644 (file)
@@ -1,7 +1,7 @@
 # lib_dnf.sh - Docker build script dnf library.
 #              For import only.
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 if [ -n "$(alias lib_dnf_imported 2> /dev/null)" ] ; then
     return 0
 fi
-alias lib_dnf_imported=true
+alias lib_dnf_imported="true"
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
-. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"
 
 dump_dnf_package_list() {
     branchname="$(echo $branch | sed -e 's,/,_,')"
@@ -30,94 +30,76 @@ dump_dnf_package_list() {
 }
 
 dnf_install_packages() {
-    dnf install -y $@
+    dnf install -y "$@"
 }
 
-dnf_install_docker_os_package_dependancies() {
-    dnf_install_packages dnf-utils
-}
+generate_dnf_dockerfile_clean() {
+    cat <<EOF >>"$DOCKERFILE"
 
-dnf_install_docker() {
-    # Note: Support for docker has been removed starting with centos-8, so the
-    #       only recourse is to pin the latest version by what's in the download dir.
-    # Browse the base URL to see what is available & update accordingly.
-
-    if [ "$OS_NAME" = "centos-8" ] ; then
-        dnf install -y https://download.docker.com/linux/$OS_ID/$OS_VERSION_ID/$OS_ARCH/stable/Packages/containerd.io-1.3.7-3.1.el8.$OS_ARCH.rpm
-        dnf install -y https://download.docker.com/linux/$OS_ID/$OS_VERSION_ID/$OS_ARCH/stable/Packages/docker-ce-cli-19.03.13-3.el8.$OS_ARCH.rpm
-        dnf install -y https://download.docker.com/linux/$OS_ID/$OS_VERSION_ID/$OS_ARCH/stable/Packages/docker-ce-19.03.13-3.el8.$OS_ARCH.rpm
-    else
-        echo_log "WARNING: Docker Image unknown for $OS_NAME!"
-    fi
+# Clean up
+RUN dbld_dump_build_logs.sh \\
+  && rm -rf /tmp/*
+EOF
 }
 
-generate_dnf_dockerfile() {
-    local executor_os_name=$1
-    local from_image=$2
-    local executor_image=$3
-    local from_image_os_id="$(echo $from_image | cut -d: -f2)"
-
-    cat <<EOF >$DOCKERFILE
-FROM $from_image AS executor-image
-LABEL Description="FD.io CI executor docker image for $executor_os_name/$OS_ARCH"
-LABEL Vendor="fd.io"
-LABEL Version="$DOCKER_TAG"
+generate_dnf_dockerfile_common() {
+    local executor_class="$1"
+    local executor_image="$2"
+
+    cat <<EOF >>"$DOCKERFILE"
 
 # Build Environment Variables
-ENV FDIOTOOLS_IMAGE=$executor_image
-ENV LC_ALL=C.UTF-8
+ENV FDIOTOOLS_IMAGE="$executor_image"
+ENV FDIOTOOLS_EXECUTOR_CLASS="$executor_class"
 ENV CIMAN_ROOT="$DOCKER_CIMAN_ROOT"
-ENV PATH=$PATH:$DOCKER_CIMAN_ROOT/docker/scripts
-ENV LF_VENV="/root/lf-venv"
+ENV PATH="\$PATH:$DOCKER_CIMAN_ROOT/docker/scripts"
 
 # Copy-in build tree containing
 # ci-management, vpp, & csit git repos
 WORKDIR $DOCKER_BUILD_DIR
 COPY . .
 
+# Configure locales
+RUN LC_ALL=C.UTF-8 dnf install -y glibc-langpack-en
+ENV LANG="en_US.UTF-8" LANGUAGE="en_US" LC_ALL="en_US.UTF-8"
+
 # Install baseline packages (minimum build & utils).
 #
 # ci-management global-jjb requirements:
-#   for lftools:
-#       libxml2-devel
-#       libxslt-devel
-#       xmlstarlet
-#   for lf-env.sh:
-#       facter
+#    for lftools:
+#        libxml2-devel
+#        libxslt-devel
+#        xmlstarlet
+#    for lf-env.sh:
+#        facter
 #   from packer/provision/baseline.sh:
-#       deltarpm
-#       unzip
-#       xz
-#       python3-pip
-#       git
-#       git-review
-#       perl-XML-XPath
-#       make
-#       wget
+#        deltarpm
+#        unzip
+#        xz
+#        python3-pip
+#        git
+#        git-review
+#        perl-XML-XPath
+#        make
+#        wget
 #
-# TODO: Fix broken project requirement install targets
+# TODO:  Fix broken project requirement install targets
+#        graphviz           for 'make bootstrap-doxygen' (VPP)
+#        doxygen            for 'make doxygen' (VPP)
+#        enchant            for 'make docs' (VPP)
+#        libffi-devel       for python cffi install (Ubuntu20.04/VPP/aarch64)
+#        libpcap-devel      for python pypcap install (CSIT)
+#        lapack-devel       for python numpy/scipy (CSIT/aarch64)
+#        openblas-devel     for python numpy/scipy (CSIT/aarch64)
+#        sshpass            for CSIT
 #
-#   graphviz           for 'make bootstrap-doxygen' (VPP)
-#   doxygen            for 'make doxygen' (VPP)
-#   enchant            for 'make docs' (VPP)
-#   libffi-devel       for python cffi install (Ubuntu20.04/VPP/aarch64)
-#   libpcap-devel      for python pypcap install (CSIT)
-#   lapack-devel       for python numpy/scipy (CSIT/aarch64)
-#   openblas-devel     for python numpy/scipy (CSIT/aarch64)
-#   sshpass            for CSIT
-#
-RUN export LC_ALL=C.UTF8 \\
-    && dnf update -y \\
-    && dnf install -y \\
-        dnf-plugins-core \\
-        epel-release \\
-    && dnf config-manager --set-enabled \$(dnf repolist all 2> /dev/null | grep -i powertools | cut -d' ' -f1) --set-enabled epel \\
-    && dnf repolist all \\
-    && dnf clean all
-RUN export LC_ALL=C.UTF8 \\
-    && dnf update -y \\
-    && dnf install -y \\
+RUN dnf update -y \\
+  && dnf install -y \\
+         dnf-plugins-core \\
+         epel-release \\
+  && dnf config-manager --set-enabled \$(dnf repolist all 2> /dev/null | grep -i powertools | cut -d' ' -f1) --set-enabled epel \\
+  && dnf repolist all \\
+  && dnf clean all
+RUN dnf install -y \\
         dnf-utils \\
+        bind-utils \\
         doxygen \\
         enchant \\
         emacs \\
@@ -137,12 +119,10 @@ RUN export LC_ALL=C.UTF8 \\
         libxslt-devel \\
         make \\
         mawk \\
-        mock \\
         openblas-devel \\
         perl \\
         perl-XML-XPath \\
         python3-pip \\
-        rake \\
         rsync \\
         ruby-devel \\
         sshpass \\
@@ -153,24 +133,76 @@ RUN export LC_ALL=C.UTF8 \\
         wget \\
         xmlstarlet \\
         xz \\
-    && dnf clean all
+   && curl -s https://packagecloud.io/install/repositories/fdio/master/script.rpm.sh | bash \\
+   && curl -fsSL https://get.docker.com | sh \\
+   && dnf clean all
 
 # Install OS packages for project branches
 #
 RUN dbld_vpp_install_packages.sh \\
-    && dbld_install_docker.sh \\
-    && dbld_csit_install_packages.sh \\
-    && dbld_lfit_requirements.sh \\
-    && dnf clean all
+  && dbld_csit_install_packages.sh \\
+  && dnf clean all
+EOF
+}
+
+builder_generate_dnf_dockerfile() {
+    local executor_class="$1"
+    local executor_os_name="$2"
+    local from_image="$3"
+    local executor_image="$4"
+
+    generate_dnf_dockerfile_common "$executor_class" "$executor_image"
+    cat <<EOF >>"$DOCKERFILE"
+
+# Install LF-IT requirements
+ENV LF_VENV="/root/lf-venv"
+RUN dbld_lfit_requirements.sh \\
+  && dnf clean all
+
+# Install packagecloud requirements
+RUN gem install package_cloud \\
+  && curl -s https://packagecloud.io/install/repositories/fdio/master/script.rpm.sh | bash
 
 # CI Runtime Environment
 WORKDIR /
-ENV VPP_ZOMBIE_NOCHECK=1
-RUN gem install package_cloud \\
-    && curl -s https://packagecloud.io/install/repositories/fdio/master/script.rpm.sh | sudo bash
+ENV VPP_ZOMBIE_NOCHECK="1"
+# TODO: Mount ccache volume into docker container, then remove this.
+ENV CCACHE_DISABLE="1"
+EOF
+    generate_dnf_dockerfile_clean
+}
 
-# Clean up
-RUN dbld_dump_build_logs.sh \\
-    && rm -rf /tmp/*
+csit_generate_dnf_dockerfile() {
+    echo_log "ERROR: ${FUNCNAME[0]} TBD!"
+    exit 1
+}
+
+csit_dut_generate_dnf_dockerfile() {
+    echo_log "ERROR: ${FUNCNAME[0]} TBD!"
+    exit 1
+}
+
+csit_shim_generate_dnf_dockerfile() {
+    echo_log "ERROR: ${FUNCNAME[0]} TBD!"
+    exit 1
+}
+
+generate_dnf_dockerfile() {
+    local executor_class="$1"
+    local executor_os_name="$2"
+    local from_image="$3"
+    local executor_image="$4"
+
+    cat <<EOF  >"$DOCKERIGNOREFILE"
+**/__pycache__
+*.pyc
+EOF
+    cat <<EOF  >"$DOCKERFILE"
+FROM $from_image AS ${executor_class}-executor-image
+LABEL Description="FD.io CI '$executor_class' executor docker image for $executor_os_name/$OS_ARCH"
+LABEL Vendor="fd.io"
+LABEL Version="$DOCKER_TAG"
 EOF
+    ${executor_class}_generate_dnf_dockerfile "$executor_class" \
+        "$executor_os_name" "$from_image" "$executor_image"
 }
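
For reference, a hypothetical header emitted by generate_dnf_dockerfile() for a centos-8 builder image (illustrative tag only):

    FROM centos:8 AS builder-executor-image
    LABEL Description="FD.io CI 'builder' executor docker image for centos-8/x86_64"
    LABEL Vendor="fd.io"
    LABEL Version="2021_03_20_231500_UTC-x86_64"
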
index 5ed538a..f245e2d 100644 (file)
@@ -23,6 +23,15 @@ alias lib_vpp_imported=true
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
 . $CIMAN_DOCKER_SCRIPTS/lib_common.sh
 
+
+VPP_SUPPORTED_EXECUTOR_CLASSES="builder"
+vpp_supported_executor_class() {
+    if ! grep -q "${1:-}" <<< $VPP_SUPPORTED_EXECUTOR_CLASSES ; then
+        return 1
+    fi
+    return 0
+}
+
 make_vpp() {
     local target=$1
     local branch=${2:-"master"}
@@ -71,11 +80,13 @@ make_vpp_test() {
 }
 
 docker_build_setup_vpp() {
-    if [ ! -d "$DOCKER_VPP_DIR" ] ; then
-        echo_log "Cloning VPP into $DOCKER_VPP_DIR..."
-        git clone -q https://gerrit.fd.io/r/vpp $DOCKER_VPP_DIR
+    if vpp_supported_executor_class "$EXECUTOR_CLASS" ; then
+        if [ ! -d "$DOCKER_VPP_DIR" ] ; then
+            echo_log "Cloning VPP into $DOCKER_VPP_DIR..."
+            git clone -q https://gerrit.fd.io/r/vpp $DOCKER_VPP_DIR
+        fi
+        clean_git_repo $DOCKER_VPP_DIR
     fi
-    clean_git_repo $DOCKER_VPP_DIR
 }
 
 # Branches must be listed in chronological order -- oldest stable branch
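
Illustrative use of the new executor-class guard above (classes taken from this change, invocation assumed):

    EXECUTOR_CLASS="csit_shim"; docker_build_setup_vpp   # skipped: csit_shim is not a VPP-supported class
    EXECUTOR_CLASS="builder";   docker_build_setup_vpp   # clones and cleans $DOCKER_VPP_DIR
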
index dfc34b0..b24b229 100644 (file)
@@ -1,7 +1,7 @@
 # lib_yum.sh - Docker build script yum library.
 #              For import only.
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 if [ -n "$(alias lib_yum_imported 2> /dev/null)" ] ; then
     return 0
 fi
-alias lib_yum_imported=true
+alias lib_yum_imported="true"
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
-. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"
 
 dump_yum_package_list() {
     branchname="$(echo $branch | sed -e 's,/,_,')"
@@ -30,82 +30,69 @@ dump_yum_package_list() {
 }
 
 yum_install_packages() {
-    yum install -y $@
+    yum install -y "$@"
 }
 
-yum_install_docker_os_package_dependancies() {
-    yum_install_packages yum-utils
-}
+generate_yum_dockerfile_common() {
+    local executor_class="$1"
+    local executor_image="$2"
 
-yum_install_docker() {
-    yum-config-manager --add-repo \
-                       https://download.docker.com/linux/${OS_ID}/docker-ce.repo
-    yum-config-manager --enablerepo=docker-ce-stable
-    yum_install_packages docker-ce docker-ce-cli containerd.io
-}
+    cat <<EOF >>"$DOCKERFILE"
 
-generate_yum_dockerfile() {
-    local executor_os_name=$1
-    local from_image=$2
-    local executor_image=$3
-    local from_image_os_id="$(echo $from_image | cut -d: -f2)"
-
-    cat <<EOF >$DOCKERFILE
-FROM $from_image AS executor-image
-LABEL Description="FD.io CI executor docker image for $executor_os_name/$OS_ARCH"
-LABEL Vendor="fd.io"
-LABEL Version="$DOCKER_TAG"
+# Create download dir to cache external tarballs
+WORKDIR $DOCKER_DOWNLOADS_DIR
+
+# Copy-in temporary build tree containing
+# ci-management, vpp, & csit git repos
+WORKDIR $DOCKER_BUILD_DIR
+COPY . .
 
 # Build Environment Variables
 ENV FDIOTOOLS_IMAGE=$executor_image
-ENV LC_ALL=en_US.UTF-8
+ENV FDIOTOOLS_EXECUTOR_CLASS=$executor_class
 ENV CIMAN_ROOT="$DOCKER_CIMAN_ROOT"
 ENV PATH=$PATH:$DOCKER_CIMAN_ROOT/docker/scripts
 ENV LF_VENV="/root/lf-venv"
 
-# Copy-in build tree containing
-# ci-management, vpp, & csit git repos
-WORKDIR $DOCKER_BUILD_DIR
-COPY . .
+# Configure locales
+ENV LANG=en_US.UTF-8 LANGUAGE=en_US LC_ALL=en_US.UTF-8
 
 # Install baseline packages (minimum build & utils).
 #
 # ci-management global-jjb requirements:
-#   for lftools:
-#       libxml2-devel
-#       libxslt-devel
-#       xmlstarlet
-#   for lf-env.sh:
-#       facter
+#    for lftools:
+#        libxml2-devel
+#        libxslt-devel
+#        xmlstarlet
+#    for lf-env.sh:
+#        facter
 #   from packer/provision/baseline.sh:
-#       deltarpm
-#       unzip
-#       xz
-#       python3-pip
-#       git
-#       git-review
-#       perl-XML-XPath
-#       make
-#       wget
+#        deltarpm
+#        unzip
+#        xz
+#        python3-pip
+#        git
+#        git-review
+#        perl-XML-XPath
+#        make
+#        wget
 #
-# TODO: Fix broken project requirement install targets
+# TODO:  Fix broken project requirement install targets
+#        graphviz           for 'make bootstrap-doxygen' (VPP)
+#        doxygen            for 'make doxygen' (VPP)
+#        enchant            for 'make docs' (VPP)
+#        libffi-devel       for python cffi install (Ubuntu20.04/VPP/aarch64)
+#        libpcap-devel      for python pypcap install (CSIT)
+#        liblapack-devel    for python numpy/scipy (CSIT/aarch64)
+#        libopenblas-devel  for python numpy/scipy (CSIT/aarch64)
+#        sshpass            for CSIT
 #
-#   graphviz           for 'make bootstrap-doxygen' (VPP)
-#   doxygen            for 'make doxygen' (VPP)
-#   enchant            for 'make docs' (VPP)
-#   libffi-devel       for python cffi install (Ubuntu20.04/VPP/aarch64)
-#   libpcap-devel      for python pypcap install (CSIT)
-#   liblapack-devel    for python numpy/scipy (CSIT/aarch64)
-#   libopenblas-devel  for python numpy/scipy (CSIT/aarch64)
-#   sshpass            for CSIT
-#
-RUN yum update -y \\
-    && yum install -y \\
-        epel-release \\
-    && yum clean all
 RUN yum update -y \\
-    && yum install -y \\
+  && yum install -y epel-release \\
+  && yum clean all
+RUN yum install -y \\
         yum-utils \\
+        bind-utils \\
         deltarpm \\
         doxygen \\
         enchant \\
@@ -127,7 +114,6 @@ RUN yum update -y \\
         libxslt-devel \\
         make \\
         mawk \\
-        mock \\
         perl \\
         perl-XML-XPath \\
         python3-pip \\
@@ -141,25 +127,85 @@ RUN yum update -y \\
         wget \\
         xmlstarlet \\
         xz \\
-    && yum clean all
+  && curl -s https://packagecloud.io/install/repositories/fdio/master/script.rpm.sh | bash \\
+  && curl -fsSL https://get.docker.com | sh \\
+  && yum clean all
 
 # Install packages for all project branches
 #
-RUN yum update -y \\
-    && dbld_install_docker.sh \\
-    && dbld_vpp_install_packages.sh \\
-    && dbld_csit_install_packages.sh \\
-    && dbld_lfit_requirements.sh \\
-    && yum clean all
+RUN dbld_vpp_install_packages.sh \\
+  && dbld_csit_install_packages.sh \\
+  && yum clean all
+EOF
+}
 
-# CI Runtime Environment
-WORKDIR /
-ENV VPP_ZOMBIE_NOCHECK=1
-RUN gem install package_cloud \\
-    && curl -s https://packagecloud.io/install/repositories/fdio/master/script.rpm.sh | sudo bash
+generate_yum_dockerfile_clean() {
+    cat <<EOF >>"$DOCKERFILE"
 
 # Clean up
 RUN dbld_dump_build_logs.sh \\
-    && rm -rf /tmp/*
+  && rm -rf /tmp/*
+EOF
+}
+
+builder_generate_yum_dockerfile() {
+    local executor_class="$1"
+    local executor_os_name="$2"
+    local from_image="$3"
+    local executor_image="$4"
+
+    generate_yum_dockerfile_common "$executor_class" "$executor_image"
+    cat <<EOF >>"$DOCKERFILE"
+
+# Install LF-IT requirements
+ENV LF_VENV="/root/lf-venv"
+RUN dbld_lfit_requirements.sh \\
+  && yum clean all
+
+# Install packagecloud requirements
+RUN gem install package_cloud \\
+  && curl -s https://packagecloud.io/install/repositories/fdio/master/script.rpm.sh | bash
+
+# CI Runtime Environment
+WORKDIR /
+ENV VPP_ZOMBIE_NOCHECK="1"
+# TODO: Mount ccache volume into docker container, then remove this.
+ENV CCACHE_DISABLE="1"
+EOF
+    generate_yum_dockerfile_clean
+}
+
+csit_generate_yum_dockerfile() {
+    echo_log "ERROR: ${FUNCNAME[0]} TBD!"
+    exit 1
+}
+
+csit_dut_generate_yum_dockerfile() {
+    echo_log "ERROR: ${FUNCNAME[0]} TBD!"
+    exit 1
+}
+
+csit_shim_generate_yum_dockerfile() {
+    echo_log "ERROR: ${FUNCNAME[0]} TBD!"
+    exit 1
+}
+
+generate_yum_dockerfile() {
+    local executor_class="$1"
+    local executor_os_name="$2"
+    local from_image="$3"
+    local executor_image="$4"
+
+    cat <<EOF  >"$DOCKERIGNOREFILE"
+**/__pycache__
+*.pyc
+EOF
+    cat <<EOF  >"$DOCKERFILE"
+FROM $from_image AS ${executor_class}-executor-image
+LABEL Description="FD.io CI '$executor_class' executor docker image for $executor_os_name/$OS_ARCH"
+LABEL Vendor="fd.io"
+LABEL Version="$DOCKER_TAG"
 EOF
+    ${executor_class}_generate_yum_dockerfile "$executor_class" \
+        "$executor_os_name" "$from_image" "$executor_image"
 }
index 323dabf..187b640 100755 (executable)
@@ -1,6 +1,6 @@
 #! /bin/bash
 
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -17,12 +17,12 @@ set -euo pipefail
 shopt -s extglob
 
 # Log all output to stdout & stderr to a log file
-logname="/tmp/$(basename $0).$(date +%Y_%m_%d_%H%M%S).log"
+logname="/tmp/$(basename $0).$(date -u +%Y_%m_%d_%H%M%S).log"
 echo -e "\n*** Logging output to $logname ***\n"
 exec > >(tee -a $logname) 2>&1
 
 export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
-. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"
 
 # Global variables
 long_bar="################################################################"
@@ -79,7 +79,7 @@ push_to_dockerhub() {
     for image in "$@" ; do
         set +e
         echo "Pushing '$image' to docker hub..."
-        if ! docker push $image ; then
+        if ! docker push "$image" ; then
             echo "ERROR: 'docker push $image' failed!"
             exit 1
         fi
@@ -182,6 +182,17 @@ get_all_tags_from_dockerhub() {
     echo "$long_bar"
 }
 
+verify_image_version_date_format() {
+    version="$1"
+    # TODO: Remove regex1 when legacy nomenclature is no longer on docker hub.
+    local regex1="^[0-9]{4}_[0-1][0-9]_[0-3][0-9]_[0-2][0-9][0-5][0-9][0-5][0-9]$"
+    local regex2="^[0-9]{4}_[0-1][0-9]_[0-3][0-9]_[0-2][0-9][0-5][0-9][0-5][0-9]_UTC$"
+    if [[ "$version" =~ $regex1 ]] || [[ "$version" =~ $regex2 ]]; then
+        return 0
+    fi
+    return 1
+}
+
 verify_image_name() {
     image_not_found=""
     # Invalid user
@@ -192,9 +203,7 @@ verify_image_name() {
     # Invalid version
     if [ -z "$image_not_found" ] \
            && [ "$image_version" != "prod" ] \
-           && ! [[ "$image_version" =~ \
-           ^[0-9]{4}_[0-1][0-9]_[0-3][0-9]_[0-2][0-9][0-5][0-9][0-5][0-9]$ ]]
-    then
+           && ! verify_image_version_date_format "$image_version" ; then
         image_not_found="true"
         echo "ERROR: invalid version '$image_version' in '$image_name_new'!"
     fi
@@ -213,7 +222,7 @@ verify_image_name() {
 docker_tag_image() {
     echo ">>> docker tag $1 $2"
     set +e
-    docker tag $1 $2
+    docker tag "$1" "$2"
     local retval="$?"
     set -e
     if [ "$retval" -ne "0" ] ; then
@@ -224,7 +233,7 @@ docker_tag_image() {
 docker_rmi_tag() {
     set +e
     echo ">>> docker rmi $1"
-    docker rmi $1
+    docker rmi "$1"
     local retval="$?"
     set -e
     if [ "$retval" -ne "0" ] ; then
@@ -260,8 +269,8 @@ inspect_images() {
 
 revert_prod_image() {
     inspect_images "EXISTING "
-    docker_tag_image $docker_id_prod $image_name_prev
-    docker_tag_image $docker_id_prev $image_name_prod
+    docker_tag_image "$docker_id_prod" "$image_name_prev"
+    docker_tag_image "$docker_id_prev" "$image_name_prod"
     get_image_id_tags
     inspect_images "REVERTED "
 
@@ -290,25 +299,25 @@ revert_prod_image() {
 
 promote_new_image() {
     inspect_images "EXISTING "
-    docker_tag_image $docker_id_prod $image_name_prev
-    docker_tag_image $docker_id_new $image_name_prod
+    docker_tag_image "$docker_id_prod" "$image_name_prev"
+    docker_tag_image "$docker_id_new" "$image_name_prod"
     get_image_id_tags
     inspect_images "PROMOTED "
 
     local yn=""
     while true; do
         read -p "Push promoted tags to '$image_user/$image_repo' (yes/no)? " yn
-        case ${yn:0:1} in
+        case "${yn:0:1}" in
             y|Y )
                 break ;;
             n|N )
                 echo -e "\nABORTING PROMOTION!\n"
-                docker_tag_image $docker_id_prev $image_name_prod
+                docker_tag_image "$docker_id_prev" "$image_name_prod"
                 local restore_both="$(echo $restore_cmd | mawk '{print $5}')"
                 if [[ -n "$restore_both" ]] ; then
-                    docker_tag_image $image_realname_prev $image_name_prev
+                    docker_tag_image "$image_realname_prev" "$image_name_prev"
                 else
-                    docker_rmi_tag $image_name_prev
+                    docker_rmi_tag "$image_name_prev"
                     image_name_prev=""
                     docker_id_prev=""
                 fi
@@ -320,12 +329,12 @@ promote_new_image() {
         esac
     done
     echo
-    push_to_dockerhub $image_name_new $image_name_prev $image_name_prod
+    push_to_dockerhub "$image_name_new" "$image_name_prev" "$image_name_prod"
     inspect_images ""
     echo_restore_cmd
 }
 
-must_be_run_as_root
+must_be_run_as_root_or_docker_group
 
 # Validate arguments
 num_args="$#"
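
Illustrative calls (values assumed) showing what the version check added above accepts:

    verify_image_version_date_format "2021_03_20_231500"      # returns 0 (legacy tag format)
    verify_image_version_date_format "2021_03_20_231500_UTC"  # returns 0 (new UTC tag format)
    verify_image_version_date_format "latest"                 # returns 1 ('prod' is special-cased by the caller)
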
diff --git a/jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804-l.yaml b/jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804-l.yaml
deleted file mode 100644 (file)
index 747f61a..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-templates:
-  - constraints:
-      - ltarget: "^${attr.cpu.arch}"
-        operand: "="
-        rtarget: "amd64"
-      - ltarget: "^${node.class}"
-        operand: "="
-        rtarget: "builder"
-    cpu: 20000
-    image: "snergster/vpp-ubuntu18"
-    labels: "ubuntu1804-l"
-    memory: 32000
-    prefix: "prod-amd"
diff --git a/jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804-us.yaml b/jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804-us.yaml
deleted file mode 100644 (file)
index b40b58e..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-templates:
-  - constraints:
-      - ltarget: "^${attr.cpu.arch}"
-        operand: "="
-        rtarget: "amd64"
-      - ltarget: "^${node.class}"
-        operand: "="
-        rtarget: "builder"
-    cpu: 14000
-    image: "snergster/vpp-ubuntu18"
-    labels: "ubuntu1804-us"
-    memory: 14000
-    prefix: "prod-amd"
diff --git a/jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804arm-s.yaml b/jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804arm-s.yaml
deleted file mode 100644 (file)
index 5f3afcc..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-templates:
-  - image: "snergster/vpp-arm-ubuntu18"
-    labels: "ubuntu1804arm-s"
-    memory: 16000
-    prefix: "prod-arm"
diff --git a/jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804arm-us.yaml b/jenkins-config/clouds/nomad/FDIONOMAD/ubuntu1804arm-us.yaml
deleted file mode 100644 (file)
index f898934..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-templates:
-  - image: "snergster/vpp-arm-ubuntu18"
-    labels: "ubuntu1804arm-us"
-    prefix: "prod-arm"
diff --git a/jenkins-config/clouds/nomad/FDIONOMAD/vpp-csit-arm-ubuntu18.yaml b/jenkins-config/clouds/nomad/FDIONOMAD/vpp-csit-arm-ubuntu18.yaml
deleted file mode 100644 (file)
index 9ab4631..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-templates:
-  - constraints:
-      - ltarget: "^${node.class}"
-        operand: "="
-        rtarget: "csitarm"
-      - ltarget: "^${attr.cpu.arch}"
-        operand: "="
-        rtarget: "arm64"
-    image: "snergster/vpp-arm-ubuntu18"
-    labels: "vpp-csit-arm-ubuntu18"
-    prefix: "prod-csit-arm"
diff --git a/jenkins-config/clouds/nomad/FDIONOMAD/vpp-csit-device.yaml b/jenkins-config/clouds/nomad/FDIONOMAD/vpp-csit-device.yaml
deleted file mode 100644 (file)
index ef4f9ed..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-templates:
-  - constraints:
-      - ltarget: "^${node.class}"
-        operand: "="
-        rtarget: "csit"
-      - ltarget: "^${attr.cpu.arch}"
-        operand: "="
-        rtarget: "amd64"
-    cpu: 10000
-    image: "snergster/vpp-ubuntu18"
-    labels: "vpp-csit-device vpp-csit-ubuntu18"
-    memory: 18000
-    prefix: "prod-csit"
diff --git a/jenkins-config/clouds/nomad/FDIONOMAD/vpp-csit-ubuntu18.yaml b/jenkins-config/clouds/nomad/FDIONOMAD/vpp-csit-ubuntu18.yaml
deleted file mode 100644 (file)
index ef4f9ed..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-templates:
-  - constraints:
-      - ltarget: "^${node.class}"
-        operand: "="
-        rtarget: "csit"
-      - ltarget: "^${attr.cpu.arch}"
-        operand: "="
-        rtarget: "amd64"
-    cpu: 10000
-    image: "snergster/vpp-ubuntu18"
-    labels: "vpp-csit-device vpp-csit-ubuntu18"
-    memory: 18000
-    prefix: "prod-csit"
index c44a4e0..8031d42 100644 (file)
         os: 'ubuntu2004'
 # [end] VPP-AARCH64 PROJECT
 
-# VPP-CSIT-VERIFY PROJECT
+# VPP-CSIT-VERIFY-DEVICE PROJECT
 - project:
-    name: vpp-csit-verify
+    name: vpp-csit-verify-device
     jobs:
       - 'vpp-csit-verify-device-perpatch':
           make-parallel-jobs: '32'
           exclude:
-            - device-node-arch: '1n-tx2'
+            - testbed-arch: '1n-tx2'
       - 'vpp-csit-verify-device-periodic':
           make-parallel-jobs: '32'
           periodicity: 'H * * * *'
           exclude:
             - stream: '2009'
             - stream: '2101'
-            - device-node-arch: '1n-skx'
-      - 'vpp-csit-verify-perf-{stream}-{node-arch}'
+            - testbed-arch: '1n-skx'
     project: 'vpp'
-    os: ubuntu1804
-    device-executor: 'vpp-csit-device'
-    executor: '{os}-us'
     skip-vote: 'false'
     stream:
       - master:
           branch: 'stable/2101'
           branch-refspec: ''
           repo-stream-part: 'stable.2101'
-    device-node-arch:
-      - 1n-skx
+    testbed-arch:
+      - 1n-skx:
+          os: 'ubuntu2004'
+          executor-arch: 'x86_64'
       - 1n-tx2:
-          device-executor: 'vpp-csit-arm-ubuntu18'
-    node-arch:
-      - 2n-clx
-      - 2n-skx
-      - 2n-zn2
-      - 3n-skx
-      - 3n-hsw
-      - 2n-dnv
-      - 3n-dnv
+          os: 'ubuntu2004'
+          executor-arch: 'aarch64'
+
+# [end] VPP-CSIT-VERIFY-DEVICE PROJECT
+
+# VPP-CSIT-VERIFY-PERF PROJECT
+- project:
+    name: vpp-csit-verify-perf
+    jobs:
+      - 'vpp-csit-verify-perf-{stream}-{os}-{executor-arch}-{testbed-arch}'
+    project: 'vpp'
+    skip-vote: 'false'
+    stream:
+      - master:
+          branch: 'master'
+          branch-refspec: ''
+          repo-stream-part: 'master'
+      - '2009':
+          branch: 'stable/2009'
+          branch-refspec: ''
+          repo-stream-part: 'stable.2009'
+      - '2101':
+          branch: 'stable/2101'
+          branch-refspec: ''
+          repo-stream-part: 'stable.2101'
+    testbed-arch:
+      - 2n-clx:
+          os: 'ubuntu2004'
+          executor-arch: 'x86_64'
+      - 2n-skx:
+          os: 'ubuntu2004'
+          executor-arch: 'x86_64'
+      - 2n-zn2:
+          os: 'ubuntu2004'
+          executor-arch: 'x86_64'
+      - 3n-skx:
+          os: 'ubuntu2004'
+          executor-arch: 'x86_64'
+      - 3n-hsw:
+          os: 'ubuntu2004'
+          executor-arch: 'x86_64'
+      - 2n-dnv:
+          os: 'ubuntu2004'
+          executor-arch: 'x86_64'
+      - 3n-dnv:
+          os: 'ubuntu2004'
+          executor-arch: 'x86_64'
       - 3n-tsh:
-          executor: 'ubuntu1804arm-s'
+          os: 'ubuntu2004'
+          executor-arch: 'aarch64'
           make-parallel-jobs: '16'
       - 2n-tx2:
-          executor: 'ubuntu1804arm-s'
+          os: 'ubuntu2004'
+          executor-arch: 'aarch64'
           make-parallel-jobs: '16'
 
-# [end] VPP-CSIT-VERIFY PROJECT
+# [end] VPP-CSIT-VERIFY-PERF PROJECT
 
 # VPP-CSIT-API-CRC PROJECT
 #
 - project:
     name: vpp-csit-api-crc
     jobs:
-      - 'vpp-csit-verify-api-crc-{stream}'
+      - 'vpp-csit-verify-api-crc-{stream}-{os}-{executor-arch}'
     project: 'vpp'
-    executor: 'vpp-csit-device'
+    executor-arch: 'x86_64'
     skip-vote: 'false'
     stream:
       - master:
           branch: 'stable/2101'
           branch-refspec: ''
           repo-stream-part: 'stable.2101'
-    os: ubuntu1804
+    os: ubuntu2004
 # [end] VPP-CSIT-API-CRC PROJECT
 
 # LF RELENG VPP PROJECTS
 
 # VPP-CSIT-VERIFY-DEVICE-PERIODIC JOB TEMPLATE
 - job-template:
-    name: 'vpp-csit-verify-device-{stream}-{device-node-arch}'
+    name: 'vpp-csit-verify-device-{stream}-{os}-{executor-arch}-{testbed-arch}'
     id: 'vpp-csit-verify-device-periodic'
     description: |
       <ul>
           <li>executor
               <ul>
-                  <li>{device-executor}
+                  <li>csit-builder-{os}-prod-{executor-arch}
+              </ul>
+          <li>testbed architecture
+              <ul>
+                  <li>{testbed-arch}
               </ul>
       </ul>
 
     project-type: freestyle
-    node: '{device-executor}'
+    node: 'csit-builder-{os}-prod-{executor-arch}'
     concurrent: true
     archive-artifacts: '**/csit_current/**/*.*'
     latest-only: false
 
 # VPP-CSIT-VERIFY-DEVICE-PERPATCH JOB TEMPLATE
 - job-template:
-    name: 'vpp-csit-verify-device-{stream}-{device-node-arch}'
+    name: 'vpp-csit-verify-device-{stream}-{os}-{executor-arch}-{testbed-arch}'
     id: 'vpp-csit-verify-device-perpatch'
     description: |
       <ul>
           <li>executor
               <ul>
-                  <li>{device-executor}
+                  <li>csit-builder-{os}-prod-{executor-arch}
+              </ul>
+          <li>testbed architecture
+              <ul>
+                  <li>{testbed-arch}
               </ul>
       </ul>
 
     project-type: freestyle
-    node: '{device-executor}'
+    node: 'csit-builder-{os}-prod-{executor-arch}'
     concurrent: true
     archive-artifacts: '**/csit_current/**/*.*'
     latest-only: false
 
 # VPP-CSIT-VERIFY-PERF JOB TEMPLATE
 - job-template:
-    name: 'vpp-csit-verify-perf-{stream}-{node-arch}'
+    name: 'vpp-csit-verify-perf-{stream}-{os}-{executor-arch}-{testbed-arch}'
     description: |
       <ul>
           <li>executor
               <ul>
-                  <li>{executor}
+                  <li>builder-{os}-prod-{executor-arch}
+              </ul>
+          <li>testbed architecture
+              <ul>
+                  <li>{testbed-arch}
               </ul>
           <li>Objective
           Contrary to csit-vpp job, this also measures the parent performance,
       </ul>
 
     project-type: freestyle
-    node: '{executor}'
+    node: 'builder-{os}-prod-{executor-arch}'
     concurrent: true
     archive-artifacts: >
       **/csit_current/**/*.*
       - gerrit-trigger-csit-perftest:
           project: '{project}'
           branch: '{branch}'
-          trigger-word: 'perftest-{node-arch}'
+          trigger-word: 'perftest-{testbed-arch}'
           skip-vote: true
 
     builders:
 
 # VPP-CSIT-VERIFY-API-CRC JOB TEMPLATE
 - job-template:
-    name: 'vpp-csit-verify-api-crc-{stream}'
+    name: 'vpp-csit-verify-api-crc-{stream}-{os}-{executor-arch}'
 
     project-type: freestyle
-    node: '{os}-us'
+    node: 'builder-{os}-prod-{executor-arch}'
     concurrent: true
     latest-only: false
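
For clarity, an assumed JJB expansion of the renamed templates above, using parameter values from the project definitions in this change:

    vpp-csit-verify-perf-master-ubuntu2004-x86_64-2n-clx    runs on node 'builder-ubuntu2004-prod-x86_64'
    vpp-csit-verify-device-master-ubuntu2004-x86_64-1n-skx  runs on node 'csit-builder-ubuntu2004-prod-x86_64'
    vpp-csit-verify-api-crc-master-ubuntu2004-x86_64        runs on node 'builder-ubuntu2004-prod-x86_64'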