1 # lib_csit.sh - Docker build script CSIT library.
4 # Copyright (c) 2023 Cisco and/or its affiliates.
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at:
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
17 # Don't import more than once.
# NOTE(review): the guard uses a bash alias as an "already sourced" sentinel;
# the body of this 'if' (presumably an early return, plus 'fi') is elided in
# this view of the file.
18 if [ -n "$(alias lib_csit_imported 2> /dev/null)" ] ; then
21 alias lib_csit_imported=true
# Locate the docker-scripts directory relative to this file (overridable via
# the environment), then pull in the shared helper libraries.
23 export CIMAN_DOCKER_SCRIPTS="${CIMAN_DOCKER_SCRIPTS:-$(dirname $BASH_SOURCE)}"
24 . "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"
25 . "$CIMAN_DOCKER_SCRIPTS/lib_apt.sh"
# Executor classes for which CSIT setup is performed (space-separated list).
27 CSIT_SUPPORTED_EXECUTOR_CLASSES="builder csit_dut"
# csit_supported_executor_class: succeed (0) iff "$1" matches one of the
# classes above.  Note this is a substring match via grep, not an exact
# word match.
28 csit_supported_executor_class() {
29 if ! grep -q "${1:-}" <<< "$CSIT_SUPPORTED_EXECUTOR_CLASSES" ; then
# NOTE(review): intervening lines are elided in this view — the failure
# branch/'fi'/'}' above, and the 'case' statement this arm belongs to
# (presumably a companion csit_supported_os-style check) — TODO confirm
# against the full file.
37 ubuntu-22.04) return 0 ;;
# csit_checkout_branch_for_vpp: check out the CSIT branch matching a given
# VPP branch, using CSIT's own branch.sh helper, and record the resulting
# CSIT branch name in $csit_branch for the caller.
# NOTE(review): $vpp_branch is presumably assigned from "$1" on an elided
# line — TODO confirm.
43 csit_checkout_branch_for_vpp() {
45 local csit_dir="$DOCKER_CSIT_DIR"
46 local csit_bash_function_dir="$csit_dir/resources/libraries/bash/function"
48 # import checkout_csit_for_vpp() if not defined
49 set +e && [ -z "$(declare -f checkout_csit_for_vpp)" ] \
50 && source "$csit_bash_function_dir/branch.sh"
# CSIT_DIR is set only for this invocation; branch.sh's helper expects it.
51 CSIT_DIR="$csit_dir" checkout_csit_for_vpp "$vpp_branch"
# csit_branch is consumed by callers, hence the SC2034 (unused var) disable.
53 # shellcheck disable=SC2034
54 csit_branch="$(git branch | grep -e '^*' | mawk '{print $2}')"
# csit_install_packages: install the OS packages that CSIT's ansible roles
# declare for this OS/arch into the image being built.
# NOTE(review): $branch is presumably assigned from "$1" on an elided line —
# TODO confirm against the full file.
57 csit_install_packages() {
# Convert vpp branch nomenclature (slashes) for use in a log file name.
60 branchname="$(echo $branch | sed -e 's,/,_,')"
61 local csit_dir="$DOCKER_CSIT_DIR"
62 local csit_ansible_dir="$csit_dir/fdio.infra.ansible"
# Older CSIT branches keep the ansible tree under resources/tools instead.
63 if [ ! -d "$csit_ansible_dir" ] ; then
64 csit_ansible_dir="$csit_dir/resources/tools/testbed-setup/ansible"
66 local bld_log="$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME"
67 bld_log="${bld_log}-$branchname-csit_install_packages-bld.log"
# pyyaml is required by dbld_csit_find_ansible_packages.py used below.
70 python3 -m pip install pyyaml
# Ansible roles whose package lists are excluded (passed as grep -e args).
72 local exclude_roles="-e calibration -e kernel -e mellanox -e nomad -e consul"
73 [ "$OS_ARCH" = "aarch64" ] && exclude_roles="$exclude_roles -e iperf"
75 # Not in double quotes to let bash remove newline characters
# Find every ansible yaml file declaring per-OS packages, minus the
# excluded roles, then extract the package names for this OS/arch.
77 yaml_files="$(grep -r packages_by $csit_ansible_dir | cut -d: -f1 | sort -u | grep -v $exclude_roles)"
78 packages="$(dbld_csit_find_ansible_packages.py --$OS_ID --$OS_ARCH $yaml_files)"
# Drop release-codename tokens and remap packages renamed across releases.
79 packages="${packages/bionic /}"
80 packages="${packages/focal /}"
81 packages="${packages/libmbedcrypto1/libmbedcrypto3}"
82 packages="${packages/libmbedtls10/libmbedtls12}"
# python- -> python3-, then de-duplicate the final whitespace-separated list.
83 packages="$(echo ${packages//python\-/python3\-} | tr ' ' '\n' | sort -u | xargs)"
85 if [ -n "$packages" ] ; then
# NOTE(review): a per-OS dispatch (presumably a 'case' on $OS_ID) around the
# two apt_install_packages calls is elided in this view — TODO confirm.
88 apt_install_packages $packages
91 apt_install_packages $packages
94 echo "Unsupported OS ($OS_ID): CSIT packages NOT INSTALLED!"
102 # ensure PS1 is defined (used by virtualenv activate script)
# NOTE(review): this chunk is the interior of a function whose definition
# line is elided in this view; the build-log name below suggests it is
# csit_pip_cache — TODO confirm.  It builds a CSIT virtualenv and installs
# CSIT's python requirements so they land in pip's cache for the image.
104 CSIT_DIR="$DOCKER_CSIT_DIR"
# Only proceed when $CSIT_DIR looks like a real CSIT checkout.
106 if [ -f "$CSIT_DIR/VPP_REPO_URL" ] \
107 && [ -f "$CSIT_DIR/requirements.txt" ]; then
110 # use bash variable substitution to replace '/' with '_' to convert from
111 # vpp to csit branch name nomenclature
112 branchname="${branch////_}"
113 local csit_bash_function_dir="$CSIT_DIR/resources/libraries/bash/function"
114 local bld_log="$DOCKER_BUILD_LOG_DIR"
115 bld_log="${bld_log}/$FDIOTOOLS_IMAGENAME-$branchname-csit_pip_cache-bld.log"
116 local pip_cmd="python3 -m pip --disable-pip-version-check"
117 export PYTHONPATH=$CSIT_DIR
119 description="Install CSIT python packages from $branch branch"
120 echo_log " Starting $description..."
# Start from a clean virtualenv directory.
122 rm -rf "$PYTHONPATH/env"
124 # Virtualenv version is pinned in common.sh in newer csit branches.
125 # (note: xargs removes leading/trailing spaces)
126 local common_sh="$csit_bash_function_dir/common.sh"
127 install_virtualenv="$(grep 'virtualenv' $common_sh | grep pip | grep install | cut -d'|' -f1 | xargs)"
129 virtualenv --no-download --python="$(which python3)" "$CSIT_DIR/env"
130 source "$CSIT_DIR/env/bin/activate"
# On aarch64, pre-install the numpy version pinned in requirements.txt.
132 if [ "$OS_ARCH" = "aarch64" ] ; then
134 numpy_ver="$(grep numpy $PYTHONPATH/requirements.txt)"
135 [ -n "$numpy_ver" ] && $pip_cmd install $numpy_ver 2>&1 | \
139 # TODO: Update CSIT release branches to PyYAML==6.0.1 or greater
140 # then remove this workaround: https://github.com/yaml/pyyaml/issues/736
141 if grep -q 'PyYAML==5.4.1' "$PYTHONPATH/requirements.txt" ; then
143 constraintfile="/tmp/constraint.txt"
145 # create a constraint file that limits the Cython version to one that should work
146 echo 'Cython < 3.0' > "$constraintfile"
148 # seed pip's local wheel cache with a PyYAML wheel
149 PIP_CONSTRAINT="$constraintfile" $pip_cmd wheel PyYAML==5.4.1
151 rm -f "$constraintfile"
154 # Install csit python requirements
155 $pip_cmd install -r "$CSIT_DIR/requirements.txt" 2>&1 | \
157 # Install tox python requirements
158 $pip_cmd install -r "$CSIT_DIR/tox-requirements.txt" 2>&1 | \
160 # Run tox which installs pylint requirements
161 pushd "$CSIT_DIR" >& /dev/null || exit 1
163 popd >& /dev/null || exit 1
165 # Clean up virtualenv directories
169 echo_log " Completed $description!"
# NOTE(review): this message is presumably the 'else' branch of the
# CSIT_DIR validity check above (branch structure elided in this view).
171 echo_log "ERROR: Missing or invalid CSIT_DIR: '$CSIT_DIR'!"
# docker_build_setup_csit: for executor classes that use CSIT, clone the
# CSIT repo into $DOCKER_CSIT_DIR (if not already present) and scrub the
# working tree via clean_git_repo (from lib_common.sh).
176 docker_build_setup_csit() {
177 if csit_supported_executor_class "$EXECUTOR_CLASS" ; then
178 if [ ! -d "$DOCKER_CSIT_DIR" ] ; then
179 echo_log "Cloning CSIT into $DOCKER_CSIT_DIR..."
180 git clone -q https://gerrit.fd.io/r/csit "$DOCKER_CSIT_DIR"
182 clean_git_repo "$DOCKER_CSIT_DIR"
# csit_dut_generate_docker_build_files: write the supervisord.conf used by
# the csit_dut container image (supervisor runs vpp as a managed program).
186 csit_dut_generate_docker_build_files() {
187 local build_files_dir="$DOCKER_BUILD_FILES_DIR"
189 mkdir -p "$build_files_dir"
# The heredoc below is supervisord.conf content (its EOF terminator is
# elided in this view); it is runtime output — do not edit its text.
190 cat <<EOF >"$build_files_dir/supervisord.conf"
192 file = /tmp/supervisor.sock
195 [rpcinterface:supervisor]
196 supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
199 serverurl = unix:///tmp/supervisor.sock
202 pidfile = /tmp/supervisord.pid
203 identifier = supervisor
205 logfile = /tmp/supervisord.log
210 command = /usr/bin/vpp -c /etc/vpp/startup.conf
213 redirect_stderr = true
# csit_builder_generate_docker_build_files: write the ssh materials used by
# csit builder images: a deliberately-committed throwaway private key
# ("badkey" — see TODO below) and an ssh config that disables host-key
# checking.
218 csit_builder_generate_docker_build_files() {
219 local build_files_dir="$DOCKER_BUILD_FILES_DIR"
# Assemble PEM armor fragments; $dashes is presumably defined on an elided
# line above — TODO confirm.
221 local dbeg="${dashes}BEGIN"
222 local dend="${dashes}END"
224 local kd="KEY$dashes"
226 # TODO: Verify why badkey is required & figure out how to avoid it.
227 mkdir -p "$build_files_dir"
# The heredoc below is the key body (PEM armor lines and EOF terminator are
# elided in this view); it is runtime output — do not edit its text.
228 cat <<EOF >"$build_files_dir/badkey"
230 MIIEowIBAAKCAQEAslDXf4kZOQI8OGQQdIF8o83nBM0B4fzHLYLxxiY2rKiQ5MGM
231 mQa7p1KKzmd5/NlvFRnXefnjSDQljjPxEY7mh457rX2nXvqHD4GUXZPpBIE73rQ1
232 TViIAXdDzFXJ6ee4yX8ewmVakzYBnlUPDidkWyRnjm/xCgKUCO+CD5AH3ND0onks
233 OYAtHqhDh29/QMIKdMnK87FBxfzhInHwpqPur76zBnpw3u36ylKEymDFrO5dwzsh
234 QvDWjsYRg9ydTXubtwP6+MOpjdR1SNKxcCHKJrPrdAeJW9jg1imYmYpEHZ/P3qsL
235 Jm0hGWbFjdxZLIYIz0vN/nTalcAeqT2OWKrXuwIDAQABAoIBAQCcj1g2FOR9ZlYD
236 WPANqucJVy4/y9OcXHlwnyiyRjj47WOSRdGxRfUa2uEeikHT3ACo8TB8WwfQDGDw
237 8u/075e+az5xvAJo5OQSnD3sz4Hmv6UWSvkFuPZo+xMe5C/M2/QljiQuoBifaeqP
238 3rTCQ5ncYCFAMU7b8BmTot551Ybhu2jCbDMHU7nFHEFOvYinkwfVcaqkrVDUuH+D
239 c3NkAEH9Jz2MEYA2Va4uqFpGt5lfGiED2kMenwPa8eS5LS5HJsxkfMHGlaHXHFUb
240 D+dG/qJtSslVxdzVPgEGvzswo6TgtY1nZTQcB8U63rktFg38B7QGtOkvswAYzxyk
241 HdMIiU3RAoGBAOdIEQRcAThj9eiIFywtBgLBOSg4SoOnvELLr6lgUg2+ICmx06LQ
242 yaai1QRdOWw1VwZ6apNCD00kaUhBu+ou93yLSDnR2uYftkylhcnVuhDyIeNyb81V
243 hV2z0WuNv3aKBFlBxaq391S7WW1XxhpAAagm8fZZur73wV390EVd/hZJAoGBAMVf
244 negT2bg5PVKWvsiEU6eZ00W97tlEDLclkiZawXNnM2/c+2x1Tks6Yf1E/j2FFTB4
245 r0fesbwN346hCejtq5Bup5YEdFA3KtwT5UyeQQLFGYlCtRmBtOd10wkRS93D0tpX
246 iIqkf43Gpx6iFdvBWY5A7N+ZmojCy9zpL5TJ4G3jAoGADOGEoRuGrd9TWMoLkFhJ
247 l2mvhz/rVn3HDGlPtT06FK3cGLZgtRavxGoZNw8CHbayzBeRS/ZH5+H5Qx72GkrX
248 WcZgFWhMqrhlbMtjMiSHIl556LL86xCyRs+3ACh6211AdMAnBCUOz1dH2cEjtV6P
249 ORBCNZg1wGEIEfYK3XIorpECgYBubXfQj8KhUs0fdx3Y3Ehdni/ZdlG7F1qx4YBq
250 mx5e7d+Wd6Hn5Z3fcxO9+yrvypS3YN5YrJzuZSiuCSWdP9RcY7y5r1ZQRv1g0nTZ
251 MDWZUiNea4cddTd8xKxFB3tV4SkIZi8LustuzDVWa0Mlh4EOmP6uf6c5WxtqRsEL
252 UwORFwKBgEjZsfmZGBurjOtSrcsteulOB0D2nOqPVRWXmbSNJT/l73DkEllvVyA/
253 wdW39nyFrA2Qw1K2F+l8DkzMd/WEjmioSWCsvTkXlvrqPfByKg01zCbYy/mhRW7d
254 7sQrPOIl8ygsc3JrxmvzibdWmng1MehvpAM1ogWeTUa1lsDTNJ/6
257 chmod 600 "$build_files_dir/badkey"
# ssh client config for the builder: skip host-key verification entirely.
258 cat <<EOF >"$build_files_dir/sshconfig"
260 StrictHostKeyChecking no
261 UserKnownHostsFile=/dev/null
# csit_shim_generate_docker_build_files: write the build files for the
# csit_shim image: the throwaway public key matching "badkey", an ssh config
# that disables host-key checking, and the "wrapdocker" docker-in-docker
# bootstrap script.  The heredoc contents below are runtime output (their
# EOF terminators are elided in this view, and the function continues past
# the end of it); do not edit their text.  Inside the wrapdocker heredoc,
# '\$' escapes defer expansion to when the generated script runs.
265 csit_shim_generate_docker_build_files() {
266 local build_files_dir="$DOCKER_BUILD_FILES_DIR"
267 # TODO: Verify why badkey is required & figure out how to avoid it.
268 local badkey='AAAAB3NzaC1yc2EAAAADAQABAAABAQCyUNd/iRk5Ajw4ZBB0gXyjzecEzQHh/MctgvHGJjasqJDkwYyZBrunUorOZ3n82W8VGdd5+eNINCWOM/ERjuaHjnutfade+ocPgZRdk+kEgTvetDVNWIgBd0PMVcnp57jJfx7CZVqTNgGeVQ8OJ2RbJGeOb/EKApQI74IPkAfc0PSieSw5gC0eqEOHb39Awgp0ycrzsUHF/OEicfCmo+6vvrMGenDe7frKUoTKYMWs7l3DOyFC8NaOxhGD3J1Ne5u3A/r4w6mN1HVI0rFwIcoms+t0B4lb2ODWKZiZikQdn8/eqwsmbSEZZsWN3FkshgjPS83+dNqVwB6pPY5Yqte7'
270 mkdir -p "$build_files_dir"
271 # TODO: Verify why badkeypub is required & figure out how to avoid it.
272 echo "ssh-rsa $badkey ejk@bhima.local" >"$build_files_dir/badkeypub"
274 cat <<EOF >"$build_files_dir/sshconfig"
276 StrictHostKeyChecking no
277 UserKnownHostsFile=/dev/null
279 cat <<EOF >"$build_files_dir/wrapdocker"
282 # Ensure that all nodes in /dev/mapper correspond to mapped devices currently loaded by the device-mapper kernel driver
285 # First, make sure that cgroups are mounted correctly.
286 CGROUP=/sys/fs/cgroup
292 mountpoint -q \$CGROUP ||
293 mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup \$CGROUP || {
294 echo "Could not make a tmpfs mount. Did you use --privileged?"
298 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
300 mount -t securityfs none /sys/kernel/security || {
301 echo "Could not mount /sys/kernel/security."
302 echo "AppArmor detection and --privileged mode might break."
306 # Mount the cgroup hierarchies exactly as they are in the parent system.
307 for SUBSYS in \$(cut -d: -f2 /proc/1/cgroup)
309 [ -d \$CGROUP/\$SUBSYS ] || mkdir \$CGROUP/\$SUBSYS
310 mountpoint -q \$CGROUP/\$SUBSYS ||
311 mount -n -t cgroup -o \$SUBSYS cgroup \$CGROUP/\$SUBSYS
313 # The two following sections address a bug which manifests itself
314 # by a cryptic "lxc-start: no ns_cgroup option specified" when
315 # trying to start containers withina container.
316 # The bug seems to appear when the cgroup hierarchies are not
317 # mounted on the exact same directories in the host, and in the
320 # Named, control-less cgroups are mounted with "-o name=foo"
321 # (and appear as such under /proc/<pid>/cgroup) but are usually
322 # mounted on a directory named "foo" (without the "name=" prefix).
323 # Systemd and OpenRC (and possibly others) both create such a
324 # cgroup. To avoid the aforementioned bug, we symlink "foo" to
325 # "name=foo". This shouldn't have any adverse effect.
326 echo \$SUBSYS | grep -q ^name= && {
327 NAME=\$(echo \$SUBSYS | sed s/^name=//)
328 ln -s \$SUBSYS \$CGROUP/\$NAME
331 # Likewise, on at least one system, it has been reported that
332 # systemd would mount the CPU and CPU accounting controllers
333 # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
334 # but on a directory called "cpu,cpuacct" (note the inversion
335 # in the order of the groups). This tries to work around it.
336 [ \$SUBSYS = cpuacct,cpu ] && ln -s \$SUBSYS \$CGROUP/cpu,cpuacct
339 # Note: as I write those lines, the LXC userland tools cannot setup
340 # a "sub-container" properly if the "devices" cgroup is not in its
341 # own hierarchy. Let's detect this and issue a warning.
342 grep -q :devices: /proc/1/cgroup ||
343 echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
344 grep -qw devices /proc/1/cgroup ||
345 echo "WARNING: it looks like the 'devices' cgroup is not mounted."
347 # Now, close extraneous file descriptors.
348 pushd /proc/self/fd >/dev/null
352 # Keep stdin/stdout/stderr
355 # Nuke everything else
364 # If a pidfile is still around (for example after a container restart),
365 # delete it so that docker can start.
366 rm -rf /var/run/docker.pid
368 # If we were given a PORT environment variable, start as a simple daemon;
369 # otherwise, spawn a shell as well
372 exec dockerd -H 0.0.0.0:\$PORT -H unix:///var/run/docker.sock \
375 if [ "\$LOG" == "file" ]
377 dockerd \$DOCKER_DAEMON_ARGS &>/var/log/docker.log &
379 dockerd \$DOCKER_DAEMON_ARGS &
381 (( timeout = 60 + SECONDS ))
382 until docker info >/dev/null 2>&1
384 if (( SECONDS >= timeout )); then
385 echo 'Timed out trying to connect to internal docker host.' >&2
390 [[ \$1 ]] && exec "\$@"