Not fixing .rst, .md, .yaml, conf.py, .vat, and so on.
Change-Id: Icc585d6dbebc8eb5c483b10326302571e94c614d
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
13 files changed:
# to dissuade non-tox callers.
# This script runs pylint and propagates its exit code.
-# Config is taken from pylint.cfg, and proper virtualenv is assumed to be active.
+# Config is taken from pylint.cfg,
+# and proper virtualenv is assumed to be active.
# The pylint output stored to pylint.log (overwriting).
# "set -eu" handles failures from the following two lines.
repository installation was not successful."
fi
- packages=$(apt-cache -o Dir::Etc::SourceList=${apt_fdio_repo_file} \
+ pkgs=$(apt-cache -o Dir::Etc::SourceList=${apt_fdio_repo_file} \
-o Dir::Etc::SourceParts=${apt_fdio_repo_file} dumpavail \
| grep Package: | cut -d " " -f 2 | grep vpp) || {
die "Retrieval of available VPP packages failed."
- for package in ${packages}; do
+ for package in ${pkgs}; do
# Filter packages with given version
pkg_info=$(apt-cache show -- ${package}) || {
die "apt-cache show on ${package} failed."
}
# If version is set we will add suffix.
artifacts=()
- packages=(vpp vpp-selinux-policy vpp-devel vpp-lib vpp-plugins vpp-api-python)
+ pkgs=(vpp vpp-selinux-policy vpp-devel vpp-lib vpp-plugins vpp-api-python)
if [ -z "${VPP_VERSION-}" ]; then
- artifacts+=(${packages[@]})
- artifacts+=(${packages[@]/%/-${VPP_VERSION-}})
+ artifs+=(${pkgs[@]})
+ artifs+=(${pkgs[@]/%/-${VPP_VERSION-}})
fi
if [[ "${INSTALL:-false}" == "true" ]]; then
- sudo yum -y install "${artifacts[@]}" || {
+ sudo yum -y install "${artifs[@]}" || {
die "Install VPP artifact failed."
}
else
die "Install VPP artifact failed."
}
else
- sudo yum -y install --downloadonly --downloaddir=. "${artifacts[@]}" || {
+ sudo yum -y install --downloadonly --downloaddir=. "${artifs[@]}" || {
die "Download VPP artifacts failed."
}
fi
die "Download VPP artifacts failed."
}
fi
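The only non-obvious part of the blocks above is the suffix expansion: ${pkgs[@]/%/-${VPP_VERSION-}} appends "-<version>" to every array element. A self-contained illustration (the version string is made up):

    pkgs=(vpp vpp-devel vpp-lib)
    VPP_VERSION="20.01-release"
    artifs=("${pkgs[@]/%/-${VPP_VERSION}}")
    echo "${artifs[@]}"
    # vpp-20.01-release vpp-devel-20.01-release vpp-lib-20.01-release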
die "Packagecloud FD.io repo fetch failed."
}
# If version is set we will add suffix.
die "Packagecloud FD.io repo fetch failed."
}
# If version is set we will add suffix.
- artifacts=()
- packages=(vpp vpp-devel vpp-lib vpp-plugins libvpp0)
+ artifs=()
+ pkgs=(vpp vpp-devel vpp-lib vpp-plugins libvpp0)
if [ -z "${VPP_VERSION-}" ]; then
- artifacts+=(${packages[@]})
- artifacts+=(${packages[@]/%/-${VPP_VERSION-}})
+ artifs+=(${pkgs[@]})
+ artifs+=(${pkgs[@]/%/-${VPP_VERSION-}})
fi
if [[ "${INSTALL:-false}" == "true" ]]; then
- sudo yum -y install "${artifacts[@]}" || {
+ sudo yum -y install "${artifs[@]}" || {
die "Install VPP artifact failed."
}
else
die "Install VPP artifact failed."
}
else
- sudo yum -y install --downloadonly --downloaddir=. "${artifacts[@]}" || {
+ sudo yum -y install --downloadonly --downloaddir=. "${artifs[@]}" || {
die "Download VPP artifacts failed."
}
fi
die "Download VPP artifacts failed."
}
fi
TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
if [[ -z "${TEST_TAG_STRING-}" ]]; then
# Probably we got a base64 encoded comment.
- comment=$(base64 --decode <<< "${GERRIT_EVENT_COMMENT_TEXT}" || true)
+ comment="${GERRIT_EVENT_COMMENT_TEXT}"
+ comment=$(base64 --decode <<< "${comment}" || true)
comment=$(fgrep "${trigger}" <<< "${comment}" || true)
TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
fi
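The retry above exists because Gerrit may hand over the comment either as plain text or base64 encoded, so the same extraction is attempted on the raw text first and on the decoded text second. A sketch of the full flow (the trigger word and the extraction command stored in cmd are illustrative guesses, not the real ones):

    trigger="csit-test"
    cmd=(grep -oP "${trigger}"'\s+\K.*')
    comment="${GERRIT_EVENT_COMMENT_TEXT-}"
    TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
    if [[ -z "${TEST_TAG_STRING-}" ]]; then
        # Plain text did not match, try base64 decoding first.
        comment=$(base64 --decode <<< "${comment}" || true)
        comment=$(fgrep "${trigger}" <<< "${comment}" || true)
        TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
    fi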
sed -i "${sed_cmd}" "${sed_file}" || die "RTE_MAX_NUMA_NODES Patch failed"
# Patch L3FWD.
- sed_rxd="s/^#define RTE_TEST_RX_DESC_DEFAULT 128/#define RTE_TEST_RX_DESC_DEFAULT 1024/g"
- sed_txd="s/^#define RTE_TEST_TX_DESC_DEFAULT 512/#define RTE_TEST_TX_DESC_DEFAULT 1024/g"
+ sed_rxd="s/^#define RTE_TEST_RX_DESC_DEFAULT 128"
+ sed_rxd+="/#define RTE_TEST_RX_DESC_DEFAULT 1024/g"
+ sed_txd="s/^#define RTE_TEST_TX_DESC_DEFAULT 512"
+ sed_txd+="/#define RTE_TEST_TX_DESC_DEFAULT 1024/g"
sed_file="./main.c"
pushd examples/l3fwd || die "Pushd failed"
sed -i "${sed_rxd}" "${sed_file}" || die "Patch failed"
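Splitting the sed expression over two assignments only serves the line-length limit; after concatenation the value is byte-for-byte the original one-liner:

    sed_rxd="s/^#define RTE_TEST_RX_DESC_DEFAULT 128"
    sed_rxd+="/#define RTE_TEST_RX_DESC_DEFAULT 1024/g"
    echo "${sed_rxd}"
    # s/^#define RTE_TEST_RX_DESC_DEFAULT 128/#define RTE_TEST_RX_DESC_DEFAULT 1024/g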
pushd "${DPDK_DIR}" || die "Pushd failed"
# Patch L3FWD.
pushd "${DPDK_DIR}" || die "Pushd failed"
# Patch L3FWD.
- sed_rxd="s/^#define RTE_TEST_RX_DESC_DEFAULT 128/#define RTE_TEST_RX_DESC_DEFAULT 2048/g"
- sed_txd="s/^#define RTE_TEST_TX_DESC_DEFAULT 512/#define RTE_TEST_TX_DESC_DEFAULT 2048/g"
+ sed_rxd="s/^#define RTE_TEST_RX_DESC_DEFAULT 128"
+ sed_rxd+="/#define RTE_TEST_RX_DESC_DEFAULT 2048/g"
+ sed_txd="s/^#define RTE_TEST_TX_DESC_DEFAULT 512"
+ sed_txd+="/#define RTE_TEST_TX_DESC_DEFAULT 2048/g"
sed_file="./main.c"
pushd examples/l3fwd || die "Pushd failed"
sed -i "${sed_rxd}" "${sed_file}" || die "Patch failed"
# - ${CSIT_DIR}/DPDK_STABLE_VER - DPDK version to use
# by csit-vpp not-timed jobs.
# - ${CSIT_DIR}/${VPP_VER_FILE} - Ubuntu VPP version to use.
- # - ../*vpp*.deb|rpm - Relative to ${DOWNLOAD_DIR}, copied for vpp-csit jobs.
+ # - ../*vpp*.deb|rpm - Relative to ${DOWNLOAD_DIR},
+ # copied for vpp-csit jobs.
# Directories updated:
# - ${DOWNLOAD_DIR}, vpp-*.deb files are copied here for vpp-csit jobs.
# - ./ - Assumed ${DOWNLOAD_DIR}, *vpp*.deb|rpm files
download_artifacts || die
;;
"vpp-csit-"*)
+ # Shorten line.
+ pkgs="${PKG_SUFFIX}"
# Use locally built packages.
- mv "${DOWNLOAD_DIR}"/../*vpp*."${PKG_SUFFIX}" "${DOWNLOAD_DIR}"/ || {
+ mv "${DOWNLOAD_DIR}"/../*vpp*."${pkgs}" "${DOWNLOAD_DIR}"/ || {
die "Move command failed."
}
;;
die "Move command failed."
}
;;
"using build default ($(grep -c ^processor /proc/cpuinfo))."
fi
"using build default ($(grep -c ^processor /proc/cpuinfo))."
fi
- make UNATTENDED=y pkg-verify || die "VPP build using make pkg-verify failed."
+ make UNATTENDED=y pkg-verify || die "VPP build with make pkg-verify failed."
echo "* VPP ${1-} BUILD SUCCESSFULLY COMPLETED" || {
die "Argument not found."
}
echo "* VPP ${1-} BUILD SUCCESSFULLY COMPLETED" || {
die "Argument not found."
}
k8s_contiv_patch="kubecon.contiv-vpp-yaml-patch.diff"
# Pull the most recent Docker images
- bash <(curl -s https://raw.githubusercontent.com/contiv/vpp/master/k8s/pull-images.sh)
+ url="https://raw.githubusercontent.com/contiv/vpp/master/k8s/pull-images.sh"
+ bash <(curl -s "${url}")
# Apply resources
wget ${k8s_contiv}
function k8s_utils.cri_shim_install {
# Install the CRI Shim on host
- sudo su root -c 'bash <(curl -s https://raw.githubusercontent.com/contiv/vpp/master/k8s/cri-install.sh)'
+ url"https://raw.githubusercontent.com/contiv/vpp/master/k8s/cri-install.sh"
+ sudo su root -c "bash <(curl -s '${url}')"
}
function k8s_utils.cri_shim_uninstall {
# Uninstall the CRI Shim on host
- sudo su root -c 'bash <(curl -s https://raw.githubusercontent.com/contiv/vpp/master/k8s/cri-install.sh) --uninstall'
+ url="https://raw.githubusercontent.com/contiv/vpp/master/k8s/cri-install.sh"
+ sudo su root -c "bash <(curl -s '${url}') --uninstall"
}
function k8s_utils.kube_proxy_install {
# Installing custom version of Kube-Proxy to enable Kubernetes services
- bash <(curl -s https://raw.githubusercontent.com/contiv/vpp/master/k8s/proxy-install.sh)
+ url="https://raw.githubusercontent.com/contiv/vpp/master/k8s/"
+ url+="proxy-install.sh"
+ bash <(curl -s "${url}")
}
function k8s_utils.apply {
function k8s_utils.affinity_non_vpp {
# Set affinity for all non VPP docker containers to CPU 0
- for i in `sudo docker ps --format "{{.ID}} {{.Names}}" | grep -v vpp | cut -d' ' -f1`; do
+ command='sudo docker ps --format "{{.ID}} {{.Names}}"'
+ command+=" | grep -v vpp | cut -d' ' -f1"
+ for i in $(eval "${command}"); do
sudo docker update --cpuset-cpus 0 ${i}
done
}
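A note on the loop above: because the pipeline is kept in a plain string, it has to be re-parsed (here via eval) before the pipe and the embedded quotes take effect. An alternative sketch that stays within the line limit without eval (not part of the change):

    ids=$(sudo docker ps --format "{{.ID}} {{.Names}}" \
        | grep -v vpp | cut -d' ' -f1) || true
    for i in ${ids}; do
        sudo docker update --cpuset-cpus 0 "${i}"
    done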
ip_address = f" {iperf3_attributes[u'ip_address']}" if u"ip_address" \
in iperf3_attributes else u""
iperf3_cmd[u"name"] = u"iperf3"
ip_address = f" {iperf3_attributes[u'ip_address']}" if u"ip_address" \
in iperf3_attributes else u""
iperf3_cmd[u"name"] = u"iperf3"
+ # TODO: Use OptionString library.
iperf3_cmd[u"args"] = f"--{iperf3_attributes[u'role']}{ip_address} " \
f"--interval 0{json_results} " \
f"--version{iperf3_attributes[u'ip_version']}"
iperf3_cmd[u"args"] = f"--{iperf3_attributes[u'role']}{ip_address} " \
f"--interval 0{json_results} " \
f"--version{iperf3_attributes[u'ip_version']}"
if not matching:
self._reported[api_name] = crc
self.log_and_raise(
- f"No active collection contains API {api_name!r} with CRC "
- f"{crc!r}"
+ f"No active collection has API {api_name!r} with CRC {crc!r}"
)
options = self._options[api_name]
options.pop(u"vat_help", None)
cp -r src/* ${WORKING_DIR}/
# Copy the source files to be processed:
-rsync -a --include '*/' --include '*.py' --exclude '*' ../../../resources/libraries/python/ ${WORKING_DIR}/resources/libraries/python/
+from_dir="../../../resources/libraries/python/"
+to_dir="${WORKING_DIR}/resources/libraries/python/"
+command=(rsync -a --include '*/')
+"${command[@]}" --include '*.py' --exclude '*' "${from_dir}" "${to_dir}"
cp ../../../resources/__init__.py ${WORKING_DIR}/resources/
cp ../../../resources/libraries/__init__.py ${WORKING_DIR}/resources/libraries/
-rsync -a --include '*/' --include '*.robot' --exclude '*' ../../../resources/libraries/robot/ ${WORKING_DIR}/resources/libraries/robot/
-rsync -a --include '*/' --include '*.robot' --exclude '*' ../../../tests/ ${WORKING_DIR}/tests/
+from_dir="../../../resources/libraries/robot/"
+to_dir="${WORKING_DIR}/resources/libraries/robot/"
+"${command[@]}" --include '*.robot' --exclude '*' "${from_dir}" "${to_dir}"
+from_dir="../../../tests/"
+to_dir="${WORKING_DIR}/tests/"
+"${command[@]}" --include '*.robot' --exclude '*' "${from_dir}" "${to_dir}"
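The rsync filter trio used above, --include '*/' --include '*.py'|'*.robot' --exclude '*', is the usual idiom for copying a directory tree while keeping only one file type: descend into every directory, accept the wanted extension, drop everything else. A throwaway demonstration (paths are temporary, not the real layout):

    src=$(mktemp -d) && dst=$(mktemp -d)
    mkdir -p "${src}/pkg"
    touch "${src}/pkg/mod.py" "${src}/pkg/notes.txt"
    rsync -a --include '*/' --include '*.py' --exclude '*' "${src}/" "${dst}/"
    find "${dst}" -type f
    # only .../pkg/mod.py is present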
# Create virtual environment:
virtualenv --python=$(which python3) ${WORKING_DIR}/env
# Generate the documentation:
DATE=$(date -u '+%d-%b-%Y')
-sphinx-build -v -c ${WORKING_DIR} -a -b html -E -D release=$1 -D version="$1 documentation - $DATE" ${WORKING_DIR} ${BUILD_DIR}/
+command=(sphinx-build -v -c "${WORKING_DIR}" -a -b html -E -D "release=$1")
+command+=(-D "version=$1 documentation - $DATE" "${WORKING_DIR}" "${BUILD_DIR}/")
+"${command[@]}"
find . -type d -name 'env' | xargs rm -rf
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " doctest to run all doctests embedded in the documentation"
@echo " coverage to run coverage check of the documentation (if enabled)"
@echo " dummy to check syntax errors of document sources"
@echo " coverage to run coverage check of the documentation (if enabled)"
@echo " dummy to check syntax errors of document sources"
# See the License for the specific language governing permissions and
# limitations under the License.
-"""This module gets a bandwith limit together with other parameters, reads
+"""This script gets a bandwith limit together with other parameters, reads
the iPerf3 configuration and sends the traffic. At the end, it measures
the packet loss and latency.
"""
if [[ ${cfg_install_latex} -eq 1 ]] ;
then
sudo apt-get -y install xvfb texlive-latex-recommended \
- texlive-fonts-recommended texlive-fonts-extra texlive-latex-extra latexmk wkhtmltopdf inkscape
- sudo sed -i.bak 's/^\(main_memory\s=\s\).*/\110000000/' /usr/share/texlive/texmf-dist/web2c/texmf.cnf
+ texlive-fonts-recommended texlive-fonts-extra texlive-latex-extra \
+ latexmk wkhtmltopdf inkscape
+ target="/usr/share/texlive/texmf-dist/web2c/texmf.cnf"
+ sudo sed -i.bak 's/^\(main_memory\s=\s\).*/\110000000/' "${target}"
fi
# Create working directories
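Finally, the texmf.cnf edit a few lines above relies on a sed backreference: \(main_memory\s=\s\) captures the key plus separators, and \1 pastes them back in front of the new value, so only the number changes. A quick check on a throwaway copy (the path is illustrative, not the real texmf.cnf):

    printf 'main_memory = 5000000\n' > /tmp/texmf.sample
    sed -i.bak 's/^\(main_memory\s=\s\).*/\110000000/' /tmp/texmf.sample
    cat /tmp/texmf.sample
    # main_memory = 10000000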