-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# Trailing comments are optional, for tracking how to test the message.
-# https://logs.fd.io/production/vex-yul-rot-jenkins-1/
-# vpp-beta-merge-master-ubuntu1804/3370/archives/build-root/
-19.08-rc0~806-g4f9446444~b3370:
+19.08.1-release:
acl_add_replace: '0x13bc8539' # perf
acl_add_replace_reply: '0xac407b0c' # perf
acl_dump: '0xef34fea4' # perf teardown
avf_create_reply: '0xfda5941f' # perf
# ^^ tc01-64B-1c-avf-eth-l2bdbasemaclrn-mrr
# ^ l2bdmaclrnANDbaseANDdrv_avf
+ bond_create: '0xf19b4ad0' # perf
+ bond_enslave: '0x0ded34f6' # perf
bridge_domain_add_del: '0xc6360720' # dev
bridge_domain_add_del_reply: '0xe8d4e804' # dev
classify_add_del_session: '0x85fd79f4' # dev
classify_add_del_session_reply: '0xe8d4e804' # dev
classify_add_del_table: '0x9bd794ae' # dev
classify_add_del_table_reply: '0x05486349' # dev
- cli_inband: '0xb1ad59b3' # dev setup
- cli_inband_reply: '0x6d3c80a4' # dev setup
+ cli_inband: '0xf8377302' # dev setup
+ cli_inband_reply: '0x05879051' # dev setup
cop_interface_enable_disable: '0x69d24598' # dev
cop_interface_enable_disable_reply: '0xe8d4e804' # dev
cop_whitelist_enable_disable: '0x8bb8f6dc' # dev
create_vlan_subif_reply: '0xfda5941f' # virl
gbp_bridge_domain_add: '0x70f1069c' # perf
gbp_bridge_domain_add_reply: '0xe8d4e804' # perf
- gbp_route_domain_add: '0x355b67c0' # perf
+ gbp_route_domain_add: '0x1560adc7' # perf
gbp_route_domain_add_reply: '0xe8d4e804' # perf
- gbp_endpoint_add: '0x6003c704' # perf
+ gbp_endpoint_add: '0xf0efa120' # perf
gbp_endpoint_add_reply: '0x1dd3ff3e' # perf
gbp_endpoint_group_add: '0x1031b376' # perf
gbp_endpoint_group_add_reply: '0xe8d4e804' # perf
- gbp_subnet_add_del: '0x4be859ed' # perf
+ gbp_subnet_add_del: '0xf42b9430' # perf
gbp_subnet_add_del_reply: '0xe8d4e804' # perf
- gbp_contract_add_del: '0xc64310d2' # perf
+ gbp_contract_add_del: '0x5b63d90a' # perf
gbp_contract_add_del_reply: '0x1992deab' # perf
gbp_ext_itf_add_del: '0x6995e85f' # perf
gbp_ext_itf_add_del_reply: '0xe8d4e804' # perf
- gre_tunnel_add_del: '0x04199f47' # virl
- gre_tunnel_add_del_reply: '0x903324db' # virl
+ gre_tunnel_add_del: '0x4bf7bdec' # virl
+ gre_tunnel_add_del_reply: '0x5383d31f' # virl
gpe_enable_disable: '0xeb0e943b' # virl
gpe_enable_disable_reply: '0xe8d4e804' # virl
hw_interface_set_mtu: '0x132da1e7' # dev
hw_interface_set_mtu_reply: '0xe8d4e804' # dev
input_acl_set_interface: '0xe09537b0' # dev
input_acl_set_interface_reply: '0xe8d4e804' # dev
- ip_address_details: '0x2f1dbc7d' # dev
+ ip_address_details: '0x7002eee7' # dev
ip_address_dump: '0x6b7bcd0a' # dev
- ip_neighbor_add_del: '0x7a68a3c4' # dev
+ ip_neighbor_add_del: '0x029dad44' # dev
ip_neighbor_add_del_reply: '0x1992deab' # dev
- ip_probe_neighbor: '0x2736142d' # virl
- ip_route_add_del: '0x83e086ce' # dev
+ ip_probe_neighbor: '0x37bc128d' # virl
+ ip_route_add_del: '0x5ceee41c' # dev
ip_route_add_del_reply: '0x1992deab' # dev
ip_source_check_interface_add_del: '0x0a60152a' # virl
ip_source_check_interface_add_del_reply: '0xe8d4e804' # virl
ip_table_add_del: '0xe5d378f2' # dev
ip_table_add_del_reply: '0xe8d4e804' # dev
+ ipsec_tunnel_if_add_del: '0xd5a98274' # perf
ipsec_interface_add_del_spd: '0x1e3b8286' # dev
ipsec_interface_add_del_spd_reply: '0xe8d4e804' # dev
ipsec_sad_entry_add_del: '0xa25ab61e' # dev
ipsec_sad_entry_add_del_reply: '0x9ffac24b' # dev
ipsec_spd_add_del: '0x9ffdf5da' # dev
ipsec_spd_add_del_reply: '0xe8d4e804' # dev
- ipsec_spd_entry_add_del: '0x6bc6a3b5' # dev
+ ipsec_spd_entry_add_del: '0xdb217840' # dev
ipsec_spd_entry_add_del_reply: '0x9ffac24b' # dev
l2_interface_vlan_tag_rewrite: '0xb90be6b4' # virl
l2_interface_vlan_tag_rewrite_reply: '0xe8d4e804' # virl
lisp_locator_set_dump: '0xc79e8ab0' # virl
lisp_map_resolver_details: '0x60a5f5ca' # virl
lisp_map_resolver_dump: '0x51077d14' # virl
+ macip_acl_add: '0x0c680ca5' # perf
+ macip_acl_dump: '0xef34fea4' # perf
+ macip_acl_interface_add_del: '0x6a6be97c' # perf
+ macip_acl_interface_get: '0x51077d14' # perf
memif_create: '0x6597cdb2' # dev
memif_create_reply: '0xfda5941f' # dev
memif_details: '0x4f5a3397' # dev
memif_dump: '0x51077d14' # dev
memif_socket_filename_add_del: '0x30e3929d' # dev
memif_socket_filename_add_del_reply: '0xe8d4e804' # dev
- nat_det_add_del_map: '0x04b76549' # perf
+ nat_det_add_del_map: '0x112fde05' # perf
nat_det_add_del_map_reply: '0xe8d4e804' # perf
- nat44_interface_add_del_feature: '0xef3edad1' # perf
+ nat44_interface_add_del_feature: '0xf3699b83' # perf
nat44_interface_add_del_feature_reply: '0xe8d4e804' # perf
# ^^^^ tc01-64B-1c-ethip4udp-ip4base-nat44-mrr
# ^ nat44NOTscaleNOTsrc_user_1
show_threads: '0x51077d14' # dev
show_threads_reply: '0xf5e0b66f' # dev
show_version: '0x51077d14' # dev setup
- show_version_reply: '0xb9bcf6df' # dev setup
+ show_version_reply: '0xc919bde1' # dev setup
sw_interface_add_del_address: '0x7b583179' # dev
sw_interface_add_del_address_reply: '0xe8d4e804' # dev
- sw_interface_details: '0xe4ee7eb6' # dev setup
- sw_interface_dump: '0x052753c5' # dev setup
+ sw_interface_bond_dump: '0x51077d14' # perf
+ sw_interface_details: '0x52a9262e' # dev setup
+ sw_interface_dump: '0xaa610c27' # dev setup
sw_interface_ip6nd_ra_config: '0xc3f02daa' # dev
sw_interface_ip6nd_ra_config_reply: '0xe8d4e804' # dev
sw_interface_rx_placement_details: '0x0e9e33f4' # perf
sw_interface_set_table_reply: '0xe8d4e804' # dev
sw_interface_set_vxlan_bypass: '0xe74ca095' # dev
sw_interface_set_vxlan_bypass_reply: '0xe8d4e804' # dev
+ sw_interface_slave_dump: '0x529cb13f' # perf
sw_interface_tap_v2_dump: '0x51077d14' # dev
sw_interface_tap_v2_details: '0x5ee87a5f' # dev
+ sw_interface_set_unnumbered: '0xa2c1bbda' # perf
sw_interface_vhost_user_details: '0x91ff3307' # dev
sw_interface_vhost_user_dump: '0x51077d14' # dev
tap_create_v2: '0x8fa99320' # dev
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# I think archiving the diff is enough.
diff_cmd=("diff" "-dur" "${GENERATED_DIR}/tests_tmp" "${GENERATED_DIR}/tests")
-lines="$("${diff_cmd[@]}" | tee "autogen.log" | wc -l)" || die
+# Diff returns RC=1 when the files differ, so we do not die on the next line.
+lines="$("${diff_cmd[@]}" | tee "autogen.log" | wc -l)"
if [ "${lines}" != "0" ]; then
# TODO: Decide which text goes to stdout and which to stderr.
- warn "Autogen conflict diff nonzero lines: ${lines}"
+ warn "Autogen conflict, diff sees nonzero lines: ${lines}"
# TODO: Disable if output size does more harm than good.
cat "autogen.log" >&2
warn
--- /dev/null
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+# This file should be executed from tox, as the assumed working directory
+# is different from where this file is located.
+# This file does not have executable flag nor shebang,
+# to dissuade non-tox callers.
+
+# This script runs a few grep-based commands and fails
+# if it detects any file edited or added since HEAD~
+# containing a copyright notice in first 3 lines,
+# but not the current year (in the same line).
+# The offending lines are stored to copyright_year.log (overwriting).
+#
+# 3 lines were chosen, because first two lines could be shebang and empty line,
+# and more than 3 lines would start failing on files with multiple copyright
+# holders. There, only the last updating entity needs to bump its year,
+# and put other copyright lines below.
+
+# "set -eu" handles failures from the following two lines.
+BASH_CHECKS_DIR="$(dirname $(readlink -e "${BASH_SOURCE[0]}"))"
+BASH_FUNCTION_DIR="$(readlink -e "${BASH_CHECKS_DIR}/../../function")"
+source "${BASH_FUNCTION_DIR}/common.sh" || {
+ echo "Source failed." >&2
+ exit 1
+}
+
+year=$(date +'%Y')
+IFS=$'\n'
+files=($(git diff --name-only HEAD~ || true))
+unset IFS
+truncate -s 0 "copyright_year.log" || die
+# A change can have thousands of files, suppress console output in the cycle.
+set +x
+for fil in "${files[@]}"; do
+ # Greps do "fail" on 0 line output, we need to ignore that
+ # as 0 lines is good. We need both set +e to ensure everything executes,
+ # and || true later to avoid dying on zero.
+ piped_command="set +ex; head -n 3 '${fil}' | fgrep -i 'Copyright'"
+ piped_command+=" | fgrep -v '${year}' | awk '{print \"${fil}: \" \$0}'"
+ piped_command+=" >> 'copyright_year.log'"
+ wrong_strings="$(bash -c "${piped_command}" || true)" || die
+done
+set -x
+lines="$(< "copyright_year.log" wc -l)"
+if [ "${lines}" != "0" ]; then
+ # TODO: Decide which text goes to stdout and which to stderr.
+ warn "Copyright lines with wrong year detected: ${lines}"
+ # TODO: Disable when output size does more harm than good.
+ pwd
+ cat "copyright_year.log" >&2
+ warn
+ warn "Copyright year checker: FAIL"
+ exit 1
+fi
+
+warn
+warn "Copyright year checker: PASS"
--- /dev/null
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -xeuo pipefail
+
+# This file should be executed from tox, as the assumed working directory
+# is different from where this file is located.
+# This file does not have executable flag nor shebang,
+# to dissuade non-tox callers.
+
+# "set -eu" handles failures from the following two lines.
+BASH_CHECKS_DIR="$(dirname $(readlink -e "${BASH_SOURCE[0]}"))"
+BASH_FUNCTION_DIR="$(readlink -e "${BASH_CHECKS_DIR}/../../function")"
+source "${BASH_FUNCTION_DIR}/common.sh" || {
+ echo "Source failed." >&2
+ exit 1
+}
+
+common_dirs || die
+log_file="$(pwd)/doc_verify.log" || die
+
+# Pre-cleanup.
+rm -f "${log_file}" || die
+rm -f "${DOC_GEN_DIR}/csit.docs.tar.gz" || die
+rm -rf "${DOC_GEN_DIR}/_build" || die
+
+# Documentation generation.
+# Here we do store only stderr to file while stdout (including Xtrace) is
+# printed to console. This way we can track increased errors in future.
+# We do not need to do trap as the env will be closed after tox finished the
+# task.
+exec 3>&1 || die
+export BASH_XTRACEFD="3" || die
+
+pushd "${DOC_GEN_DIR}" || die
+source ./run_doc.sh ${GERRIT_BRANCH:-local} 2> ${log_file} || true
+popd || die
+
+if [[ ! -f "${log_file}" ]] || [[ -s "${log_file}" ]]; then
+ # Output file does not exist or is non-empty.
+ warn
+ warn "Doc verify checker: FAIL"
+ exit 1
+fi
+
+warn
+warn "Doc verify checker: PASS"
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
set -exuo pipefail
-# This file should be executed from tox, as the assumend working directory
+# This file should be executed from tox, as the assumed working directory
# is different from where this file is located.
# This file does not have executable flag nor shebang,
# to dissuade non-tox callers.
-#!/usr/bin/env bash
-
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#'(ipsec[[:digit:]]+tnlhw|ipsec[[:digit:]]+tnlsw|'
#'srhip6|tcp|udp|lispip6|lispip4|vxlan){0,1}'
#'(http){0,1}-'
- '(.*)-(dev|ndrpdr|cps|rps|reconf)$'
+ '(.*)-(dev|ndrpdr|bps|cps|rps|reconf)$'
)
s_suite_rules=(
'number of SUT nodes'
#'(ipsec[[:digit:]]+tnlhw|ipsec[[:digit:]]+tnlsw|'
#'srhip6|tcp|udp|lispip6|lispip4|vxlan){0,1}'
#'(http){0,1}-'
- '(.*)-(dev|ndrpdr|cps|rps|reconf)$'
+ '(.*)-(dev|ndrpdr|bps|cps|rps|reconf)$'
)
rm -f "tc_naming.log" || die
fi
warn
-warn "Testcase naming checker: PASS"
\ No newline at end of file
+warn "Testcase naming checker: PASS"
set +x
for package in ${packages}; do
# Filter packages with given version
- pkg_info=$(apt-cache show ${package}) || {
+ pkg_info=$(apt-cache show -- ${package}) || {
die "apt-cache show on ${package} failed."
}
ver=$(echo ${pkg_info} | grep -o "Version: ${VPP_VERSION-}[^ ]*" | \
NODENESS="1n"
FLAVOR="tx2"
;;
+ *"2n-clx"*)
+ NODENESS="2n"
+ FLAVOR="clx"
+ ;;
*"2n-skx"*)
NODENESS="2n"
FLAVOR="skx"
# - ipsechw - Blacklisted on testbeds without crypto hardware accelerator.
# TODO: Add missing reasons here (if general) or where used (if specific).
case "${TEST_CODE}" in
+ *"2n-clx"*)
+ test_tag_array+=("!ipsechw")
+ ;;
*"2n-skx"*)
test_tag_array+=("!ipsechw")
;;
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
+ "2n_clx")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_clx*.yaml )
+ TOPOLOGIES_TAGS="2_node_*_link_topo"
+ ;;
"2n_skx")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_skx*.yaml )
TOPOLOGIES_TAGS="2_node_*_link_topo"
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""Constants used in CSIT."""
+import os
+
+
+def get_str_from_env(env_var_names, default_value):
+ """Attempt to read string from environment variable, return that or default.
+
+ If environment variable exists, but is empty (and default is not),
+ empty string is returned.
+
+ Several environment variable names are examined, as CSIT currently supports
+ a mix of naming conventions.
+ Here "several" means there are hard coded prefixes to try,
+ and env_var_names itself can be single name, or a list or a tuple of names.
+
+ :param env_var_names: Base names of environment variable to attempt to read.
+ :param default_value: Value to return if the env var does not exist.
+ :type env_var_names: str, or list of str, or tuple of str
+ :type default_value: str
+ :returns: The value read, or default value.
+ :rtype: str
+ """
+ prefixes = ("FDIO_CSIT_", "CSIT_", "")
+ if not isinstance(env_var_names, (list, tuple)):
+ env_var_names = [env_var_names]
+ for name in env_var_names:
+ for prefix in prefixes:
+ value = os.environ.get(prefix + name, None)
+ if value is not None:
+ return value
+ return default_value
+
+
+def get_int_from_env(env_var_names, default_value):
+ """Attempt to read int from environment variable, return that or default.
+
+ String value is read, default is returned also if conversion fails.
+
+ :param env_var_names: Base names of environment variable to attempt to read.
+ :param default_value: Value to return if read or conversion fails.
+ :type env_var_names: str, or list of str, or tuple of str
+ :type default_value: int
+ :returns: The value read, or default value.
+ :rtype: int
+ """
+ env_str = get_str_from_env(env_var_names, "")
+ try:
+ return int(env_str)
+ except ValueError:
+ return default_value
+
+
+def get_float_from_env(env_var_names, default_value):
+ """Attempt to read float from environment variable, return that or default.
+
+ String value is read, default is returned also if conversion fails.
+
+ :param env_var_names: Base names of environment variable to attempt to read.
+ :param default_value: Value to return if read or conversion fails.
+ :type env_var_names: str, or list of str, or tuple of str
+ :type default_value: float
+ :returns: The value read, or default value.
+ :rtype: float
+ """
+ env_str = get_str_from_env(env_var_names, "")
+ try:
+ return float(env_str)
+ except ValueError:
+ return default_value
+
+
+def get_pessimistic_bool_from_env(env_var_names):
+ """Attempt to read bool from environment variable, assume False by default.
+
+ Conversion is lenient and pessimistic; only a few strings are considered true.
+
+ :param env_var_names: Base names of environment variable to attempt to read.
+ :type env_var_names: str, or list of str, or tuple of str
+ :returns: The value read, or False.
+ :rtype: bool
+ """
+ env_str = get_str_from_env(env_var_names, "").lower()
+ return bool(env_str in ("true", "yes", "y", "1"))
+
+
+def get_optimistic_bool_from_env(env_var_names):
+ """Attempt to read bool from environment variable, assume True by default.
+
+ Conversion is lenient and optimistic; only a few strings are considered false.
+
+ :param env_var_names: Base names of environment variable to attempt to read.
+ :type env_var_names: str, or list of str, or tuple of str
+ :returns: The value read, or True.
+ :rtype: bool
+ """
+ env_str = get_str_from_env(env_var_names, "").lower()
+ return bool(env_str not in ("false", "no", "n", "0"))
+
+
class Constants(object):
"""Constants used in CSIT.
# Equivalent to ~0 used in vpp code
BITWISE_NON_ZERO = 0xffffffff
+ # Global "kill switch" for CRC checking during runtime.
+ FAIL_ON_CRC_MISMATCH = get_pessimistic_bool_from_env("FAIL_ON_CRC_MISMATCH")
+
# Mapping from NIC name to its bps limit.
# TODO: Implement logic to lower limits to TG NIC or software. Or PCI.
NIC_NAME_TO_LIMIT = {
"""Start VPP in all containers."""
for container in self.containers:
self.engine.container = self.containers[container]
- # We need to install supervisor client/server system to control VPP
- # as a service
- self.engine.install_supervisor()
self.engine.start_vpp()
def restart_vpp_in_all_containers(self):
interface in container (only single container can be configured).
:param kwargs: Named parameters.
:type chain_topology: str
- :param kwargs: dict
+ :type kwargs: dict
"""
# Count number of DUTs based on node's host information
dut_cnt = len(Counter([self.containers[container].node['host']
"""Configure VPP in chain topology with l2xc.
:param kwargs: Named parameters.
- :param kwargs: dict
+ :type kwargs: dict
"""
self.engine.create_vpp_startup_config()
self.engine.create_vpp_exec_config(
"""Configure VPP in cross horizontal topology (single memif).
:param kwargs: Named parameters.
- :param kwargs: dict
+ :type kwargs: dict
"""
if 'DUT1' in self.engine.container.name:
if_pci = Topology.get_interface_pci_addr(
"""Configure VPP in chain topology with l2xc (functional).
:param kwargs: Named parameters.
- :param kwargs: dict
+ :type kwargs: dict
"""
- self.engine.create_vpp_startup_config_func_dev()
+ self.engine.create_vpp_startup_config()
self.engine.create_vpp_exec_config(
'memif_create_chain_functional.exec',
mid1=kwargs['mid1'], mid2=kwargs['mid2'],
"""Configure VPP in chain topology with ip4.
:param kwargs: Named parameters.
- :param kwargs: dict
+ :type kwargs: dict
"""
self.engine.create_vpp_startup_config()
"""Configure VPP in pipeline topology with ip4.
:param kwargs: Named parameters.
- :param kwargs: dict
+ :type kwargs: dict
"""
self.engine.create_vpp_startup_config()
node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
"""System info."""
raise NotImplementedError
- def install_supervisor(self):
- """Install supervisord inside a container."""
- if isinstance(self, LXC):
- self.execute('sleep 3; apt-get update')
- self.execute('apt-get install -y supervisor')
- self.execute('echo "{config}" > {config_file} && '
- 'supervisord -c {config_file}'.
- format(
- config='[unix_http_server]\n'
- 'file = /tmp/supervisor.sock\n\n'
- '[rpcinterface:supervisor]\n'
- 'supervisor.rpcinterface_factory = '
- 'supervisor.rpcinterface:make_main_rpcinterface\n\n'
- '[supervisorctl]\n'
- 'serverurl = unix:///tmp/supervisor.sock\n\n'
- '[supervisord]\n'
- 'pidfile = /tmp/supervisord.pid\n'
- 'identifier = supervisor\n'
- 'directory = /tmp\n'
- 'logfile=/tmp/supervisord.log\n'
- 'loglevel=debug\n'
- 'nodaemon=false\n\n',
- config_file=SUPERVISOR_CONF))
-
def start_vpp(self):
"""Start VPP inside a container."""
- self.execute('echo "{config}" >> {config_file}'.
- format(
- config='[program:vpp]\n'
- 'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
- 'autostart=false\n'
- 'autorestart=false\n'
- 'redirect_stderr=true\n'
- 'priority=1',
- config_file=SUPERVISOR_CONF))
- self.execute('supervisorctl reload')
- self.execute('supervisorctl start vpp')
+ self.execute(
+ u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
+ u">/tmp/vppd.log 2>&1 < /dev/null &")
def restart_vpp(self):
"""Restart VPP service inside a container."""
- self.execute('supervisorctl restart vpp')
- self.execute('cat /tmp/supervisord.log')
+ self.execute(u"pkill vpp")
+ self.start_vpp()
+ self.execute(u"cat /tmp/vppd.log")
- def create_base_vpp_startup_config(self):
+ def create_base_vpp_startup_config(self, cpuset_cpus=None):
"""Create base startup configuration of VPP on container.
+ :param cpuset_cpus: List of CPU cores to allocate.
+
+ :type cpuset_cpus: list.
:returns: Base VPP startup configuration.
:rtype: VppConfigGenerator
"""
- cpuset_cpus = self.container.cpuset_cpus
-
+ if cpuset_cpus is None:
+ cpuset_cpus = self.container.cpuset_cpus
# Create config instance
vpp_config = VppConfigGenerator()
vpp_config.set_node(self.container.node)
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
- vpp_config.add_unix_exec('/tmp/running.exec')
- vpp_config.add_socksvr()
- # We will pop the first core from the list to be a main core
- vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
- # If more cores in the list, the rest will be used as workers.
+ vpp_config.add_unix_exec(u"/tmp/running.exec")
+ vpp_config.add_statseg_per_node_counters(value=u"on")
if cpuset_cpus:
- corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+ # We will pop the first core from the list to be a main core
+ vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
+ # If more cores in the list, the rest will be used as workers.
+ corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
+ vpp_config.add_buffers_per_numa(215040)
+ vpp_config.add_plugin(u"disable", u"default")
+ vpp_config.add_plugin(u"enable", u"memif_plugin.so")
+ vpp_config.add_heapsize(u"4G")
+ vpp_config.add_ip_heap_size(u"4G")
+ vpp_config.add_statseg_size(u"4G")
return vpp_config
"""Create startup configuration of VPP without DPDK on container.
"""
vpp_config = self.create_base_vpp_startup_config()
- vpp_config.add_plugin('disable', 'dpdk_plugin.so')
-
- # Apply configuration
- self.execute('mkdir -p /etc/vpp/')
- self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
- .format(config=vpp_config.get_config_str()))
-
- def create_vpp_startup_config_dpdk_dev(self, *devices):
- """Create startup configuration of VPP with DPDK on container.
-
- :param devices: List of PCI devices to add.
- :type devices: list
- """
- vpp_config = self.create_base_vpp_startup_config()
- vpp_config.add_dpdk_dev(*devices)
- vpp_config.add_dpdk_no_tx_checksum_offload()
- vpp_config.add_dpdk_log_level('debug')
- vpp_config.add_plugin('disable', 'default')
- vpp_config.add_plugin('enable', 'dpdk_plugin.so')
- vpp_config.add_plugin('enable', 'memif_plugin.so')
-
- # Apply configuration
- self.execute('mkdir -p /etc/vpp/')
- self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
- .format(config=vpp_config.get_config_str()))
-
- def create_vpp_startup_config_func_dev(self):
- """Create startup configuration of VPP on container for functional
- vpp_device tests.
- """
- # Create config instance
- vpp_config = VppConfigGenerator()
- vpp_config.set_node(self.container.node)
- vpp_config.add_unix_cli_listen()
- vpp_config.add_unix_nodaemon()
- vpp_config.add_unix_exec('/tmp/running.exec')
- vpp_config.add_socksvr()
- vpp_config.add_plugin('disable', 'dpdk_plugin.so')
# Apply configuration
self.execute('mkdir -p /etc/vpp/')
:type kwargs: dict
"""
running = '/tmp/running.exec'
-
template = '{res}/{tpl}'.format(
res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)
self._configure_cgroup('lxc')
+ def build(self):
+ """Build container (compile)."""
+ raise NotImplementedError
+
def create(self):
"""Create/deploy an application inside a container on system.
' '.join('--set-var %s' % env for env in self.container.env))\
if self.container.env else ''
- cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
- "exit $?'".format(env=env, c=self.container, command=command)
+ cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}'"\
+ .format(env=env, c=self.container, command=command)
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
if int(ret) != 0:
:type command: str
:raises RuntimeError: If running the command in a container failed.
"""
- cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
- "exit $?'".format(c=self.container, command=command)
+ cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}'"\
+ .format(c=self.container, command=command)
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
if int(ret) != 0:
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""
import binascii
+import copy
import glob
import json
import shutil
:rtype: PapiSocketExecutor
:raises RuntimeError: If unverified or conflicting CRC is encountered.
"""
+ self.crc_checker_instance.report_initial_conflicts()
if history:
PapiHistory.add_to_papi_history(
self._node, csit_papi_command, **kwargs)
+ self.crc_checker_instance.check_api_name(csit_papi_command)
self._api_command_list.append(
- dict(api_name=csit_papi_command, api_args=kwargs))
+ dict(api_name=csit_papi_command, api_args=copy.deepcopy(kwargs)))
return self
def get_replies(self, err_msg="Failed to get replies."):
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from robot.api import logger
+from resources.libraries.python.Constants import Constants
+
def _str(text):
"""Convert from possible unicode without interpreting as number.
so make sure the calling libraries have appropriate robot library scope.
For usual testing, it means "GLOBAL" scope."""
- def __init__(self, directory):
+ def __init__(
+ self, directory, fail_on_mismatch=Constants.FAIL_ON_CRC_MISMATCH):
"""Initialize empty state, then register known collections.
This also scans directory for .api.json files
:type directory: str
"""
+ self.fail_on_mismatch = fail_on_mismatch
+ """If True, mismatch leads to test failure, by raising exception.
+ If False, the mismatch is logged, but the test is allowed to continue.
+ """
+
self._expected = dict()
"""Mapping from collection name to mapping from API name to CRC string.
- Colection name should be something useful for logging.
+ Collection name should be something useful for logging.
- Order of addition reflects the order colections should be queried.
+ Order of addition reflects the order collections should be queried.
If an incompatible CRC is found, affected collections are removed.
A CRC that would remove all does not, added to _reported instead,
- while causing a failure in single test."""
+ while causing a failure in single test (if fail_on_mismatch)."""
self._missing = dict()
"""Mapping from collection name to mapping from API name to CRC string.
Starts the same as _expected, but each time an encountered api,crc pair
- fits the expectation, the pair is removed from this mapping.
- Ideally, the active mappings will become empty.
+ fits the expectation, the pair is removed from all collections
+ within this mapping. Ideally, the active mappings will become empty.
If not, it is an error, VPP removed or renamed a message CSIT needs."""
self._found = dict()
self._register_all()
self._check_dir(directory)
+ def log_and_raise(self, exc_msg):
+ """Log to console, on fail_on_mismatch also raise runtime exception.
+
+ :param exc_msg: The message to include in log or exception.
+ :type exc_msg: str
+ :raises RuntimeError: With the message, if fail_on_mismatch.
+ """
+ logger.console("RuntimeError:\n{m}".format(m=exc_msg))
+ if self.fail_on_mismatch:
+ raise RuntimeError(exc_msg)
+
def _register_collection(self, collection_name, name_to_crc_mapping):
"""Add a named (copy of) collection of CRCs.
:param name_to_crc_mapping: Mapping from API names to CRCs.
:type collection_name: str or unicode
:type name_to_crc_mapping: dict from str/unicode to str/unicode
+ :raises RuntimeError: If the name of a collection is registered already.
"""
collection_name = _str(collection_name)
if collection_name in self._expected:
raise RuntimeError("Collection {cl!r} already registered.".format(
- cl=collection_name))
+ cl=collection_name)
+ )
mapping = {_str(k): _str(v) for k, v in name_to_crc_mapping.items()}
self._expected[collection_name] = mapping
self._missing[collection_name] = mapping.copy()
continue
return _str(item)
raise RuntimeError("No name found for message: {obj!r}".format(
- obj=msg_obj))
+ obj=msg_obj)
+ )
@staticmethod
def _get_crc(msg_obj):
if crc:
return _str(crc)
raise RuntimeError("No CRC found for message: {obj!r}".format(
- obj=msg_obj))
+ obj=msg_obj)
+ )
def _process_crc(self, api_name, crc):
"""Compare API to verified collections, update class state.
+ Here, API stands for (message name, CRC) pair.
+
Conflict is NOT when a collection does not recognize the API.
Such APIs are merely added to _found for later reporting.
Conflict is when a collection recognizes the API under a different CRC.
self._expected = new_expected
self._missing = {name: self._missing[name] for name in new_expected}
return
- # No new_expected means some colections knew the api_name,
+ # No new_expected means some collections knew the api_name,
# but CRC does not match any. This has to be reported.
self._reported[api_name] = crc
"""Parse every .api.json found under directory, remember conflicts.
As several collections are supported, each conflict invalidates
- one of them, failure happens only when no collections would be left.
+ some of them, failure happens only when no collections would be left.
In that case, set of collections just before the failure is preserved,
the _reported mapping is filled with conflicting APIs.
The _found mapping is filled with discovered api names and crcs.
msg_crc = self._get_crc(msg_obj)
self._process_crc(msg_name, msg_crc)
logger.debug("Surviving collections: {col!r}".format(
- col=self._expected.keys()))
+ col=self._expected.keys())
+ )
def report_initial_conflicts(self, report_missing=False):
"""Report issues discovered by _check_dir, if not done that already.
Missing reporting is disabled by default, because some messages
come from plugins that might not be enabled at runtime.
+ After the report, clear _reported, so that test cases report them again,
+ thus tracking which message is actually used (by which test).
+
:param report_missing: Whether to raise on missing messages.
:type report_missing: bool
- :raises RuntimeError: If CRC mismatch or missing messages are detected.
+ :raises RuntimeError: If CRC mismatch or missing messages are detected,
+ and fail_on_mismatch is True.
"""
if self._initial_conflicts_reported:
return
self._initial_conflicts_reported = True
if self._reported:
- raise RuntimeError("Dir check found incompatible API CRCs: {rep!r}"\
- .format(rep=self._reported))
+
+ reported_indented = json.dumps(
+ self._reported, indent=1, sort_keys=True,
+ separators=[",", ":"]
+ )
+ self._reported = dict()
+ self.log_and_raise(
+ "Incompatible API CRCs found in .api.json files:\n"
+ "{r_i}".format(r_i=reported_indented)
+ )
if not report_missing:
return
missing = {name: mapp for name, mapp in self._missing.items() if mapp}
if missing:
- raise RuntimeError("Dir check found missing API CRCs: {mis!r}"\
- .format(mis=missing))
+ missing_indented = json.dumps(
+ missing, indent=1, sort_keys=True, separators=[",", ":"])
+ self.log_and_raise(
+ "API CRCs missing from .api.json:\n"
+ "{m_i}".format(m_i=missing_indented)
+ )
def check_api_name(self, api_name):
- """Fail if the api_name has no known CRC associated.
+ """Fail if the api_name has no, or different from known CRC associated.
Do not fail if this particular failure has been already reported.
- Intended use: Call everytime an API call is queued or response received.
+ Intended use: Call during test (not in initialization),
+ every time an API call is queued or response received.
+
- :param api_name: VPP API messagee name to check.
+ :param api_name: VPP API message name to check.
:type api_name: str or unicode
:raises RuntimeError: If no verified CRC for the api_name is found.
"""
if new_expected:
# Some collections recognized the message name.
self._expected = new_expected
- return
crc = self._found.get(api_name, None)
+ matching = False
+ if crc is not None:
+ # Regardless of how many collections are remaining,
+ # verify the known CRC is on one of them.
+ for col, name_to_crc_mapping in self._expected.items():
+ if api_name not in name_to_crc_mapping:
+ continue
+ if name_to_crc_mapping[api_name] == crc:
+ matching = True
+ break
+ if matching:
+ return
self._reported[api_name] = crc
- # Disabled temporarily during CRC mismatch.
- #raise RuntimeError("No active collection has API {api!r}"
- # " CRC found {crc!r}".format(api=api_name, crc=crc))
+ self.log_and_raise(
+ "No active collection contains API {a_n!r} with CRC {crc!r}".format(
+ a_n=api_name, crc=crc)
+ )
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
files downloaded to the correct place.
"""
+from __future__ import print_function
import os.path as op
import sys
from resources.libraries.python.VppApiCrc import VppApiCrcChecker
-# TODO: Read FDIO_VPP_DIR environment variable, or some other input,
-# instead of using hardcoded relative path?
-API_DIR = op.normpath(op.join(
- op.dirname(op.abspath(__file__)), "..", "..", "..", "..",
- "build-root", "install-vpp-native", "vpp", "share", "vpp", "api"))
-CHECKER = VppApiCrcChecker(API_DIR)
-try:
- CHECKER.report_initial_conflicts(report_missing=True)
-except RuntimeError as err:
- sys.stderr.write("{err!r}\n".format(err=err))
- sys.stderr.write(
- "\n"
- "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"
- "\n"
- "VPP CSIT API CHECK FAIL!\n"
- "\n"
- "This means the patch under test has missing messages,\n"
- "or messages with unexpected CRCs compared to what CSIT needs.\n"
- "Either this Change and/or its ancestors were editing .api files,\n"
- "or your chain is not rebased upon the recent enough VPP codebase.\n"
- "\n"
- "Please rebase the patch to see if that fixes the problem.\n"
- "If that fails email csit-dev@lists.fd.io for a new\n"
- "operational branch supporting the api changes.\n"
- "\n"
- "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"
- )
- sys.exit(1)
-else:
- sys.stderr.write(
- "\n"
- "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"
- "\n"
- "VPP CSIT API CHECK PASS!\n"
- "\n"
- "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"
- )
+def main():
+    """Execute the logic, return the return code.
+
+    From current location, construct path to .api file subtree,
+    initialize and run the CRC checker, print result consequences
+    to stderr, return the return code to return from the script.
+
+    :returns: Return code to return. 0 if OK, 1 if CRC mismatch.
+    :rtype: int
+    """
+
+    # TODO: Read FDIO_VPP_DIR environment variable, or some other input,
+    # instead of using hardcoded relative path?
+
+    api_dir = op.normpath(op.join(
+        op.dirname(op.abspath(__file__)), "..", "..", "..", "..",
+        "build-root", "install-vpp-native", "vpp", "share", "vpp",
+        "api"
+    ))
+    checker = VppApiCrcChecker(api_dir)
+    try:
+        checker.report_initial_conflicts(report_missing=True)
+    except RuntimeError as err:
+        # The raised error's repr (first banner line) carries the
+        # conflicting/missing CRC details for the log.
+        stderr_lines = [
+            "{err!r}".format(err=err),
+            "",
+            "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@",
+            "",
+            "VPP CSIT API CHECK FAIL!",
+            "",
+            "This means the patch under test has missing messages,",
+            "or messages with unexpected CRCs compared to what CSIT needs.",
+            "Either this Change and/or its ancestors were editing .api files,",
+            "or your chain is not rebased upon a recent enough VPP codebase.",
+            "",
+            "Please rebase the patch to see if that fixes the problem.",
+            "If that fails email csit-dev@lists.fd.io for a new",
+            "operational branch supporting the api changes.",
+            "",
+            "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@",
+        ]
+        # Nonzero return code signals the mismatch to the caller.
+        ret_code = 1
+    else:
+        stderr_lines = [
+            "",
+            "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@",
+            "",
+            "VPP CSIT API CHECK PASS!",
+            "",
+            "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@",
+        ]
+        ret_code = 0
+    # Emit the whole banner to stderr, one line at a time.
+    for stderr_line in stderr_lines:
+        print(stderr_line, file=sys.stderr)
+    return ret_code
+
+if __name__ == "__main__":
+    sys.exit(main())
--- /dev/null
+.terraform/
+.terraform.tfstate.lock.info
+terraform.tfstate
+terraform.tfstate.backup
--- /dev/null
+provider "aws" {
+ region = "eu-central-1"
+}
+
+variable "avail_zone" {
+ type = string
+ default = "eu-central-1a"
+}
+# Base VPC CIDRs
+variable "vpc_cidr_mgmt" {
+ type = string
+ default = "192.168.0.0/24"
+}
+variable "vpc_cidr_b" {
+ type = string
+ default = "192.168.10.0/24"
+}
+variable "vpc_cidr_c" {
+ type = string
+ default = "200.0.0.0/24"
+}
+variable "vpc_cidr_d" {
+ type = string
+ default = "192.168.20.0/24"
+}
+
+# Trex Dummy CIDRs
+variable "trex_dummy_cidr_port_0" {
+ type = string
+ default = "10.0.0.0/24"
+}
+variable "trex_dummy_cidr_port_1" {
+ type = string
+ default = "20.0.0.0/24"
+}
+
+# IPs
+variable "tg_if1_ip" {
+ type = string
+ default = "192.168.10.254"
+}
+variable "tg_if2_ip" {
+ type = string
+ default = "192.168.20.254"
+}
+variable "dut1_if1_ip" {
+ type = string
+ default = "192.168.10.11"
+}
+variable "dut1_if2_ip" {
+ type = string
+ default = "200.0.0.101"
+}
+variable "dut2_if1_ip" {
+ type = string
+ default = "200.0.0.102"
+}
+variable "dut2_if2_ip" {
+ type = string
+ default = "192.168.20.11"
+}
+variable "tg_mgmt_ip" {
+ type = string
+ default = "192.168.0.10"
+}
+variable "dut1_mgmt_ip" {
+ type = string
+ default = "192.168.0.11"
+}
+variable "dut2_mgmt_ip" {
+ type = string
+ default = "192.168.0.12"
+}
+
+# Instance Type
+variable "instance_type" {
+ type = string
+ default = "c5n.9xlarge"
+}
+
+resource "aws_vpc" "CSIT" {
+ cidr_block = var.vpc_cidr_mgmt
+}
+
+resource "aws_security_group" "CSIT" {
+  name = "CSIT"
+  description = "Allow inbound traffic"
+  vpc_id = aws_vpc.CSIT.id
+
+  # Management access: SSH from anywhere.
+  ingress {
+    from_port = 22
+    to_port = 22
+    protocol = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  # All traffic between members of this security group (self = true).
+  # "-1" (quoted, per provider docs) means all protocols, matching the
+  # egress rule below; was an unquoted number, inconsistent with egress.
+  ingress {
+    from_port = 0
+    to_port = 0
+    protocol = "-1"
+    self = true
+  }
+
+  # Unrestricted egress.
+  egress {
+    from_port = 0
+    to_port = 0
+    protocol = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  depends_on = [aws_vpc.CSIT]
+}
+
+resource "aws_vpc_ipv4_cidr_block_association" "b" {
+ vpc_id = aws_vpc.CSIT.id
+ cidr_block = var.vpc_cidr_b
+ depends_on = [aws_vpc.CSIT]
+}
+resource "aws_vpc_ipv4_cidr_block_association" "c" {
+ vpc_id = aws_vpc.CSIT.id
+ cidr_block = var.vpc_cidr_c
+ depends_on = [aws_vpc.CSIT]
+}
+resource "aws_vpc_ipv4_cidr_block_association" "d" {
+ vpc_id = aws_vpc.CSIT.id
+ cidr_block = var.vpc_cidr_d
+ depends_on = [aws_vpc.CSIT]
+}
+
+resource "aws_subnet" "mgmt" {
+ vpc_id = aws_vpc.CSIT.id
+ cidr_block = var.vpc_cidr_mgmt
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSIT]
+}
+
+resource "aws_subnet" "b" {
+ vpc_id = aws_vpc.CSIT.id
+ cidr_block = var.vpc_cidr_b
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.b]
+}
+
+resource "aws_subnet" "c" {
+ vpc_id = aws_vpc.CSIT.id
+ cidr_block = var.vpc_cidr_c
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.c]
+}
+
+resource "aws_subnet" "d" {
+ vpc_id = aws_vpc.CSIT.id
+ cidr_block = var.vpc_cidr_d
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.d]
+}
+
+resource "aws_internet_gateway" "CSIT" {
+ vpc_id = aws_vpc.CSIT.id
+ depends_on = [aws_vpc.CSIT]
+}
+
+resource "aws_key_pair" "CSIT" {
+ key_name = "CSIT"
+ public_key = file("~/.ssh/id_rsa.pub")
+}
+
+data "aws_ami" "ubuntu" {
+ most_recent = true
+
+ filter {
+ name = "name"
+ values = ["*hvm-ssd/ubuntu-bionic-18.04-amd64*"]
+ }
+
+ filter {
+ name = "virtualization-type"
+ values = ["hvm"]
+ }
+
+ owners = ["099720109477"] # Canonical
+}
+
+resource "aws_placement_group" "CSIT" {
+ name = "CSIT"
+ strategy = "cluster"
+}
+
+resource "aws_instance" "tg" {
+ ami = data.aws_ami.ubuntu.id
+ instance_type = var.instance_type
+# cpu_threads_per_core = 1
+# cpu_core_count = 18
+ key_name = aws_key_pair.CSIT.key_name
+ associate_public_ip_address = true
+ subnet_id = aws_subnet.mgmt.id
+ private_ip = var.tg_mgmt_ip
+ vpc_security_group_ids = [aws_security_group.CSIT.id]
+ depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
+ placement_group = aws_placement_group.CSIT.id
+ source_dest_check = false
+}
+
+resource "aws_instance" "dut1" {
+ ami = data.aws_ami.ubuntu.id
+# cpu_threads_per_core = 1
+# cpu_core_count = 18
+ instance_type = var.instance_type
+ key_name = aws_key_pair.CSIT.key_name
+ associate_public_ip_address = true
+ subnet_id = aws_subnet.mgmt.id
+ private_ip = var.dut1_mgmt_ip
+ vpc_security_group_ids = [aws_security_group.CSIT.id]
+ depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
+ placement_group = aws_placement_group.CSIT.id
+ source_dest_check = false
+}
+
+resource "aws_instance" "dut2" {
+ ami = data.aws_ami.ubuntu.id
+# cpu_threads_per_core = 1
+# cpu_core_count = 18
+ instance_type = var.instance_type
+ key_name = aws_key_pair.CSIT.key_name
+ associate_public_ip_address = true
+ subnet_id = aws_subnet.mgmt.id
+ private_ip = var.dut2_mgmt_ip
+ vpc_security_group_ids = [aws_security_group.CSIT.id]
+ depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
+ placement_group = aws_placement_group.CSIT.id
+ source_dest_check = false
+}
+
+resource "aws_route" "CSIT-igw" {
+ route_table_id = aws_vpc.CSIT.main_route_table_id
+ gateway_id = aws_internet_gateway.CSIT.id
+ destination_cidr_block = "0.0.0.0/0"
+ depends_on = [aws_vpc.CSIT, aws_internet_gateway.CSIT]
+}
+# Route the TRex dummy source prefixes back to the TG's primary interface.
+# NOTE(review): depends_on names aws_instance.dut1/dut2 while the route
+# itself uses the TG interface — presumably ordering-only (wait until all
+# instances exist before adding routes); confirm intent.
+resource "aws_route" "dummy-trex-port-0" {
+  route_table_id = aws_vpc.CSIT.main_route_table_id
+  network_interface_id = aws_instance.tg.primary_network_interface_id
+  destination_cidr_block = var.trex_dummy_cidr_port_0
+  depends_on = [aws_vpc.CSIT, aws_instance.dut1]
+}
+resource "aws_route" "dummy-trex-port-1" {
+  route_table_id = aws_vpc.CSIT.main_route_table_id
+  network_interface_id = aws_instance.tg.primary_network_interface_id
+  destination_cidr_block = var.trex_dummy_cidr_port_1
+  depends_on = [aws_vpc.CSIT, aws_instance.dut2]
+}
+
+resource "null_resource" "deploy_tg" {
+ depends_on = [ aws_instance.tg ]
+ connection {
+ user = "ubuntu"
+ host = aws_instance.tg.public_ip
+ private_key = file("~/.ssh/id_rsa")
+ }
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = "../../testbed-setup/ansible/site_aws.yaml"
+ force_handlers = true
+ }
+ hosts = ["tg"]
+ extra_vars = {
+ ansible_python_interpreter = "/usr/bin/python3"
+ aws = true
+ }
+ }
+ }
+}
+resource "null_resource" "deploy_dut1" {
+ depends_on = [ aws_instance.dut1 ]
+ connection {
+ user = "ubuntu"
+ host = aws_instance.dut1.public_ip
+ private_key = file("~/.ssh/id_rsa")
+ }
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = "../../testbed-setup/ansible/site_aws.yaml"
+ force_handlers = true
+ }
+ hosts = ["sut"]
+ extra_vars = {
+ ansible_python_interpreter = "/usr/bin/python3"
+ aws = true
+ }
+ }
+ }
+}
+resource "null_resource" "deploy_dut2" {
+ depends_on = [ aws_instance.dut2 ]
+ connection {
+ user = "ubuntu"
+ host = aws_instance.dut2.public_ip
+ private_key = file("~/.ssh/id_rsa")
+ }
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = "../../testbed-setup/ansible/site_aws.yaml"
+ force_handlers = true
+ }
+ hosts = ["sut"]
+ extra_vars = {
+ ansible_python_interpreter = "/usr/bin/python3"
+ aws = true
+ }
+ }
+ }
+}
+
+resource "null_resource" "deploy_topology" {
+ depends_on = [ aws_instance.tg, aws_instance.dut1, aws_instance.dut2 ]
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = "../../testbed-setup/ansible/cloud_topology.yaml"
+ }
+ hosts = ["local"]
+ extra_vars = {
+ ansible_python_interpreter = "/usr/bin/python3"
+ cloud_topology = "aws"
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.dut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.dut1_if2.mac_address
+ dut2_if1_mac = data.aws_network_interface.dut2_if1.mac_address
+ dut2_if2_mac = data.aws_network_interface.dut2_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.dut1.public_ip
+ dut2_public_ip = aws_instance.dut2.public_ip
+ }
+ }
+ }
+}
+
+output "dbg_tg" {
+ value = "TG IP: ${aws_instance.tg.public_ip}"
+}
+
+output "dbg_dut1" {
+ value = "DUT1 IP: ${aws_instance.dut1.public_ip}"
+}
+
+output "dbg_dut2" {
+ value = "DUT2 IP: ${aws_instance.dut2.public_ip}"
+}
--- /dev/null
+resource "aws_network_interface" "dut1_if1" {
+ subnet_id = aws_subnet.b.id
+ source_dest_check = false
+ private_ip = var.dut1_if1_ip
+ private_ips = [var.dut1_if1_ip]
+ security_groups = [aws_security_group.CSIT.id]
+ attachment {
+ instance = aws_instance.dut1.id
+ device_index = 1
+ }
+ depends_on = [aws_vpc.CSIT, aws_subnet.b]
+}
+
+data "aws_network_interface" "dut1_if1" {
+ id = aws_network_interface.dut1_if1.id
+}
+
+# Second data-plane NIC of DUT1, attached into subnet "c".
+resource "aws_network_interface" "dut1_if2" {
+  subnet_id = aws_subnet.c.id
+  source_dest_check = false
+  private_ip = var.dut1_if2_ip
+  private_ips = [var.dut1_if2_ip]
+  security_groups = [aws_security_group.CSIT.id]
+  attachment {
+    instance = aws_instance.dut1.id
+    device_index = 2
+  }
+  # Also wait for the subnet this NIC lives in, consistent with every
+  # sibling interface (dut1_if1, dut2_if1, dut2_if2, tg_if1, tg_if2);
+  # previously only aws_vpc.CSIT was listed here.
+  depends_on = [aws_vpc.CSIT, aws_subnet.c]
+}
+
+data "aws_network_interface" "dut1_if2" {
+ id = aws_network_interface.dut1_if2.id
+}
+
+resource "aws_network_interface" "dut2_if1" {
+ subnet_id = aws_subnet.c.id
+ source_dest_check = false
+ private_ip = var.dut2_if1_ip
+ private_ips = [var.dut2_if1_ip]
+ security_groups = [aws_security_group.CSIT.id]
+ attachment {
+ instance = aws_instance.dut2.id
+ device_index = 1
+ }
+ depends_on = [aws_vpc.CSIT, aws_subnet.c]
+}
+
+data "aws_network_interface" "dut2_if1" {
+ id = aws_network_interface.dut2_if1.id
+}
+
+resource "aws_network_interface" "dut2_if2" {
+ subnet_id = aws_subnet.d.id
+ source_dest_check = false
+ private_ip = var.dut2_if2_ip
+ private_ips = [var.dut2_if2_ip]
+ security_groups = [aws_security_group.CSIT.id]
+ attachment {
+ instance = aws_instance.dut2.id
+ device_index = 2
+ }
+ depends_on = [aws_vpc.CSIT, aws_subnet.d]
+}
+
+data "aws_network_interface" "dut2_if2" {
+ id = aws_network_interface.dut2_if2.id
+}
+
+# First data-plane NIC of the TG, attached into subnet "b".
+# NOTE(review): private_ip is not a configurable argument of
+# aws_network_interface (only private_ips is) — it looks like an exported
+# attribute being assigned; TODO confirm against the pinned provider
+# version, and drop the redundant line if it is ignored or rejected.
+resource "aws_network_interface" "tg_if1" {
+  subnet_id = aws_subnet.b.id
+  source_dest_check = false
+  private_ip = var.tg_if1_ip
+  private_ips = [var.tg_if1_ip]
+  security_groups = [aws_security_group.CSIT.id]
+  attachment {
+    instance = aws_instance.tg.id
+    device_index = 1
+  }
+  depends_on = [aws_vpc.CSIT, aws_subnet.b]
+}
+
+data "aws_network_interface" "tg_if1" {
+ id = aws_network_interface.tg_if1.id
+}
+
+resource "aws_network_interface" "tg_if2" {
+ subnet_id = aws_subnet.d.id
+ source_dest_check = false
+ private_ip = var.tg_if2_ip
+ private_ips = [var.tg_if2_ip]
+ security_groups = [aws_security_group.CSIT.id]
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 2
+ }
+ depends_on = [aws_vpc.CSIT, aws_subnet.d]
+}
+
+data "aws_network_interface" "tg_if2" {
+ id = aws_network_interface.tg_if2.id
+}
--- /dev/null
+.terraform/
+.terraform.tfstate.lock.info
+terraform.tfstate
+terraform.tfstate.backup
--- /dev/null
+provider "azurerm" {
+ version = ">= 1.4.0"
+}
+
+# Variables
+
+variable "vpc_cidr_a" {
+ type = string
+ default = "172.16.0.0/24"
+}
+
+variable "vpc_cidr_b" {
+ type = string
+ default = "192.168.10.0/24"
+}
+
+variable "vpc_cidr_c" {
+ type = string
+ default = "200.0.0.0/24"
+}
+
+variable "vpc_cidr_d" {
+ type = string
+ default = "192.168.20.0/24"
+}
+
+variable "trex_dummy_cidr_port_0" {
+ type = string
+ default = "10.0.0.0/24"
+}
+
+variable "trex_dummy_cidr_port_1" {
+ type = string
+ default = "20.0.0.0/24"
+}
+
+# Create resource group and resources
+
+resource "azurerm_resource_group" "CSIT" {
+ name = "CSIT"
+ location = "East US"
+}
+
+resource "azurerm_virtual_network" "CSIT" {
+ name = "CSIT-network"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ location = azurerm_resource_group.CSIT.location
+ address_space = [ var.vpc_cidr_a,
+ var.vpc_cidr_b,
+ var.vpc_cidr_c,
+ var.vpc_cidr_d ]
+ depends_on = [ azurerm_resource_group.CSIT ]
+}
+
+resource "azurerm_subnet" "a" {
+ name = "subnet_a"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ virtual_network_name = azurerm_virtual_network.CSIT.name
+ address_prefix = var.vpc_cidr_a
+ depends_on = [ azurerm_resource_group.CSIT ]
+}
+
+resource "azurerm_subnet" "b" {
+ name = "subnet_b"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ virtual_network_name = azurerm_virtual_network.CSIT.name
+ address_prefix = var.vpc_cidr_b
+ depends_on = [ azurerm_resource_group.CSIT ]
+}
+
+resource "azurerm_subnet" "c" {
+ name = "subnet_c"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ virtual_network_name = azurerm_virtual_network.CSIT.name
+ address_prefix = var.vpc_cidr_c
+ depends_on = [ azurerm_resource_group.CSIT ]
+}
+
+resource "azurerm_subnet" "d" {
+ name = "subnet_d"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ virtual_network_name = azurerm_virtual_network.CSIT.name
+ address_prefix = var.vpc_cidr_d
+ depends_on = [ azurerm_resource_group.CSIT ]
+}
+
+# Create a security group of the Kiknos instances
+
+resource "azurerm_network_security_group" "CSIT" {
+ name = "CSIT"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ location = azurerm_resource_group.CSIT.location
+ security_rule {
+ name = "IpSec"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Udp"
+ source_port_range = "*"
+ destination_port_range = "500"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+ security_rule {
+ name = "IpSec-NAT"
+ priority = 101
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Udp"
+ source_port_range = "*"
+ destination_port_range = "4500"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+ security_rule {
+ name = "SSH"
+ priority = 102
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "22"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+ security_rule {
+ name = "InboundAll"
+ priority = 103
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "*"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+ security_rule {
+ name = "Outbound"
+ priority = 104
+ direction = "Outbound"
+ access = "Allow"
+ protocol = "*"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+ depends_on = [azurerm_virtual_network.CSIT]
+}
+
+# Create public IPs
+
+resource "azurerm_public_ip" "tg_public_ip" {
+ name = "tg_public_ip"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ allocation_method = "Dynamic"
+ depends_on = [ azurerm_resource_group.CSIT ]
+}
+
+resource "azurerm_public_ip" "dut1_public_ip" {
+ name = "dut1_public_ip"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ allocation_method = "Dynamic"
+ depends_on = [ azurerm_resource_group.CSIT ]
+}
+
+resource "azurerm_public_ip" "dut2_public_ip" {
+ name = "dut2_public_ip"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ allocation_method = "Dynamic"
+ depends_on = [ azurerm_resource_group.CSIT ]
+}
+
+# Create network interface
+
+resource "azurerm_network_interface" "tg_mng" {
+ name = "tg_mng"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ network_security_group_id = azurerm_network_security_group.CSIT.id
+ ip_configuration {
+ primary = "true"
+ name = "tg_mng_ip"
+ subnet_id = azurerm_subnet.a.id
+ private_ip_address_allocation = "Static"
+ private_ip_address = "172.16.0.10"
+ public_ip_address_id = azurerm_public_ip.tg_public_ip.id
+ }
+ depends_on = [ azurerm_resource_group.CSIT,
+ azurerm_subnet.a,
+ azurerm_public_ip.tg_public_ip ]
+}
+
+resource "azurerm_network_interface" "dut1_mng" {
+ name = "dut1_mng"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ network_security_group_id = azurerm_network_security_group.CSIT.id
+ ip_configuration {
+ primary = "true"
+ name = "dut1_mng_ip"
+ subnet_id = azurerm_subnet.a.id
+ private_ip_address_allocation = "Static"
+ private_ip_address = "172.16.0.11"
+ public_ip_address_id = azurerm_public_ip.dut1_public_ip.id
+ }
+ depends_on = [ azurerm_resource_group.CSIT,
+ azurerm_subnet.a,
+ azurerm_public_ip.dut1_public_ip ]
+}
+
+resource "azurerm_network_interface" "dut2_mng" {
+ name = "dut2_mng"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ network_security_group_id = azurerm_network_security_group.CSIT.id
+ ip_configuration {
+ primary = "true"
+ name = "dut2_mng_ip"
+ subnet_id = azurerm_subnet.a.id
+ private_ip_address_allocation = "Static"
+ private_ip_address = "172.16.0.12"
+ public_ip_address_id = azurerm_public_ip.dut2_public_ip.id
+ }
+ depends_on = [ azurerm_resource_group.CSIT,
+ azurerm_subnet.a,
+ azurerm_public_ip.dut2_public_ip ]
+}
+
+resource "azurerm_route_table" "b" {
+ name = "b"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_resource_group.CSIT,
+ azurerm_subnet.b ]
+ disable_bgp_route_propagation = false
+ route {
+ name = "route-10"
+ address_prefix = "10.0.0.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "192.168.10.254"
+ }
+ route {
+ name = "route-20"
+ address_prefix = "20.0.0.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "192.168.10.11"
+ }
+ route {
+ name = "tg2"
+ address_prefix = "192.168.20.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "192.168.10.11"
+ }
+}
+
+resource "azurerm_route_table" "c" {
+ name = "c"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_resource_group.CSIT,
+ azurerm_subnet.c ]
+ disable_bgp_route_propagation = false
+ route {
+ name = "route-10"
+ address_prefix = "10.0.0.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "200.0.0.101"
+ }
+ route {
+ name = "route-20"
+ address_prefix = "20.0.0.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "200.0.0.102"
+ }
+ route {
+ name = "tg1"
+ address_prefix = "192.168.10.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "200.0.0.101"
+ }
+ route {
+ name = "tg2"
+ address_prefix = "192.168.20.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "200.0.0.102"
+ }
+}
+
+resource "azurerm_route_table" "d" {
+ name = "d"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_resource_group.CSIT,
+ azurerm_subnet.d ]
+ disable_bgp_route_propagation = false
+ route {
+ name = "route-10"
+ address_prefix = "10.0.0.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "192.168.20.11"
+ }
+ route {
+ name = "route-20"
+ address_prefix = "20.0.0.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "192.168.20.254"
+ }
+ route {
+ name = "tg1"
+ address_prefix = "192.168.10.0/24"
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = "192.168.20.11"
+ }
+}
+
+resource "azurerm_subnet_route_table_association" "b" {
+ subnet_id = azurerm_subnet.b.id
+ route_table_id = azurerm_route_table.b.id
+}
+
+resource "azurerm_subnet_route_table_association" "c" {
+ subnet_id = azurerm_subnet.c.id
+ route_table_id = azurerm_route_table.c.id
+}
+
+resource "azurerm_subnet_route_table_association" "d" {
+ subnet_id = azurerm_subnet.d.id
+ route_table_id = azurerm_route_table.d.id
+}
+
+resource "azurerm_virtual_machine" "tg" {
+ name = "tg"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ primary_network_interface_id = azurerm_network_interface.tg_mng.id
+ network_interface_ids = [ azurerm_network_interface.tg_mng.id,
+ azurerm_network_interface.tg_if1.id,
+ azurerm_network_interface.tg_if2.id ]
+ vm_size = "Standard_F32s_v2"
+ delete_os_disk_on_termination = true
+ delete_data_disks_on_termination = true
+ storage_os_disk {
+ name = "OsDiskTG"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ managed_disk_type = "StandardSSD_LRS"
+ }
+ storage_image_reference {
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "18.04-LTS"
+ version = "latest"
+ }
+ os_profile {
+ computer_name = "tg"
+ admin_username = "ubuntu"
+ }
+ os_profile_linux_config {
+ disable_password_authentication = true
+ ssh_keys {
+ path = "/home/ubuntu/.ssh/authorized_keys"
+ key_data = file("~/.ssh/id_rsa.pub")
+ }
+ }
+ depends_on = [ azurerm_resource_group.CSIT,
+ azurerm_network_interface.tg_mng ]
+}
+
+resource "azurerm_virtual_machine" "dut1" {
+ name = "dut1"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ primary_network_interface_id = azurerm_network_interface.dut1_mng.id
+ network_interface_ids = [ azurerm_network_interface.dut1_mng.id,
+ azurerm_network_interface.dut1_if1.id,
+ azurerm_network_interface.dut1_if2.id ]
+ vm_size = "Standard_F32s_v2"
+ delete_os_disk_on_termination = true
+ delete_data_disks_on_termination = true
+ storage_os_disk {
+ name = "OsDiskDUT1"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ managed_disk_type = "StandardSSD_LRS"
+ }
+ storage_image_reference {
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "18.04-LTS"
+ version = "latest"
+ }
+ os_profile {
+ computer_name = "dut1"
+ admin_username = "ubuntu"
+ }
+ os_profile_linux_config {
+ disable_password_authentication = true
+ ssh_keys {
+ path = "/home/ubuntu/.ssh/authorized_keys"
+ key_data = file("~/.ssh/id_rsa.pub")
+ }
+ }
+ depends_on = [ azurerm_resource_group.CSIT,
+ azurerm_network_interface.dut1_mng ]
+}
+
+resource "azurerm_virtual_machine" "dut2" {
+ name = "dut2"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ primary_network_interface_id = azurerm_network_interface.dut2_mng.id
+ network_interface_ids = [ azurerm_network_interface.dut2_mng.id,
+ azurerm_network_interface.dut2_if1.id,
+ azurerm_network_interface.dut2_if2.id ]
+ vm_size = "Standard_F32s_v2"
+ delete_os_disk_on_termination = true
+ delete_data_disks_on_termination = true
+ storage_os_disk {
+ name = "OsDiskDUT2"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ managed_disk_type = "StandardSSD_LRS"
+ }
+ storage_image_reference {
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "18.04-LTS"
+ version = "latest"
+ }
+ os_profile {
+ computer_name = "dut2"
+ admin_username = "ubuntu"
+ }
+ os_profile_linux_config {
+ disable_password_authentication = true
+ ssh_keys {
+ path = "/home/ubuntu/.ssh/authorized_keys"
+ key_data = file("~/.ssh/id_rsa.pub")
+ }
+ }
+ depends_on = [ azurerm_resource_group.CSIT,
+ azurerm_network_interface.dut2_mng ]
+}
+
+data "azurerm_public_ip" "tg_public_ip" {
+ name = "tg_public_ip"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_virtual_machine.tg ]
+}
+
+data "azurerm_public_ip" "dut1_public_ip" {
+ name = "dut1_public_ip"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_virtual_machine.dut1 ]
+}
+
+data "azurerm_public_ip" "dut2_public_ip" {
+ name = "dut2_public_ip"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_virtual_machine.dut2 ]
+}
+
+# Provisioning
+
+resource "null_resource" "deploy_tg" {
+ depends_on = [ azurerm_virtual_machine.tg,
+ azurerm_network_interface.tg_if1,
+ azurerm_network_interface.tg_if2 ]
+ connection {
+ user = "ubuntu"
+ host = data.azurerm_public_ip.tg_public_ip.ip_address
+ private_key = file("~/.ssh/id_rsa")
+ }
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = "../../testbed-setup/ansible/site_azure.yaml"
+ force_handlers = true
+ }
+ hosts = ["tg"]
+ extra_vars = {
+ ansible_python_interpreter = "/usr/bin/python3"
+ azure = true
+ }
+ }
+ }
+}
+
+resource "null_resource" "deploy_dut1" {
+ depends_on = [ azurerm_virtual_machine.dut1,
+ azurerm_network_interface.dut1_if1,
+ azurerm_network_interface.dut1_if2 ]
+ connection {
+ user = "ubuntu"
+ host = data.azurerm_public_ip.dut1_public_ip.ip_address
+ private_key = file("~/.ssh/id_rsa")
+ }
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = "../../testbed-setup/ansible/site_azure.yaml"
+ force_handlers = true
+ }
+ hosts = ["sut"]
+ extra_vars = {
+ ansible_python_interpreter = "/usr/bin/python3"
+ azure = true
+ }
+ }
+ }
+}
+
+resource "null_resource" "deploy_dut2" {
+ depends_on = [ azurerm_virtual_machine.dut2,
+ azurerm_network_interface.dut2_if1,
+ azurerm_network_interface.dut2_if2 ]
+ connection {
+ user = "ubuntu"
+ host = data.azurerm_public_ip.dut2_public_ip.ip_address
+ private_key = file("~/.ssh/id_rsa")
+ }
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = "../../testbed-setup/ansible/site_azure.yaml"
+ force_handlers = true
+ }
+ hosts = ["sut"]
+ extra_vars = {
+ ansible_python_interpreter = "/usr/bin/python3"
+ azure = true
+ }
+ }
+ }
+}
+
+resource "null_resource" "deploy_topology" {
+ depends_on = [ azurerm_virtual_machine.tg,
+ azurerm_network_interface.tg_if1,
+ azurerm_network_interface.tg_if2,
+ azurerm_virtual_machine.dut1,
+ azurerm_network_interface.dut1_if1,
+ azurerm_network_interface.dut1_if2,
+ azurerm_virtual_machine.dut2,
+ azurerm_network_interface.dut2_if1,
+ azurerm_network_interface.dut2_if2 ]
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = "../../testbed-setup/ansible/cloud_topology.yaml"
+ }
+ hosts = ["local"]
+ extra_vars = {
+ ansible_python_interpreter = "/usr/bin/python3"
+ cloud_topology = "azure"
+ tg_if1_mac = data.azurerm_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.azurerm_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.azurerm_network_interface.dut1_if1.mac_address
+ dut1_if2_mac = data.azurerm_network_interface.dut1_if2.mac_address
+ dut2_if1_mac = data.azurerm_network_interface.dut2_if1.mac_address
+ dut2_if2_mac = data.azurerm_network_interface.dut2_if2.mac_address
+ tg_public_ip = data.azurerm_public_ip.tg_public_ip.ip_address
+ dut1_public_ip = data.azurerm_public_ip.dut1_public_ip.ip_address
+ dut2_public_ip = data.azurerm_public_ip.dut2_public_ip.ip_address
+ }
+ }
+ }
+}
+
+output "dbg_tg" {
+ value = "TG IP: ${data.azurerm_public_ip.tg_public_ip.ip_address}"
+}
+
+output "dbg_dut1" {
+ value = "DUT1 IP: ${data.azurerm_public_ip.dut1_public_ip.ip_address}"
+}
+
+output "dbg_dut2" {
+ value = "DUT2 IP: ${data.azurerm_public_ip.dut2_public_ip.ip_address}"
+}
--- /dev/null
+# Create a network interface for the data-plane traffic
+
+resource "azurerm_network_interface" "dut1_if2" {
+ name = "dut1_if2"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ network_security_group_id = azurerm_network_security_group.CSIT.id
+ enable_ip_forwarding = "true"
+ enable_accelerated_networking = "true"
+
+ ip_configuration {
+ name = "dut1_if2"
+ subnet_id = azurerm_subnet.c.id
+ private_ip_address_allocation = "Static"
+ private_ip_address = "200.0.0.101"
+ }
+}
+
+data "azurerm_network_interface" "dut1_if2" {
+ name = "dut1_if2"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_virtual_machine.dut1 ]
+}
+
+resource "azurerm_network_interface" "dut2_if1" {
+ name = "dut2_if1"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ network_security_group_id = azurerm_network_security_group.CSIT.id
+ enable_ip_forwarding = "true"
+ enable_accelerated_networking = "true"
+
+ ip_configuration {
+ name = "dut2_if1"
+ subnet_id = azurerm_subnet.c.id
+ private_ip_address_allocation = "Static"
+ private_ip_address = "200.0.0.102"
+ }
+}
+
+data "azurerm_network_interface" "dut2_if1" {
+ name = "dut2_if1"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_virtual_machine.dut2 ]
+}
+
+resource "azurerm_network_interface" "dut1_if1" {
+ name = "dut1_if1"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ network_security_group_id = azurerm_network_security_group.CSIT.id
+ enable_ip_forwarding = "true"
+ enable_accelerated_networking = "true"
+
+ ip_configuration {
+ name = "dut1_if1"
+ subnet_id = azurerm_subnet.b.id
+ private_ip_address_allocation = "Static"
+ private_ip_address = "192.168.10.11"
+ }
+}
+
+data "azurerm_network_interface" "dut1_if1" {
+ name = "dut1_if1"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_virtual_machine.dut1 ]
+}
+
+resource "azurerm_network_interface" "dut2_if2" {
+ name = "dut2_if2"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ network_security_group_id = azurerm_network_security_group.CSIT.id
+ enable_ip_forwarding = "true"
+ enable_accelerated_networking = "true"
+
+ ip_configuration {
+ name = "dut2_if2"
+ subnet_id = azurerm_subnet.d.id
+ private_ip_address_allocation = "Static"
+ private_ip_address = "192.168.20.11"
+ }
+}
+
+data "azurerm_network_interface" "dut2_if2" {
+ name = "dut2_if2"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_virtual_machine.dut2 ]
+}
+
+resource "azurerm_network_interface" "tg_if1" {
+ name = "tg_if1"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ network_security_group_id = azurerm_network_security_group.CSIT.id
+ enable_ip_forwarding = "true"
+ enable_accelerated_networking = "true"
+
+ ip_configuration {
+ name = "tg1"
+ subnet_id = azurerm_subnet.b.id
+ private_ip_address_allocation = "Static"
+ private_ip_address = "192.168.10.254"
+ }
+}
+
+data "azurerm_network_interface" "tg_if1" {
+ name = "tg_if1"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_virtual_machine.tg ]
+}
+
+resource "azurerm_network_interface" "tg_if2" {
+ name = "tg_if2"
+ location = azurerm_resource_group.CSIT.location
+ resource_group_name = azurerm_resource_group.CSIT.name
+ network_security_group_id = azurerm_network_security_group.CSIT.id
+ enable_ip_forwarding = "true"
+ enable_accelerated_networking = "true"
+
+ ip_configuration {
+ name = "tg2"
+ subnet_id = azurerm_subnet.d.id
+ private_ip_address_allocation = "Static"
+ private_ip_address = "192.168.20.254"
+ }
+}
+
+data "azurerm_network_interface" "tg_if2" {
+ name = "tg_if2"
+ resource_group_name = azurerm_resource_group.CSIT.name
+ depends_on = [ azurerm_virtual_machine.tg ]
+}
--- /dev/null
+---
+# file: cloud_topology.yaml
+
+- hosts: localhost
+ gather_facts: false
+ roles:
+ - role: topology
+ tags: topology
+
roles:
- role: cobbler
tags: cobbler
+ - role: docker
+ tags: docker
---
# file: lf_inventory/group_vars/all.yaml
-# General variables
-ansible_python_interpreter: '/usr/bin/python2.7'
-# provision via cobbler
-provision_enabled: False
-# name_servers_search is used in /etc/hosts file on target machine.
-name_servers_search: 'linuxfoundation.org'
-# name_servers is used in /etc/netplan/01-netcfg.yaml
-name_servers: "199.204.44.24, 199.204.47.54"
-
-# Proxy settings: Uncomment and fill the proper values. These variables will be
-# set globally by writing into /etc/environment file on target machine.
-#proxy_env:
-# http_proxy: http://proxy.com:80
-# HTTP_PROXY: http://proxy.com:80
-# https_proxy: http://proxy.com:80
-# HTTPS_PROXY: http://proxy.com:80
-# ftp_proxy: http://proxy.com:80
-# FTP_PROXY: http://proxy.com:80
-# no_proxy: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
-# NO_PROXY: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
-
-# Docker settings.
-docker_edition: 'ce'
-docker_channel: 'edge'
-docker_version: '18.05.0'
-docker_users: ['testuser']
-docker_repository: 'deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_channel }}'
-docker_apt_package_name: '{{ docker_version }}~{{ docker_edition }}~3-0~{{ ansible_distribution | lower }}'
-docker_daemon_environment_http:
- - 'HTTP_PROXY={{ proxy_env.http_proxy }}'
- - 'NO_PROXY={{ proxy_env.no_proxy }}'
-docker_daemon_environment_https:
- - 'HTTPS_PROXY={{ proxy_env.https_proxy }}'
- - 'NO_PROXY={{ proxy_env.no_proxy }}'
-
-# Kubernetes settings.
-kubernetes_channel: 'main'
-kubernetes_version: '1.11.0-00'
-kubernetes_repository: 'deb http://apt.kubernetes.io/ kubernetes-xenial {{ kubernetes_channel }}'
-kubernetes_apt_package_name: '{{ kubernetes_version }}'
-
-# DPDK settings.
-dpdk:
- target_dir: '/opt'
- version: 'dpdk-19.02'
- url: 'https://fast.dpdk.org/rel'
- build_targets:
- aarch64: "arm64-armv8a"
- x86_64: "x86_64-native"
-
-# WRK settings.
-wrk:
- target_dir: '/opt'
- version: '4.0.2'
- url: 'https://github.com/wg/wrk/archive'
-
-# Calibration settings.
-jitter:
- directory: '/tmp/pma_tools'
- core: 7
- iterations: 30
+# Ansible interpreter (for PIP)
+ansible_python_interpreter: "/usr/bin/python3"
kernel:
watchdog_cpumask: "0,18"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_cimc_hostname: '10.30.50.16'
kernel:
watchdog_cpumask: "0,18"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_cimc_hostname: '10.30.50.20'
kernel:
watchdog_cpumask: "0,18"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_cimc_hostname: '10.30.50.24'
+++ /dev/null
----
-# file: host_vars/10.30.51.28.yaml
-
-hostname: "t4-virl1"
-virl_l2_start: "10.30.52.2"
-virl_l2_end: "10.30.52.253"
-virl_l2_gateway: "10.30.52.1"
-virl_l2_network: "10.30.52.0/24"
-virl_l2_ip: "10.30.52.254"
-virl_public_port: "eth0"
-
-inventory_cimc_hostname: '10.30.50.28'
-cobbler_profile: 'ubuntu-18.04.2-server-x86_64'
-cpu_microarchitecture: "haswell"
+++ /dev/null
----
-# file: host_vars/10.30.51.29.yaml
-
-hostname: "t4-virl2"
-virl_l2_start: "10.30.53.2"
-virl_l2_end: "10.30.53.253"
-virl_l2_gateway: "10.30.53.1"
-virl_l2_network: "10.30.53.0/24"
-virl_l2_ip: "10.30.53.254"
-virl_public_port: "eth4"
-
-inventory_cimc_hostname: '10.30.50.29'
-cobbler_profile: 'ubuntu-18.04.2-server-x86_64'
+++ /dev/null
----
-# file: host_vars/10.30.51.30.yaml
-
-hostname: "t4-virl3"
-virl_l2_start: "10.30.54.2"
-virl_l2_end: "10.30.54.253"
-virl_l2_gateway: "10.30.54.1"
-virl_l2_network: "10.30.54.0/24"
-virl_l2_ip: "10.30.54.254"
-virl_public_port: "eth0"
-
-inventory_cimc_hostname: '10.30.50.30'
-cobbler_profile: 'ubuntu-18.04.2-server-x86_64'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 36864
+ nr_hugepages: 65536
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.41'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.42'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.45'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.46'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.50'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 36864
+ nr_hugepages: 65536
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.51'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.52'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 36864
+ nr_hugepages: 65536
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.53'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.54'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.50.57'
kernel:
watchdog_cpumask: "0,28,56,84"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.55.10'
kernel:
watchdog_cpumask: "0"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.55.11'
kernel:
watchdog_cpumask: "0"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.55.12'
kernel:
watchdog_cpumask: "0"
vm:
- nr_hugepages: 4096
+ nr_hugepages: 8192
max_map_count: 20000
inventory_ipmi_hostname: '10.30.55.13'
--- /dev/null
+---
+# file: host_vars/10.32.8.18.yaml
+
+hostname: "s33-t27-sut1"
+grub:
+ isolcpus: "1-23,25-47,49-71,73-95"
+ nohz_full: "1-23,25-47,49-71,73-95"
+ rcu_nocbs: "1-23,25-47,49-71,73-95"
+sysctl:
+ kernel:
+ watchdog_cpumask: "0,24,48,72"
+ vm:
+ nr_hugepages: 65536
+ max_map_count: 20000
+
+inventory_ipmi_hostname: '10.30.55.18'
+cobbler_profile: 'ubuntu-18.04.2-server-x86_64'
+cpu_microarchitecture: "cascadelake"
--- /dev/null
+---
+# file: host_vars/10.32.8.19.yaml
+
+hostname: "s34-t27-tg1"
+grub:
+ isolcpus: "1-27,29-55,57-83,85-111"
+ nohz_full: "1-27,29-55,57-83,85-111"
+ rcu_nocbs: "1-27,29-55,57-83,85-111"
+sysctl:
+ kernel:
+ watchdog_cpumask: "0,28,56,84"
+ vm:
+ nr_hugepages: 8192
+ max_map_count: 20000
+
+inventory_ipmi_hostname: '10.30.55.19'
+cobbler_profile: 'ubuntu-18.04.2-server-x86_64'
+cpu_microarchitecture: "cascadelake"
--- /dev/null
+---
+# file: host_vars/10.32.8.20.yaml
+
+hostname: "s35-t28-sut1"
+grub:
+ isolcpus: "1-23,25-47,49-71,73-95"
+ nohz_full: "1-23,25-47,49-71,73-95"
+ rcu_nocbs: "1-23,25-47,49-71,73-95"
+sysctl:
+ kernel:
+ watchdog_cpumask: "0,24,48,72"
+ vm:
+ nr_hugepages: 65536
+ max_map_count: 20000
+
+inventory_ipmi_hostname: '10.30.55.20'
+cobbler_profile: 'ubuntu-18.04.2-server-x86_64'
+cpu_microarchitecture: "cascadelake"
--- /dev/null
+---
+# file: host_vars/10.32.8.21.yaml
+
+hostname: "s36-t28-tg1"
+grub:
+ isolcpus: "1-27,29-55,57-83,85-111"
+ nohz_full: "1-27,29-55,57-83,85-111"
+ rcu_nocbs: "1-27,29-55,57-83,85-111"
+sysctl:
+ kernel:
+ watchdog_cpumask: "0,28,56,84"
+ vm:
+ nr_hugepages: 8192
+ max_map_count: 20000
+
+inventory_ipmi_hostname: '10.30.55.21'
+cobbler_profile: 'ubuntu-18.04.2-server-x86_64'
+cpu_microarchitecture: "cascadelake"
--- /dev/null
+---
+# file: host_vars/10.32.8.22.yaml
+
+hostname: "s37-t29-sut1"
+grub:
+ isolcpus: "1-23,25-47,49-71,73-95"
+ nohz_full: "1-23,25-47,49-71,73-95"
+ rcu_nocbs: "1-23,25-47,49-71,73-95"
+sysctl:
+ kernel:
+ watchdog_cpumask: "0,24,48,72"
+ vm:
+ nr_hugepages: 65536
+ max_map_count: 20000
+
+inventory_ipmi_hostname: '10.30.55.22'
+cobbler_profile: 'ubuntu-18.04.2-server-x86_64'
+cpu_microarchitecture: "cascadelake"
--- /dev/null
+---
+# file: host_vars/10.32.8.23.yaml
+
+hostname: "s38-t29-tg1"
+grub:
+ isolcpus: "1-27,29-55,57-83,85-111"
+ nohz_full: "1-27,29-55,57-83,85-111"
+ rcu_nocbs: "1-27,29-55,57-83,85-111"
+sysctl:
+ kernel:
+ watchdog_cpumask: "0,28,56,84"
+ vm:
+ nr_hugepages: 8192
+ max_map_count: 20000
+
+inventory_ipmi_hostname: '10.30.55.23'
+cobbler_profile: 'ubuntu-18.04.2-server-x86_64'
+cpu_microarchitecture: "cascadelake"
10.30.51.57: #s10-t24-tg1 - skylake
10.30.51.60: #s16-t32-tg1 - skylake
10.32.8.10: #s28-t26t35-tg1 - skylake
+ 10.32.8.19: #s34-t27-tg1 - cascadelake
+ 10.32.8.21: #s36-t28-tg1 - cascadelake
+ 10.32.8.23: #s38-t29-tg1 - cascadelake
sut:
hosts:
10.30.51.17: #t1-sut1 - haswell
10.32.8.11: #s29-t26-sut1 - denverton
10.32.8.12: #s30-t35-sut1 - denverton
10.32.8.13: #s31-t35-sut2 - denverton
-
+ 10.32.8.18: #s33-t27-sut1 - cascadelake
+ 10.32.8.20: #s35-t28-sut1 - cascadelake
+ 10.32.8.22: #s37-t29-sut1 - cascadelake
vpp_device:
hosts:
10.30.51.50: #s1-t11-sut1 - skylake
---
-# file: group_vars/all.yaml
+# file: sample_inventory/group_vars/all.yaml
-# General variables
-ansible_python_interpreter: '/usr/bin/python2.7'
-# provision via cobbler
-provision_enabled: True
-# name_servers_search is used in /etc/hosts file on target machine.
-name_servers_search: 'linuxfoundation.org'
-# name_servers is used in /etc/netplan/01-netcfg.yaml
-name_servers: "199.204.44.24, 199.204.47.54"
-
-# Proxy settings: Uncomment and fill the proper values. These variables will be
-# set globally by writing into /etc/environment file on target machine.
-#proxy_env:
-# http_proxy: http://proxy.com:80
-# HTTP_PROXY: http://proxy.com:80
-# https_proxy: http://proxy.com:80
-# HTTPS_PROXY: http://proxy.com:80
-# ftp_proxy: http://proxy.com:80
-# FTP_PROXY: http://proxy.com:80
-# no_proxy: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
-# NO_PROXY: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
-
-# Docker settings.
-docker_edition: 'ce'
-docker_channel: 'edge'
-docker_version: '18.05.0'
-docker_users: ['testuser']
-docker_repository: 'deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_channel }}'
-docker_apt_package_name: '{{ docker_version }}~{{ docker_edition }}~3-0~{{ ansible_distribution | lower }}'
-docker_daemon_environment_http:
- - 'HTTP_PROXY={{ proxy_env.http_proxy }}'
- - 'NO_PROXY={{ proxy_env.no_proxy }}'
-docker_daemon_environment_https:
- - 'HTTPS_PROXY={{ proxy_env.https_proxy }}'
- - 'NO_PROXY={{ proxy_env.no_proxy }}'
-
-# Kubernetes settings.
-kubernetes_channel: 'main'
-kubernetes_version: '1.11.0-00'
-kubernetes_repository: 'deb http://apt.kubernetes.io/ kubernetes-xenial {{ kubernetes_channel }}'
-kubernetes_apt_package_name: '{{ kubernetes_version }}'
-
-# DPDK settings.
-dpdk:
- target_dir: '/opt'
- version: 'dpdk-19.02'
- url: 'https://fast.dpdk.org/rel'
- build_targets:
- aarch64: "arm64-armv8a"
- x86_64: "x86_64-native"
-
-# WRK settings.
-wrk:
- target_dir: '/opt'
- version: '4.0.2'
- url: 'https://github.com/wg/wrk/archive'
-
-# Calibration settings.
-jitter:
- directory: '/tmp/pma_tools'
- core: 7
- iterations: 30
+# Ansible interpreter (for PIP)
+ansible_python_interpreter: "python3"
all:
children:
- skylake: # Architecture: (options) skylake, haswell, taishan...
- children:
- tg:
- hosts:
- 1.1.1.1: null #t1-tg
- sut:
- hosts:
- 2.2.2.2: null #t1-sut1
- 3.3.3.3: null #t1-sut2
+ tg:
+ hosts:
+ 1.1.1.1: #t1-tg
+ sut:
+ hosts:
+ 2.2.2.2: #t1-sut1
+ 3.3.3.3: #t1-sut2
--- /dev/null
+---
+# file: roles/aws/defaults/main.yaml
+
--- /dev/null
+---
+# file: roles/aws/handlers/main.yaml
+
+- name: Reboot server
+ reboot:
+ reboot_timeout: 3600
+ tags:
+ - reboot-server
--- /dev/null
+---
+# file: roles/aws/tasks/main.yaml
+
+- name: AWS - Edit repositories
+ include_tasks: '{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml'
+ tags: edit-repo
+
+- name: AWS - Get vfio-pci Patcher Script
+ get_url:
+ url: "https://github.com/amzn/amzn-drivers/raw/master/userspace/dpdk/enav2-vfio-patch/vfio-wc-patch.sh"
+ dest: "/opt/vfio-wc-patch.sh"
+ mode: "744"
+ register: "vfio_patch_downloaded"
+ tags:
+ - vfio-aws-patch
+
+- name: AWS - Patch vfio-pci
+ shell: "/bin/bash /opt/vfio-wc-patch.sh"
+ when: "vfio_patch_downloaded"
+ tags:
+ - vfio-aws-patch
+
+- name: AWS - Load Kernel Modules By Default
+ lineinfile:
+ path: "/etc/modules"
+ state: "present"
+ line: "{{ item }}"
+ with_items:
+ - "vfio-pci"
+ - "igb_uio"
+ register: "modules_added"
+ tags:
+ - load-kernel-modules
+
+- name: AWS - Add Kernel Modules Options
+ lineinfile:
+ path: "/etc/modprobe.d/igb_uio.conf"
+ state: "present"
+ line: "{{ item }}"
+ create: "yes"
+ with_items:
+ - "options igb_uio wc_activate=1"
+ when: "modules_added"
+ register: "modules_added"
+ tags:
+ - load-kernel-modules
+
+- name: AWS - Reload systemd-modules
+ systemd:
+ name: "systemd-modules-load"
+ state: "restarted"
+ when: "modules_added"
+ tags:
+ - reload-systemd-modules
+
+- name: AWS - Performance Tuning - Adjust nr_hugepages
+ sysctl:
+ name: "vm.nr_hugepages"
+ value: "8192"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
--- /dev/null
+---
+# file: roles/aws/tasks/ubuntu_bionic.yaml
+
+- name: AWS - Enable deb-src APT Repositories
+ replace:
+ path: "/etc/apt/sources.list"
+ regexp: "^# deb-src "
+ replace: "deb-src "
+ tags:
+ - enable-src-repo
--- /dev/null
+---
+# file: roles/azure/defaults/main.yaml
+
--- /dev/null
+---
+# file: roles/azure/handlers/main.yaml
+
+- name: Reboot server
+ reboot:
+ reboot_timeout: 3600
+ tags:
+ - reboot-server
--- /dev/null
+---
+# file: roles/azure/tasks/main.yaml
+
+- name: Azure - Load Kernel Modules By Default
+ lineinfile:
+ path: "/etc/modules"
+ state: "present"
+ line: "{{ item }}"
+ with_items:
+ - "vfio-pci"
+ - "ib_uverbs"
+ - "mlx4_ib"
+ - "mlx5_ib"
+ tags:
+ - load-kernel-modules
--- /dev/null
+---
+# file: roles/calibration/defaults/main.yaml
+
+packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}"
+
+packages_base:
+ - []
+
+packages_by_distro:
+ ubuntu:
+ - "build-essential"
+
+packages_by_arch:
+ aarch64:
+ - []
+ x86_64:
+ - []
+
+pma_directory: "/tmp/pma_tools"
+jitter_core: 7
+jitter_iterations: 30
--- /dev/null
+---
+# file: roles/calibration/tasks/aarch64.yaml
---
# file: roles/calibration/tasks/main.yaml
-- name: Run Spectre Meltdown checker
- raw: 'wget -qO - https://meltdown.ovh | sudo bash -s - --no-color || true'
- register: spectre_meltdown_output
- tags: run-spectre-meltdown-checker
+- name: Print Ansible facts
+ debug: var=ansible_facts
-- debug: var=spectre_meltdown_output.stdout_lines
- tags: run-spectre-meltdown-checker
+- name: Calibration - Install Distribution - Release - Machine Prerequisites
+ package:
+ name: "{{ packages | flatten(levels=1) }}"
+ state: latest
+ update_cache: true
+ tags:
+ - install-dependencies
-- name: x86 specific
- import_tasks: x86_64.yaml
- when: ansible_machine == 'x86_64'
+- name: Calibration - Get Spectre Meltdown Checker
+ get_url:
+ url: "https://meltdown.ovh"
+ dest: "/opt/spectre-meltdown-checker.sh"
+ mode: "744"
+ tags:
+ - check-spectre-meltdown
+
+- name: Calibration - Run Spectre Meltdown Checker
+ shell: "/opt/spectre-meltdown-checker.sh --no-color || true"
+ async: 60
+ poll: 0
+ ignore_errors: yes
+ register: spectre_meltdown_async
+ tags:
+ - check-spectre-meltdown
+
+- name: Calibration - {{ ansible_machine }} Specific
+ include_tasks: "{{ ansible_machine }}.yaml"
+ tags:
+ - check-machine-specific
+ - check-jitter-tool
+
+- name: Calibration - Get BIOS info
+ shell: "dmidecode -t bios"
+ ignore_errors: yes
+ register: dmidecode_bios
+ tags:
+ - check-bios
+
+- debug: var=dmidecode_bios.stdout_lines
+ tags:
+ - check-bios
+
+- name: Check sync status
+ async_status:
+ jid: "{{ spectre_meltdown_async.ansible_job_id }}"
+ register: "spectre_meltdown_poll_results"
+ until: spectre_meltdown_poll_results.finished
+ retries: 30
+
+- debug: var=spectre_meltdown_poll_results.stdout_lines
+ tags:
+ - check-spectre-meltdown
---
# file: roles/calibration/tasks/x86_64.yaml
-- name: Clone Jitter tool
+- name: Calibration - Clone PMA Tool
git:
- repo: 'https://gerrit.fd.io/r/pma_tools'
- dest: '{{ jitter.directory }}'
- tags: run-jitter-tool
+ repo: "https://gerrit.fd.io/r/pma_tools"
+ dest: "{{ pma_directory }}"
+ tags:
+ - check-jitter-tool
-- name: Compile Jitter tool
- raw: 'cd {{ jitter.directory }}/jitter && make'
- tags: run-jitter-tool
+- name: Calibration - Compile PMA Tool
+ raw: "cd {{ pma_directory }}/jitter && make"
+ tags:
+ - check-jitter-tool
-- name: Run Jitter tool
- raw: '{{ jitter.directory }}/jitter/jitter -c {{ jitter.core }} -i {{ jitter.iterations }} -f'
+- name: Calibration - Run Jitter Tool
+ shell: "{{ pma_directory }}/jitter/jitter -c {{ jitter_core }} -i {{ jitter_iterations }} -f"
become: yes
- register: jitter_output
- tags: run-jitter-tool
+ async: 60
+ poll: 0
+ ignore_errors: yes
+ register: jitter_async
+ tags:
+ - check-jitter-tool
-- debug: var=jitter_output.stdout_lines
- tags: run-jitter-tool
+- name: Check sync status
+ async_status:
+ jid: "{{ jitter_async.ansible_job_id }}"
+ register: "jitter_poll_results"
+ until: jitter_poll_results.finished
+ retries: 30
+
+- debug: var=jitter_poll_results.stdout_lines
+ tags:
+ - check-jitter-tool
--- /dev/null
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+function die () {
+ # Print the message to standard error and exit with the error code
+ # specified by the second argument.
+ #
+ # Hardcoded values:
+ # - The default error message.
+ # Arguments:
+ # - ${1} - The whole error message, be sure to quote. Optional.
+ # - ${2} - The code to exit with. Optional, default: 1.
+
+ set +eu
+ warn "${1:-Unspecified run-time error occurred!}"
+ exit "${2:-1}"
+}
+
+
+function set_eligibility_off {
+ # Set Nomad eligibility to ineligible for scheduling. Fail otherwise.
+
+ set -euo pipefail
+
+ node_id="$(nomad node status | grep $(hostname) | cut -d ' ' -f 1)" || die
+ node_status="$(nomad node status | grep $(hostname))" || die
+
+ if [[ "${node_status}" != *"ineligible"* ]]; then
+ nomad node eligibility -disable "${node_id}" || die
+ node_status="$(nomad node status | grep $(hostname))" || die
+ if [[ "${node_status}" != *"ineligible"* ]]; then
+ die "Set eligibility off failed!"
+ fi
+ fi
+}
+
+
+function set_eligibility_on {
+ # Set Nomad eligibility to eligible for scheduling. Fail otherwise.
+
+ set -euo pipefail
+
+ node_id="$(nomad node status | grep $(hostname) | cut -d ' ' -f 1)" || die
+ node_status="$(nomad node status | grep $(hostname))" || die
+
+ if [[ "${node_status}" == *"ineligible"* ]]; then
+ nomad node eligibility -enable "${node_id}" || die
+ node_status="$(nomad node status | grep $(hostname))" || die
+ if [[ "${node_status}" == *"ineligible"* ]]; then
+ die "Set eligibility on failed!"
+ fi
+ fi
+}
+
+
+function restart_vfs_service {
+ # Stop and start the VF service. This will reinitialize VFs and driver mappings.
+
+ set -euo pipefail
+
+ warn "Restarting VFs service (this may take few minutes)..."
+ sudo service csit-initialize-vfs stop || die "Failed to stop VFs service!"
+ sudo service csit-initialize-vfs start || die "Failed to start VFs service!"
+}
+
+
+function wait_for_pending_containers {
+ # Wait in a loop, for a defined amount of time, for pending containers
+ # to quit gracefully. If the parameter "force" is specified, force kill
+ # any containers that remain.
+
+ # Arguments:
+ # - ${@} - Script parameters.
+
+ set -euo pipefail
+
+ retries=60
+ wait_time=60
+ containers=(docker ps --quiet --filter name=csit*)
+
+ for i in $(seq 1 ${retries}); do
+ mapfile -t pending_containers < <( ${containers[@]} ) || die
+ warn "Waiting for pending containers [${pending_containers[@]}] ..."
+ if [ ${#pending_containers[@]} -eq 0 ]; then
+ break
+ fi
+ sleep "${wait_time}" || die
+ done
+ if [ ${#pending_containers[@]} -ne 0 ]; then
+ if [[ "${1-}" == "force" ]]; then
+ warn "Force killing [${pending_containers[@]}] ..."
+ docker rm --force ${pending_containers[@]} || die
+ else
+ die "Still few containers running!"
+ fi
+ fi
+}
+
+
+function warn () {
+ # Print the message to standard error.
+ #
+ # Arguments:
+ # - ${@} - The text of the message.
+
+ echo "$@" >&2
+}
+
+
+set_eligibility_off || die
+wait_for_pending_containers "${@}" || die
+restart_vfs_service || die
+set_eligibility_on || die
--- /dev/null
+---
+# file: roles/cleanup/tasks/kill_containers.yaml
+
+- name: Kill containers
+ block:
+ - name: Kill container - Get running Docker containers
+ shell: "docker ps -aq"
+ register: running_containers
+ changed_when: no
+ tags: kill-containers
+
+ - name: Kill container - Remove all Docker containers
+ shell: "docker rm --force {{ item }}"
+ with_items: "{{ running_containers.stdout_lines }}"
+ tags: kill-containers
+
+ - name: Kill container - Get running LXC containers
+ shell: "lxc-ls"
+ register: running_containers
+ changed_when: no
+ tags: kill-containers
+
+ - name: Kill container - Remove all LXC containers
+ shell: "lxc-destroy --force -n {{ item }}"
+ with_items: "{{ running_containers.stdout_lines }}"
+ tags: kill-containers
+ rescue:
+ - fail:
+ msg: "Kill containers failed!"
--- /dev/null
+---
+# file: roles/cleanup/tasks/kill_process.yaml
+
+- name: Kill process - {{ process }}
+ block:
+ - name: Kill process - Get pid of {{ process }}
+ shell: "ps -ef | grep -v grep | grep -w {{ process }} | awk '{print $2}'"
+ when: >
+ process is defined and process != ""
+ register: running_processes
+ tags: kill-process
+
+ - name: Kill process - Safe kill {{ process }}
+ shell: "kill {{ item }}"
+ with_items: "{{ running_processes.stdout_lines }}"
+ tags: kill-process
+
+ - wait_for:
+ path: "/proc/{{ item }}/status"
+ state: absent
+ with_items: "{{ running_processes.stdout_lines }}"
+ ignore_errors: yes
+ register: killed_processes
+ tags: kill-process
+
+ - name: Kill process - Force kill {{ process }}
+ shell: "kill -9 {{ item }}"
+ with_items: "{{ killed_processes.results | select('failed') | map(attribute='item') | list }}"
+ tags: kill-process
+ rescue:
+ - fail:
+ msg: "Kill process {{ process }} failed!"
--- /dev/null
+---
+# file: roles/cleanup/tasks/main.yaml
+# purpose: Structured per server cleanup tasks.
+# - main:
+# - tg:
+# - Run tasks on TG servers only.
+# - Cleanup processes (T-Rex).
+# - sut:
+# - Run tasks on SUT servers only.
+# - Cleanup file leftovers (logs).
+# - Cleanup packages (VPP, Honeycomb).
+# - Cleanup processes (qemu, l3fwd, testpmd, docker, kubernetes)
+# - Cleanup interfaces.
+# - vpp_device
+# - Run tasks on vpp_device servers only.
+# - Reset SRIOV
+
+- name: tg specific
+ include_tasks: tg.yaml
+ when: "'tg' in group_names"
+ tags: cleanup
+
+- name: sut specific
+ include_tasks: sut.yaml
+ when: "'sut' in group_names"
+ tags: cleanup
+
+- name: vpp_device specific
+ include_tasks: vpp_device.yaml
+ when: "'vpp_device' in group_names"
+ tags: cleanup
--- /dev/null
+---
+# file: roles/cleanup/tasks/remove_package.yaml
+
+- name: Remove package - Fix corrupted apt
+ shell: 'dpkg --configure -a'
+ when: >
+ ansible_distribution == 'Ubuntu'
+ tags: remove-package
+
+- name: Remove package - {{ package }}
+ apt:
+ name: '{{ package }}'
+ force: yes
+ purge: yes
+ state: absent
+ failed_when: no
+ when: >
+ ansible_distribution == 'Ubuntu'
+ tags: remove-package
--- /dev/null
+---
+# file: roles/cleanup/tasks/sut.yaml
+
+- name: Host cleanup
+ block:
+ - name: Kill processes - qemu
+ import_tasks: kill_process.yaml
+ vars:
+ process: "qemu"
+ tags: kill-process
+
+ - name: Kill processes - l3fwd
+ import_tasks: kill_process.yaml
+ vars:
+ process: "l3fwd"
+ tags: kill-process
+
+ - name: Kill processes - testpmd
+ import_tasks: kill_process.yaml
+ vars:
+ process: "testpmd"
+ tags: kill-process
+
+ - name: Kill processes - iperf3
+ import_tasks: kill_process.yaml
+ vars:
+ process: "iperf3"
+ tags: kill-process
+
+ - name: Kill processes - vpp_echo
+ import_tasks: kill_process.yaml
+ vars:
+ process: "vpp_echo"
+ tags: kill-process
+
+ - name: Remove file or dir - Core zip file
+ file:
+ state: absent
+ path: "/tmp/*tar.lzo.lrz.xz*"
+ tags: remove-file-dir
+
+ - name: Remove file or dir - Core dump file
+ file:
+ state: absent
+ path: "/tmp/*core*"
+ tags: remove-file-dir
+
+ - name: Kill containers - Remove all containers
+ import_tasks: kill_containers.yaml
+ tags: kill-containers
+
+ - name: Kubernetes - Reset
+ raw: 'kubeadm reset --force'
+ tags: kill-kubernetes
+
+ - name: Remove packages - Remove VPP
+ import_tasks: remove_package.yaml
+ vars:
+ package: "*vpp*"
+ tags: remove-package
--- /dev/null
+---
+# file: roles/cleanup/tasks/tg.yaml
+
+- name: Kill processes - TRex
+ import_tasks: kill_process.yaml
+ vars:
+ process: "_t-rex"
+ when: docker_tg is undefined
+ tags: kill-process
+
+- name: Kill processes - WRK
+ import_tasks: kill_process.yaml
+ vars:
+ process: "wrk"
+ tags: kill-process
+ when: docker_tg is undefined
\ No newline at end of file
--- /dev/null
+---
+# file: roles/cleanup/tasks/vpp_device.yaml
+
+- name: Reset vpp_device binary
+ template:
+ src: 'files/reset_vppdevice.sh'
+ dest: '/usr/local/bin'
+ owner: 'root'
+ group: 'root'
+ mode: '644'
+ tags: reset-sriov
+
+- name: Reset vpp_device
+ raw: 'reset_vppdevice.sh --force'
+ tags: reset-sriov
subnet 10.30.51.0 netmask 255.255.255.0 {
option routers 10.30.51.1;
option domain-name "linuxfoundation.org";
- option domain-name-servers 199.204.44.24, 199.204.47.54;
+ option domain-name-servers 1.1.1.1, 8.8.8.8;
option subnet-mask 255.255.255.0;
range dynamic-bootp 10.30.51.2 10.30.51.254;
default-lease-time 600;
### Package selection
tasksel tasksel/first multiselect ubuntu-server
# Individual additional packages to install
-d-i pkgsel/include string openssh-server python2.7
+d-i pkgsel/include string openssh-server python2.7 python3.6
# Whether to upgrade packages after debootstrap.
# Allowed values: none, safe-upgrade, full-upgrade
#d-i pkgsel/upgrade select none
---
# file: roles/cobbler/tasks/main.yaml
-- name: Sync the cobbler docker directory
+- name: Cobbler - Sync the cobbler docker directory
synchronize:
- src: 'files'
- dest: '/home/{{ ansible_user }}/cobbler_docker'
+ src: "files"
+ dest: "/home/{{ ansible_user }}/cobbler_docker"
register: __cobbler_image_built
- tags: cobbler-build-image
+ tags:
+ - cobbler-build-image
-- name: Build the cobbler docker image
+- name: Cobbler - Build the cobbler docker image
docker_image:
- path: '/home/{{ ansible_user }}/cobbler_docker/files'
- name: 'csit/cobbler'
+ path: "/home/{{ ansible_user }}/cobbler_docker/files"
+ name: "csit/cobbler"
buildargs:
- cobbler_pass: '{{ cobbler_pass }}'
- cobbler_web_pass: '{{ cobbler_password }}'
- cobbler_ip_addr: '{{ inventory_hostname }}'
+ cobbler_pass: "{{ cobbler_pass }}"
+ cobbler_web_pass: "{{ cobbler_password }}"
+ cobbler_ip_addr: "{{ inventory_hostname }}"
when: __cobbler_image_built
- tags: cobbler-build-image
+ tags:
+ - cobbler-build-image
-- name: Run Cobbler image
+- name: Cobbler - Run Cobbler image
docker_container:
- name: 'cobbler'
- image: 'csit/cobbler'
- network_mode: 'host'
+ name: "cobbler"
+ image: "csit/cobbler"
+ network_mode: "host"
volumes:
- - '/mnt:/mnt:ro'
+ - "/mnt:/mnt:ro"
register: __cobbler_image_running
- tags: cobbler-run-image
+ tags:
+ - cobbler-run-image
-- name: Run cobbler setup get-loaders
- command: 'docker exec -i cobbler cobbler get-loaders'
+- name: Cobbler - Run cobbler setup get-loaders
+ command: "docker exec -i cobbler cobbler get-loaders"
when: __cobbler_image_running
- tags: cobbler-run-image
+ tags:
+ - cobbler-run-image
-- name: Run cobbler setup sync
- command: 'docker exec -i cobbler cobbler sync'
+- name: Cobbler - Run cobbler setup sync
+ command: "docker exec -i cobbler cobbler sync"
when: __cobbler_image_running
- tags: cobbler-run-image
+ tags:
+ - cobbler-run-image
-- name: Add Ubuntu 18.04.2 Server x86_64 to cobbler
- include_tasks: 'ubuntu-18.04.2-server-x86_64.yaml'
+- name: Cobbler - Add Ubuntu 18.04.2 Server x86_64 to cobbler
+ include_tasks: "ubuntu-18.04.2-server-x86_64.yaml"
when: __cobbler_image_running
- tags: cobbler-run-image
+ tags:
+ - cobbler-run-image
dest: '/mnt/ubuntu-18.04.2-server-amd64.iso'
checksum: 'sha256:a2cb36dc010d98ad9253ea5ad5a07fd6b409e3412c48f1860536970b073c98f5'
register: __iso_downloaded
- tags: cobbler-import-image
+ tags:
+ - cobbler-import-image
- name: Create directory for Ubuntu 18.04.2 Server x86_64 mount
file:
path: '/mnt/ubuntu-18.04.2-server-x86_64'
state: 'directory'
register: __mount_directory_created
- tags: cobbler-import-image
+ tags:
+ - cobbler-import-image
- name: Mount Ubuntu 18.04.2 Server x86_64 iso
mount:
opts: 'ro,loop'
state: mounted
when: __iso_downloaded and __mount_directory_created
- tags: cobbler-import-image
+ tags:
+ - cobbler-import-image
- name: Run cobbler distro import for Ubuntu 18.04.2 Server x86_64
command: |
--path=/mnt/ubuntu-18.04.2-server-x86_64
--name=ubuntu-18.04.2-server-x86_64
--kickstart=/var/lib/cobbler/kickstarts/ubuntu-18.04.2-server-x86_64.seed
- tags: cobbler-import-image
+ tags:
+ - cobbler-import-image
--- /dev/null
+---
+# file: roles/common/defaults/main.yaml
+
+packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}"
+
+packages_base:
+ - "autoconf"
+ - "cgroup-tools"
+ - "dkms"
+ - "iperf3"
+ - "linux-tools-common"
+ - "qemu-system"
+ - "socat"
+ - "unzip"
+ - "virtualenv"
+
+packages_by_distro:
+ ubuntu:
+ - "build-essential"
+ - "libpcap-dev"
+ - "python-all"
+ - "python-apt"
+ - "python-cffi"
+ - "python-cffi-backend"
+ - "python-dev"
+ - "python-pip"
+ - "python-setuptools"
+ - "python3-all"
+ - "python3-apt"
+ - "python3-cffi"
+ - "python3-cffi-backend"
+ - "python3-dev"
+ - "python3-pip"
+ - "python3-setuptools"
+
+packages_by_arch:
+ aarch64:
+ - "gfortran"
+ - "libblas-dev"
+ - "liblapack-dev"
+ x86_64:
+ - []
+
+# Proxy settings: Uncomment and fill the proper values. These variables will be
+# set globally by writing into /etc/environment file on target machine.
+#proxy_env:
+# http_proxy: http://proxy.com:80
+# HTTP_PROXY: http://proxy.com:80
+# https_proxy: http://proxy.com:80
+# HTTPS_PROXY: http://proxy.com:80
+# ftp_proxy: http://proxy.com:80
+# FTP_PROXY: http://proxy.com:80
+# no_proxy: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
+# NO_PROXY: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
+
+++ /dev/null
-deb http://ca.ports.ubuntu.com/ubuntu-ports/ bionic main restricted
-deb-src http://ca.ports.ubuntu.com/ubuntu-ports/ bionic main restricted
-
-## Major bug fix updates produced after the final release of the
-## distribution.
-deb http://ca.ports.ubuntu.com/ubuntu-ports/ bionic-updates main restricted
-deb-src http://ca.ports.ubuntu.com/ubuntu-ports/ bionic-updates main restricted
-
-## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
-## team. Also, please note that software in universe WILL NOT receive any
-## review or updates from the Ubuntu security team.
-deb http://ca.ports.ubuntu.com/ubuntu-ports/ bionic universe
-deb-src http://ca.ports.ubuntu.com/ubuntu-ports/ bionic universe
-deb http://ca.ports.ubuntu.com/ubuntu-ports/ bionic-updates universe
-deb-src http://ca.ports.ubuntu.com/ubuntu-ports/ bionic-updates universe
-
-## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
-## team, and may not be under a free licence. Please satisfy yourself as to
-## your rights to use the software. Also, please note that software in
-## multiverse WILL NOT receive any review or updates from the Ubuntu
-## security team.
-deb http://ca.ports.ubuntu.com/ubuntu-ports/ bionic multiverse
-deb-src http://ca.ports.ubuntu.com/ubuntu-ports/ bionic multiverse
-deb http://ca.ports.ubuntu.com/ubuntu-ports/ bionic-updates multiverse
-deb-src http://ca.ports.ubuntu.com/ubuntu-ports/ bionic-updates multiverse
-
-## N.B. software from this repository may not have been tested as
-## extensively as that contained in the main release, although it includes
-## newer versions of some applications which may provide useful features.
-## Also, please note that software in backports WILL NOT receive any review
-## or updates from the Ubuntu security team.
-deb http://ca.ports.ubuntu.com/ubuntu-ports/ bionic-backports main restricted universe multiverse
-deb-src http://ca.ports.ubuntu.com/ubuntu-ports/ bionic-backports main restricted universe multiverse
-
-deb http://ports.ubuntu.com/ubuntu-ports bionic-security main restricted
-deb-src http://ports.ubuntu.com/ubuntu-ports bionic-security main restricted
-deb http://ports.ubuntu.com/ubuntu-ports bionic-security universe
-deb-src http://ports.ubuntu.com/ubuntu-ports bionic-security universe
-deb http://ports.ubuntu.com/ubuntu-ports bionic-security multiverse
-deb-src http://ports.ubuntu.com/ubuntu-ports bionic-security multiverse
-
-## Uncomment the following two lines to add software from Canonical's
-## 'partner' repository.
-## This software is not part of Ubuntu, but is offered by Canonical and the
-## respective vendors as a service to Ubuntu users.
-# deb http://archive.canonical.com/ubuntu bionic partner
-# deb-src http://archive.canonical.com/ubuntu bionic partner
+++ /dev/null
-# deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted
-
-# deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates main restricted
-# deb http://security.ubuntu.com/ubuntu bionic-security main restricted
-
-# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
-# newer versions of the distribution.
-deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted
-deb-src http://us.archive.ubuntu.com/ubuntu/ bionic main restricted
-
-## Major bug fix updates produced after the final release of the
-## distribution.
-deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates main restricted
-deb-src http://us.archive.ubuntu.com/ubuntu/ bionic-updates main restricted
-
-## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
-## team. Also, please note that software in universe WILL NOT receive any
-## review or updates from the Ubuntu security team.
-deb http://us.archive.ubuntu.com/ubuntu/ bionic universe
-deb-src http://us.archive.ubuntu.com/ubuntu/ bionic universe
-deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates universe
-deb-src http://us.archive.ubuntu.com/ubuntu/ bionic-updates universe
-
-## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
-## team, and may not be under a free licence. Please satisfy yourself as to
-## your rights to use the software. Also, please note that software in
-## multiverse WILL NOT receive any review or updates from the Ubuntu
-## security team.
-deb http://us.archive.ubuntu.com/ubuntu/ bionic multiverse
-deb-src http://us.archive.ubuntu.com/ubuntu/ bionic multiverse
-deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates multiverse
-deb-src http://us.archive.ubuntu.com/ubuntu/ bionic-updates multiverse
-
-## N.B. software from this repository may not have been tested as
-## extensively as that contained in the main release, although it includes
-## newer versions of some applications which may provide useful features.
-## Also, please note that software in backports WILL NOT receive any review
-## or updates from the Ubuntu security team.
-deb http://us.archive.ubuntu.com/ubuntu/ bionic-backports main restricted universe multiverse
-deb-src http://us.archive.ubuntu.com/ubuntu/ bionic-backports main restricted universe multiverse
-
-deb http://security.ubuntu.com/ubuntu bionic-security main restricted
-deb-src http://security.ubuntu.com/ubuntu bionic-security main restricted
-deb http://security.ubuntu.com/ubuntu bionic-security universe
-deb-src http://security.ubuntu.com/ubuntu bionic-security universe
-deb http://security.ubuntu.com/ubuntu bionic-security multiverse
-deb-src http://security.ubuntu.com/ubuntu bionic-security multiverse
-
-## Uncomment the following two lines to add software from Canonical's
-## 'partner' repository.
-## This software is not part of Ubuntu, but is offered by Canonical and the
-## respective vendors as a service to Ubuntu users.
-# deb http://archive.canonical.com/ubuntu bionic partner
-# deb-src http://archive.canonical.com/ubuntu bionic partner
-
-## Uncomment the following two lines to add software from Ubuntu's
-## 'extras' repository.
-## This software is not part of Ubuntu, but is offered by third-party
-## developers who want to ship their latest software.
-# deb http://extras.ubuntu.com/ubuntu bionic main
-# deb-src http://extras.ubuntu.com/ubuntu bionic main
+++ /dev/null
----
-# file: roles/common/handlers/ipmi.yaml
-
-- name: Boot from network
- ipmi_boot:
- name: '{{ inventory_ipmi_hostname }}'
- user: '{{ inventory_ipmi_username }}'
- password: '{{ inventory_ipmi_password }}'
- bootdev: network
- delegate_to: localhost
- tags: boot-network
-
-- name: Boot from storage
- ipmi_boot:
- name: '{{ inventory_ipmi_hostname }}'
- user: '{{ inventory_ipmi_username }}'
- password: '{{ inventory_ipmi_password }}'
- bootdev: hd
- delegate_to: localhost
- tags: boot-storage
-
-- name: Power up server
- ipmi_power:
- name: '{{ inventory_ipmi_hostname }}'
- user: '{{ inventory_ipmi_username }}'
- password: '{{ inventory_ipmi_password }}'
- state: on
- delegate_to: localhost
- tags: power-up
-
-- name: Power down server
- ipmi_power:
- name: '{{ inventory_ipmi_hostname }}'
- user: '{{ inventory_ipmi_username }}'
- password: '{{ inventory_ipmi_password }}'
- state: off
- delegate_to: localhost
- tags: power-down
-
-- name: Power cycle server
- ipmi_power:
- name: '{{ inventory_ipmi_hostname }}'
- user: '{{ inventory_ipmi_username }}'
- password: '{{ inventory_ipmi_password }}'
- state: boot
- delegate_to: localhost
- tags: power-cycle
---
# file: roles/common/handlers/main.yaml
-- name: IPMI specific
- import_tasks: ipmi.yaml
- when: inventory_ipmi_hostname is defined
- tags: ipmi-handlers
-
-- name: CIMC specific
- import_tasks: cimc.yaml
- when: inventory_cimc_hostname is defined
- tags: cimc-handlers
-
-- name: Update GRUB
- command: update-grub
- tags: update-grub
-
- name: Reboot server
reboot:
reboot_timeout: 3600
- tags: reboot-server
-
-- name: Wait for server to restart
- wait_for:
- host: '{{ inventory_hostname }}'
- search_regex: OpenSSH
- port: 22
- delay: 60
- timeout: 3600
- tags: reboot-server
+ tags:
+ - reboot-server
+++ /dev/null
----
-# file: roles/common/tasks/kernel_install.yaml
-
-- name: Backup remote initramfs modules
- copy:
- src: '/etc/initramfs-tools/modules'
- dest: '/tmp/initramfs_modules.bkp'
- remote_src: yes
- tags: install-kernel-image
-
-- name: Update remote initramfs modules
- copy:
- src: '../files/initramfs_modules'
- dest: '/etc/initramfs-tools/modules'
- tags: install-kernel-image
-
-- name: Backup remote initramfs resume config
- copy:
- src: '/etc/initramfs-tools/conf.d/resume'
- dest: '/tmp/initramfs-resume.bkp'
- remote_src: yes
- tags: install-kernel-image
-
-- name: Update remote initramfs resume config
- copy:
- src: '../files/initramfs_resume'
- dest: '/etc/initramfs-tools/conf.d/resume'
- tags: install-kernel-image
-
-- name: Create target kernel dir
- file:
- path: '/opt/boot'
- state: 'directory'
- tags: install-kernel-image
-
-- name: Build initrd image
- shell: 'update-initramfs -k {{ ansible_kernel }} -c -b /opt/boot'
- tags: install-kernel-image
-
-- name: Copy corresponding kernel img
- copy:
- src: '/boot/vmlinuz-{{ ansible_kernel }}'
- dest: '/opt/boot/vmlinuz-{{ ansible_kernel }}'
- remote_src: yes
- tags: install-kernel-image
-
-- name: Restore remote initramfs modules
- copy:
- src: '/tmp/initramfs_modules.bkp'
- dest: '/etc/initramfs-tools/modules'
- remote_src: yes
- tags: install-kernel-image
-
-- name: Remove remote backup initramfs modules
- file:
- path: '/tmp/initramfs_modules.bkp'
- state: 'absent'
- tags: install-kernel-image
-
-- name: Restore remote initramfs resume config
- copy:
- src: '/tmp/initramfs-resume.bkp'
- dest: '/etc/initramfs-tools/conf.d/resume'
- remote_src: yes
- tags: install-kernel-image
-
-- name: Remove remote backup initramfs resume config
- file:
- path: '/tmp/initramfs-resume.bkp'
- state: 'absent'
- tags: install-kernel-image
---
# file: roles/common/tasks/main.yaml
-- name: Ensure the system exists in Cobbler
- cobbler_system:
- host: '{{ cobbler_hostname }}'
- port: 60080
- interfaces:
- br1:
- ipaddress: '{{ ansible_default_ipv4.address }}'
- macaddress: '{{ ansible_default_ipv4.macaddress }}'
- name: '{{ hostname }}'
- password: '{{ cobbler_password }}'
- properties:
- hostname: '{{ hostname }}'
- gateway: '{{ ansible_default_ipv4.gateway }}'
- profile: '{{ cobbler_profile }}'
- name_servers: '{{ name_servers }}'
- name_servers_search: '{{ name_servers_search }}'
- kickstart: '/var/lib/cobbler/kickstarts/{{ cobbler_profile }}.seed'
- kernel_options: '"interface={{ ansible_default_ipv4.interface }}"'
- netboot_enabled: yes
- username: '{{ cobbler_username }}'
- use_ssl: no
- validate_certs: no
- when: provision_enabled
- delegate_to: localhost
- tags: cobbler-include
-
-- name: Commit Cobbler changes
- cobbler_sync:
- host: '{{ cobbler_hostname }}'
- port: 60080
- password: '{{ cobbler_password }}'
- username: '{{ cobbler_username }}'
- use_ssl: no
- validate_certs: no
- run_once: yes
- when: provision_enabled
- delegate_to: localhost
- register: __included_in_cobbler
- notify:
- - 'Boot from network'
- - 'Reboot server'
- tags: cobbler-include
-
-- meta: flush_handlers
-
- name: Add permanent proxy settings
lineinfile:
- path: '/etc/environment'
- state: 'present'
- line: '{{ item.key }}={{ item.value }}'
- with_dict: '{{ proxy_env }}'
+ path: "/etc/environment"
+ state: "present"
+ line: "{{ item.key }}={{ item.value }}"
+ with_dict: "{{ proxy_env }}"
when: proxy_env is defined
+ tags:
+ - set-proxy
-- name: Install distribution - release - machine prerequisites
- include_tasks: '{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml'
+- name: Install Distribution - Release - Machine Prerequisites
+ package:
+ name: "{{ packages | flatten(levels=1) }}"
+ state: latest
+ update_cache: true
+ tags:
+ - install-dependencies
+
+- name: Install CSIT PIP requirements
+ pip:
+ name:
+ - "ecdsa==0.13.3"
+ - "paramiko==2.6.0"
+ - "pycrypto==2.6.1"
+ - "pypcap==1.2.3"
+ - "PyYAML==5.1.1"
+ - "requests==2.22.0"
+ - "robotframework==3.1.2"
+ - "scapy==2.4.3"
+ - "scp==0.13.2"
+ - "ansible==2.7.8"
+ - "dill==0.2.8.2"
+ - "numpy==1.17.3"
+ - "hdrhistogram==0.6.1"
+ - "pandas==0.25.3"
+ - "plotly==4.1.1"
+ - "PTable==0.9.2"
+ - "Sphinx==2.2.1"
+ - "sphinx-rtd-theme==0.4.0"
+ - "sphinxcontrib-programoutput==0.15"
+ - "sphinxcontrib-robotdoc==0.11.0"
+ - "alabaster==0.7.12"
+ - "Babel==2.7.0"
+ - "bcrypt==3.1.7"
+ - "certifi==2019.9.11"
+ - "cffi==1.13.2"
+ - "chardet==3.0.4"
+ - "cryptography==2.8"
+ - "docutils==0.15.2"
+ - "future==0.18.2"
+ - "idna==2.8"
+ - "imagesize==1.1.0"
+ - "Jinja2==2.10.3"
+ - "MarkupSafe==1.1.1"
+ - "packaging==19.2"
+ - "pbr==5.4.3"
+ - "pycparser==2.19"
+ - "Pygments==2.4.2"
+ - "PyNaCl==1.3.0"
+ - "pyparsing==2.4.4"
+ - "python-dateutil==2.8.1"
+ - "pytz==2019.3"
+ - "retrying==1.3.3"
+ - "six==1.13.0"
+ - "snowballstemmer==2.0.0"
+ - "sphinxcontrib-applehelp==1.0.1"
+ - "sphinxcontrib-devhelp==1.0.1"
+ - "sphinxcontrib-htmlhelp==1.0.2"
+ - "sphinxcontrib-jsmath==1.0.1"
+ - "sphinxcontrib-qthelp==1.0.2"
+ - "sphinxcontrib-serializinghtml==1.1.3"
+ - "urllib3==1.25.6"
+ tags:
+ - install-pip
+
+- name: Install CSIT PIP requirements - SciPy workaround
+ pip:
+ name:
+ - "scipy==1.1.0"
+ tags:
+ - install-pip
- name: Set sudoers admin
lineinfile:
- path: '/etc/sudoers'
- state: 'present'
- regexp: '^%admin ALL='
- line: '%admin ALL=(ALL) ALL'
- validate: '/usr/sbin/visudo -cf %s'
- tags: set-sudoers
+ path: "/etc/sudoers"
+ state: "present"
+ regexp: "^%admin ALL="
+ line: "%admin ALL=(ALL) ALL"
+ validate: "/usr/sbin/visudo -cf %s"
+ tags:
+ - set-sudoers
- name: Set sudoers sudo
lineinfile:
- path: '/etc/sudoers'
- state: 'present'
- regexp: '^%sudo'
- line: '%sudo ALL=(ALL:ALL) NOPASSWD: ALL'
- validate: '/usr/sbin/visudo -cf %s'
- tags: set-sudoers
-
-- name: Copy grub file
- template:
- src: 'files/grub_{{ ansible_machine }}'
- dest: '/etc/default/grub'
- owner: 'root'
- group: 'root'
- mode: '644'
- notify:
- - 'Update GRUB'
- - 'Reboot server'
- tags: copy-grub
+ path: "/etc/sudoers"
+ state: "present"
+ regexp: "^%sudo"
+ line: "%sudo ALL=(ALL:ALL) NOPASSWD: ALL"
+ validate: "/usr/sbin/visudo -cf %s"
+ tags:
+ - set-sudoers
- meta: flush_handlers
+++ /dev/null
----
-# file: roles/common/tasks/ubuntu_bionic.yaml
-
-- name: Copy apt sources file
- template:
- src: 'files/apt-sources_{{ ansible_distribution_release }}_{{ ansible_machine }}.list'
- dest: '/etc/apt/sources.list'
- tags: copy-apt-sources
-
-- name: Install CSIT dependencies
- apt:
- name:
- - 'python-apt'
- - 'python-setuptools'
- - 'git'
- - 'crudini'
- - 'expect'
- - 'socat'
- - 'qemu-system'
- - 'build-essential'
- state: 'present'
- cache_valid_time: 3600
- install_recommends: False
- tags: install-csit-dependencies
--- /dev/null
+---
+# file: roles/docker/defaults/main.yaml
+
+# Version options.
+docker_edition: 'ce'
+docker_version: '19.03.3'
+docker_apt_package: '5:{{ docker_version }}~3-0~{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}'
+docker_apt_package_state: present
+
+# Service options.
+docker_service_state: started
+docker_service_enabled: true
+docker_restart_handler_state: restarted
+
+# Used only for Debian/Ubuntu. Switch 'edge' to 'stable' if needed.
+docker_apt_release_channel: 'edge'
+docker_apt_repository: 'deb https://download.docker.com/linux/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}'
+docker_apt_repository_state: present
+
+# A list of users who will be added to the docker group.
+docker_users:
+ - 'testuser'
+
+# Proxy settings.
+docker_daemon_environment_http:
+ - 'HTTP_PROXY={{ proxy_env.http_proxy }}'
+ - 'NO_PROXY={{ proxy_env.no_proxy }}'
+docker_daemon_environment_https:
+ - 'HTTPS_PROXY={{ proxy_env.https_proxy }}'
+ - 'NO_PROXY={{ proxy_env.no_proxy }}'
--- /dev/null
+---
+# file: roles/docker/handlers/main.yaml
+
+- name: Restart Docker
+ service:
+ name: 'docker'
+ state: '{{ docker_restart_handler_state }}'
+ tags: restart-docker
--- /dev/null
+---
+# file: roles/docker/tasks/main.yaml
+
+- name: Docker - Install distribution - release - machine prerequisites
+ include_tasks: '{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml'
+ tags: install-docker
+
+- name: Docker - Create Service Directory
+ file:
+ path: '/etc/systemd/system/docker.service.d'
+ state: 'directory'
+ tags: install-docker
+
+- name: Docker - Setup HTTP Proxy
+ template:
+ src: 'templates/docker.service.proxy.http'
+ dest: '/etc/systemd/system/docker.service.d/http-proxy.conf'
+ owner: 'root'
+ group: 'root'
+ mode: '0644'
+ when: >
+ proxy_env is defined and
+ proxy_env.http_proxy is defined
+ tags: install-docker
+
+- name: Docker - Setup HTTPS Proxy
+ template:
+ src: 'templates/docker.service.proxy.https'
+ dest: '/etc/systemd/system/docker.service.d/https-proxy.conf'
+ owner: 'root'
+ group: 'root'
+ mode: '0644'
+ when: >
+ proxy_env is defined and
+ proxy_env.https_proxy is defined
+ tags: install-docker
+
+- name: Docker - Reload systemd daemon and restart service
+ command: 'systemctl daemon-reload'
+ notify:
+ - 'Restart Docker'
+ when: >
+ proxy_env is defined and
+ proxy_env.http_proxy is defined
+ tags: install-docker
+
+- name: Docker - Set specific users to docker group
+ user:
+ name: '{{ item }}'
+ groups: 'docker'
+ append: True
+ with_items: '{{ docker_users }}'
+ when: docker_users
+ tags: install-docker
--- /dev/null
+---
+# file: roles/docker/tasks/ubuntu_bionic.yaml
+
+- name: Docker repository - Dependencies
+ apt:
+ name:
+ - 'apt-transport-https'
+ - 'ca-certificates'
+ - 'software-properties-common'
+ state: 'present'
+ cache_valid_time: 3600
+ install_recommends: False
+ tags: install-docker
+
+- name: Docker repository - Add an Apt signing key
+ apt_key:
+ url: 'https://download.docker.com/linux/ubuntu/gpg'
+ state: 'present'
+ tags: install-docker
+
+- name: Docker repository - Install APT repository
+ apt_repository:
+ repo: '{{ docker_apt_repository }}'
+ state: '{{ docker_apt_repository_state }}'
+ update_cache: True
+ tags: install-docker
+
+- name: Install Docker
+ apt:
+ name: 'docker-{{ docker_edition }}={{ docker_apt_package }}'
+ state: '{{ docker_apt_package_state }}'
+ force: True
+ tags: install-docker
--- /dev/null
+---
+# file: roles/dpdk/defaults/main.yaml
+
+packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}"
+
+packages_base:
+ - []
+
+packages_by_distro:
+ ubuntu:
+ - "build-essential"
+ - "libnuma-dev"
+
+packages_by_arch:
+ aarch64:
+ - []
+ x86_64:
+ - []
+
+dpdk_target_dir: "/opt"
+dpdk_version:
+ - "19.02"
+ - "20.02"
+dpdk_url: "https://fast.dpdk.org/rel"
+dpdk_build_targets:
+ "19.02":
+ aarch64: "arm64-armv8a-linuxapp-gcc"
+ x86_64: "x86_64-native-linuxapp-gcc"
+ "20.02":
+ aarch64: "arm64-armv8a-linux-gcc"
+ x86_64: "x86_64-native-linux-gcc"
--- /dev/null
+---
+# file: roles/dpdk/tasks/main.yaml
+
+- name: DPDK - Install Distribution - Release - Machine Prerequisites
+ package:
+ name: "{{ packages | flatten(levels=1) }}"
+ state: latest
+ update_cache: true
+ tags:
+ - install-dependencies
+
+- name: DPDK - Download Release Archive
+ get_url:
+ url: "{{ dpdk_url }}/dpdk-{{ item }}.tar.xz"
+ dest: "{{ dpdk_target_dir }}/dpdk-{{ item }}.tar.xz"
+ mode: 0644
+ loop: "{{ dpdk_version }}"
+ register: "dpdk_downloaded"
+ tags:
+ - install-dpdk
+
+- name: DPDK - Extract Release Archive
+ unarchive:
+ remote_src: true
+ src: "{{ dpdk_target_dir }}/dpdk-{{ item }}.tar.xz"
+ dest: "{{ dpdk_target_dir }}/"
+ creates: "{{ dpdk_target_dir }}/dpdk-{{ item }}"
+ loop: "{{ dpdk_version }}"
+ when: "dpdk_downloaded"
+ register: "dpdk_extracted"
+ tags:
+ - install-dpdk
+
+- name: DPDK - Compile Release I
+ become: yes
+ command: "make install T={{ dpdk_build_targets[item][ansible_machine] }} DESTDIR={{ dpdk_target_dir }}/dpdk-{{ item }} chdir={{ dpdk_target_dir }}/dpdk-{{ item }}"
+ loop: "{{ dpdk_version }}"
+ when: "dpdk_extracted"
+ register: "dpdk_compiled"
+ tags:
+ - install-dpdk
+
+- name: DPDK - Link igb_uio Module
+ shell: "ln -fs {{ dpdk_target_dir }}/dpdk-{{ item }}/{{ dpdk_build_targets[item][ansible_machine] }}/kmod/igb_uio.ko /lib/modules/`uname -r`/igb_uio.ko && depmod -a"
+ ignore_errors: "yes"
+ loop: "{{ dpdk_version }}"
+ when: "dpdk_compiled"
+ tags:
+ - install-dpdk
--- /dev/null
+---
+# file: roles/iperf/defaults/main.yaml
+
+packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}"
+
+packages_base:
+ - []
+
+packages_by_distro:
+ ubuntu:
+ - "build-essential"
+ - "lib32z1"
+
+packages_by_arch:
+ aarch64:
+ - []
+ x86_64:
+ - []
+
+iperf_target_dir: "/opt"
+iperf_version:
+ - "3.7"
--- /dev/null
+---
+# file: roles/iperf/tasks/main.yaml
+
+- name: iPerf - Install Distribution - Release - Machine Prerequisites
+ package:
+ name: "{{ packages | flatten(levels=1) }}"
+ state: latest
+ update_cache: true
+ tags:
+ - install-dependencies
+
+- name: iPerf - Get Release Archive
+ get_url:
+ url: "https://downloads.es.net/pub/iperf/iperf-{{ item }}.tar.gz"
+ dest: "{{ iperf_target_dir }}/iperf-{{ item }}.tar.gz"
+ mode: 0644
+ loop: "{{ iperf_version }}"
+ tags:
+ - install-iperf
+
+- name: iPerf - Extract Release Archive
+ unarchive:
+ remote_src: true
+ src: "{{ iperf_target_dir }}/iperf-{{ item }}.tar.gz"
+ dest: "{{ iperf_target_dir }}/"
+ creates: "{{ iperf_target_dir }}/iperf-{{ item }}/src"
+ loop: "{{ iperf_version }}"
+ tags:
+ - install-iperf
+
+- name: iPerf - Compile Release I
+ command: "./configure"
+ args:
+ chdir: "{{ iperf_target_dir }}/iperf-{{ item }}/"
+ loop: "{{ iperf_version }}"
+ tags:
+ - install-iperf
+
+- name: iPerf - Compile Release II
+ command: "make"
+ args:
+ chdir: "{{ iperf_target_dir }}/iperf-{{ item }}/"
+ loop: "{{ iperf_version }}"
+ tags:
+ - install-iperf
+
+- name: iPerf - Compile Release III
+ command: "make install"
+ args:
+ chdir: "{{ iperf_target_dir }}/iperf-{{ item }}/"
+ loop: "{{ iperf_version }}"
+ tags:
+ - install-iperf
--- /dev/null
+---
+# file: roles/kernel_vm/tasks/main.yaml
+
+- name: Kernel VM - Backup remote initramfs modules
+ copy:
+ src: "/etc/initramfs-tools/modules"
+ dest: "/tmp/initramfs_modules.bkp"
+ remote_src: yes
+ ignore_errors: yes
+ register: __initramfs_modules_backuped
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Backup remote initramfs resume config
+ copy:
+ src: "/etc/initramfs-tools/conf.d/resume"
+ dest: "/tmp/initramfs-resume.bkp"
+ remote_src: yes
+ ignore_errors: yes
+ register: __initramfs_resume_backuped
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Update remote initramfs modules
+ copy:
+ src: "../files/initramfs_modules"
+ dest: "/etc/initramfs-tools/modules"
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Update remote initramfs resume config
+ copy:
+ src: "../files/initramfs_resume"
+ dest: "/etc/initramfs-tools/conf.d/resume"
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Create target kernel dir
+ file:
+ path: "/opt/boot"
+ state: "directory"
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Build initrd image
+ shell: "update-initramfs -k {{ ansible_kernel }} -c -b /opt/boot"
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Copy corresponding kernel img
+ copy:
+ src: "/boot/vmlinuz-{{ ansible_kernel }}"
+ dest: "/opt/boot/vmlinuz-{{ ansible_kernel }}"
+ remote_src: yes
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Restore remote initramfs modules
+ copy:
+ src: "/tmp/initramfs_modules.bkp"
+ dest: "/etc/initramfs-tools/modules"
+ remote_src: yes
+ ignore_errors: yes
+ when: __initramfs_modules_backuped
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Remove remote backup initramfs modules
+ file:
+ path: "/tmp/initramfs_modules.bkp"
+ state: "absent"
+ when: __initramfs_modules_backuped
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Restore remote initramfs resume config
+ copy:
+ src: "/tmp/initramfs-resume.bkp"
+ dest: "/etc/initramfs-tools/conf.d/resume"
+ remote_src: yes
+ ignore_errors: yes
+ when: __initramfs_resume_backuped
+ tags:
+ - install-kernel-image
+
+- name: Kernel VM - Remove remote backup initramfs resume config
+ file:
+ path: "/tmp/initramfs-resume.bkp"
+ state: "absent"
+ when: __initramfs_resume_backuped
+ tags:
+ - install-kernel-image
--- /dev/null
+---
+# file: roles/kubernetes/defaults/main.yaml
+
+# Version options.
+kubernetes_version: "1.11.0-00"
+kubernetes_apt_package_state: present
+
+# Service options.
+kubernetes_service_state: started
+kubernetes_service_enabled: true
+kubernetes_restart_handler_state: restarted
+
+# APT options.
+kubernetes_apt_repository: "deb http://apt.kubernetes.io/ kubernetes-xenial main"
+kubernetes_apt_repository_state: present
--- /dev/null
+---
+# file: roles/kubernetes/tasks/main.yaml
+
+- name: Kubernetes - Install distribution - release - machine prerequisites
+ include_tasks: '{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml'
+ tags: install-kubernetes
+
+- name: Kubernetes - Apply kubelet parameter
+ lineinfile:
+ path: '/etc/default/kubelet'
+ state: 'present'
+ regexp: '^KUBELET_EXTRA_ARGS=*'
+ line: 'KUBELET_EXTRA_ARGS=--feature-gates HugePages=false'
+ tags: install-kubernetes
--- /dev/null
+---
+# file: roles/kubernetes/tasks/ubuntu_bionic.yaml
+
+- name: Kubernetes repository - Dependencies
+ apt:
+ name:
+ - 'apt-transport-https'
+ - 'ca-certificates'
+ - 'software-properties-common'
+ state: 'present'
+ cache_valid_time: 3600
+ install_recommends: False
+ tags: install-kubernetes
+
+- name: Kubernetes repository - Add an Apt signing key
+ apt_key:
+ url: 'https://packages.cloud.google.com/apt/doc/apt-key.gpg'
+ state: 'present'
+ tags: install-kubernetes
+
+- name: Kubernetes repository - Install APT repository
+ apt_repository:
+ repo: '{{ kubernetes_apt_repository }}'
+ state: '{{ kubernetes_apt_repository_state }}'
+ update_cache: True
+ tags: install-kubernetes
+
+- name: Kubernetes - Install
+ apt:
+ name:
+ - 'kubernetes-cni=0.6.0-00'
+ - 'kubeadm={{ kubernetes_version }}'
+ - 'kubectl={{ kubernetes_version }}'
+ - 'kubelet={{ kubernetes_version }}'
+ state: '{{ kubernetes_apt_package_state }}'
+ force: True
+ tags: install-kubernetes
--- /dev/null
+---
+# file: roles/mellanox/defaults/main.yaml
+
+packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}"
+
+packages_base:
+ - []
+
+packages_by_distro:
+ ubuntu:
+ - "build-essential"
+
+packages_by_arch:
+ aarch64:
+ - []
+ x86_64:
+ - []
+
+mellanox_version: "4.6-1.0.1.1"
--- /dev/null
+---
+# file: roles/mellanox/tasks/main.yaml
+
+- name: Mellanox Install - Check Presence of Mellanox Hardware
+ shell: "lspci | grep Mellanox | awk '{print $1}'"
+ register: mellanox_pcis
+ failed_when: no
+ changed_when: no
+ tags:
+ - install-mellanox
+
+- name: Mellanox Install - Get OFED
+ get_url:
+ url: "http://content.mellanox.com/ofed/MLNX_OFED-{{ mellanox_version }}/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}.tgz"
+ dest: "/opt/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}.tgz"
+ mode: 0644
+ when: mellanox_pcis.stdout_lines | length > 0
+ tags:
+ - install-mellanox
+
+- name: Mellanox Install - Extract OFED
+ unarchive:
+ remote_src: true
+ src: "/opt/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}.tgz"
+ dest: "/opt/"
+ creates: "/opt/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}"
+ register: mellanox_firmware_extracted
+ when: mellanox_pcis.stdout_lines | length > 0
+ tags:
+ - install-mellanox
+
+- name: Mellanox Install - Install OFED
+ command: "./mlnxofedinstall --with-mft --dpdk --force --upstream-libs"
+ args:
+ chdir: "/opt/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}"
+ when: mellanox_pcis.stdout_lines | length > 0 and mellanox_firmware_extracted
+ tags:
+ - install-mellanox
+
+- name: Mellanox Install - Switch Infiniband to Ethernet
+ command: "mlxconfig --yes --dev {{ item }} set LINK_TYPE_P1=2 LINK_TYPE_P2=2"
+ with_items: "{{ mellanox_pcis.stdout_lines }}"
+ tags:
+ - install-mellanox
+
--- /dev/null
+---
+# file: roles/performance_tuning/defaults/main.yaml
+
+packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}"
+
+packages_base:
+ - "cpufrequtils"
+
+packages_by_distro:
+ ubuntu:
+ - []
+
+packages_by_arch:
+ aarch64:
+ - []
+ x86_64:
+ - []
+
+grub_cmdline_linux:
+ aarch64:
+ - "GRUB_CMDLINE_LINUX=\"isolcpus={{ grub.isolcpus }} nohz_full={{ grub.nohz_full }} rcu_nocbs={{ grub.rcu_nocbs }} intel_iommu=on nmi_watchdog=0 audit=0 nosoftlockup processor.max_cstate=1\""
+ x86_64:
+ - "GRUB_CMDLINE_LINUX=\"isolcpus={{ grub.isolcpus }} nohz_full={{ grub.nohz_full }} rcu_nocbs={{ grub.rcu_nocbs }} numa_balancing=disable intel_pstate=disable intel_iommu=on iommu=pt nmi_watchdog=0 audit=0 nosoftlockup processor.max_cstate=1 intel_idle.max_cstate=1 hpet=disable tsc=reliable mce=off\""
--- /dev/null
+---
+# file: roles/performance_tuning/handlers/main.yaml
+
+- name: Update GRUB
+ command: update-grub
+ tags:
+ - update-grub
+
+- name: Reboot server
+ reboot:
+ reboot_timeout: 3600
+ tags:
+ - reboot-server
--- /dev/null
+---
+# file: roles/performance_tuning/tasks/main.yaml
+
+- name: Performance Tuning - Install Distribution - Release - Machine Prerequisites
+ package:
+ name: "{{ packages | flatten(levels=1) }}"
+ state: latest
+ update_cache: true
+ tags:
+ - install-dependencies
+
+- name: Performance Tuning - Configure {{ ansible_machine }} Kernel Parameters
+ lineinfile:
+ path: "/etc/default/grub"
+ state: "present"
+ regexp: "^GRUB_CMDLINE_LINUX="
+ line: "{{ grub_cmdline_linux[ansible_machine] | join() }}"
+ notify:
+ - "Update GRUB"
+ - "Reboot server"
+ tags:
+ - set-grub
+
+- name: Performance Tuning - Turbo Boost
+ import_tasks: turbo_boost.yaml
+ when: >
+ cpu_microarchitecture == "skylake" or
+ cpu_microarchitecture == "cascadelake"
+ tags:
+ - turbo-boost
+
+- name: Performance Tuning - Adjust nr_hugepages
+ # change the minimum size of the hugepage pool.
+ # 2G VPP, 4GB per VNF/CNF, 2G reserve
+ sysctl:
+ name: "vm.nr_hugepages"
+ value: "{{ sysctl.vm.nr_hugepages }}"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust max_map_count
+ # this file contains the maximum number of memory map areas a process
+ # may have. memory map areas are used as a side-effect of calling
+ # malloc, directly by mmap and mprotect, and also when loading shared
+ # libraries.
+ #
+ # while most applications need less than a thousand maps, certain
+ # programs, particularly malloc debuggers, may consume lots of them,
+ # e.g., up to one or two maps per allocation.
+ # must be greater than or equal to (2 * vm.nr_hugepages).
+ sysctl:
+ name: "vm.max_map_count"
+ value: "{{ sysctl.vm.nr_hugepages * 4 }}"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust hugetlb_shm_group
+ # hugetlb_shm_group contains group id that is allowed to create sysv
+ # shared memory segment using hugetlb page.
+ sysctl:
+ name: "vm.hugetlb_shm_group"
+ value: "1000"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust swappiness
+ # this control is used to define how aggressive the kernel will swap
+  # memory pages. higher values will increase aggressiveness, lower values
+ # decrease the amount of swap. a value of 0 instructs the kernel not to
+ # initiate swap until the amount of free and file-backed pages is less
+ # than the high water mark in a zone.
+ sysctl:
+ name: "vm.swappiness"
+ value: "0"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust shmmax
+  # shared memory max must be greater than or equal to the total size of hugepages.
+ # for 2mb pages, totalhugepagesize = vm.nr_hugepages * 2 * 1024 * 1024
+ # if the existing kernel.shmmax setting (cat /sys/proc/kernel/shmmax)
+ # is greater than the calculated totalhugepagesize then set this parameter
+ # to current shmmax value.
+ sysctl:
+ name: "kernel.shmmax"
+ value: "{{ sysctl.vm.nr_hugepages * 2 * 1024 * 1024 }}"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust watchdog_cpumask
+ # this value can be used to control on which cpus the watchdog may run.
+ # the default cpumask is all possible cores, but if no_hz_full is
+ # enabled in the kernel config, and cores are specified with the
+ # nohz_full= boot argument, those cores are excluded by default.
+ # offline cores can be included in this mask, and if the core is later
+ # brought online, the watchdog will be started based on the mask value.
+ #
+ # typically this value would only be touched in the nohz_full case
+ # to re-enable cores that by default were not running the watchdog,
+ # if a kernel lockup was suspected on those cores.
+ sysctl:
+ name: "kernel.watchdog_cpumask"
+ value: "{{ sysctl.kernel.watchdog_cpumask }}"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust randomize_va_space
+ # this option can be used to select the type of process address
+ # space randomization that is used in the system, for architectures
+ # that support this feature.
+ # 0 - turn the process address space randomization off. this is the
+ # default for architectures that do not support this feature anyways,
+ # and kernels that are booted with the "norandmaps" parameter.
+ sysctl:
+ name: "kernel.randomize_va_space"
+ value: "0"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Copy Cpufrequtils File
+ copy:
+ src: "files/cpufrequtils"
+ dest: "/etc/default/cpufrequtils"
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ tags:
+ - copy-cpufrequtils
+
+- name: Performance Tuning - Set Ondemand Service To Disable
+ service:
+ name: "ondemand"
+ enabled: "no"
+ tags:
+ - set-ondemand
+
+- name: Performance Tuning - Load Kernel Modules By Default
+ lineinfile:
+ path: "/etc/modules"
+ state: "present"
+ line: "{{ item }}"
+ with_items:
+ - "vfio-pci"
+ tags:
+ - load-kernel-modules
+
+- meta: flush_handlers
--- /dev/null
+---
+# file: roles/performance_tuning/tasks/turbo_boost.yaml
+
+- name: Turbo Boost - Install msr-tools
+ package:
+ name:
+ - "msr-tools"
+ state: latest
+ update_cache: true
+ tags:
+ - turbo-boost
+
+- name: Turbo Boost - Load msr By Default
+ lineinfile:
+ path: "/etc/modules"
+ state: "present"
+ line: "msr"
+ tags:
+ - turbo-boost
+
+- name: Turbo Boost - Custom Startup Service Hook
+ copy:
+ src: "files/disable-turbo-boost.service"
+ dest: "/etc/systemd/system/disable-turbo-boost.service"
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ tags:
+ - turbo-boost
+
+- name: Turbo Boost - Custom Startup Service Hook Enable
+ service:
+ name: "disable-turbo-boost"
+ enabled: yes
+ tags:
+ - turbo-boost
+++ /dev/null
----
-# file: roles/sut/tasks/dpdk.yaml
-
-- name: Download DPDK release archive
- get_url:
- url: '{{ dpdk.url }}/{{ dpdk.version }}.tar.xz'
- dest: '{{ dpdk.target_dir }}/{{ dpdk.version }}.tar.xz'
- mode: 0644
- register: 'linux__dpdk_downloaded'
- tags: install-dpdk
-
-- name: Extract DPDK release archive
- become: yes
- shell: 'cd {{ dpdk.target_dir }}; tar xf {{ dpdk.target_dir }}/{{ dpdk.version }}.tar.xz'
- when: 'linux__dpdk_downloaded'
- register: 'linux__dpdk_extracted'
- tags: install-dpdk
-
-- name: Compile DPDK release
- become: yes
- command: 'make install T={{ dpdk.build_targets[ansible_machine] }}-linuxapp-gcc DESTDIR={{ dpdk.target_dir }}/{{ dpdk.version }} chdir={{ dpdk.target_dir }}/{{ dpdk.version }}'
- when: 'linux__dpdk_extracted'
- tags: install-dpdk
+++ /dev/null
----
-# file: roles/sut/tasks/main.yaml
-
-- name: Install distribution - release - machine prerequisites
- include_tasks: '{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml'
- tags: [ install-csit-dependencies, install-kernel-image ]
-
-- name: Copy 80-vpp.conf
- file:
- src: '/dev/null'
- dest: '/etc/sysctl.d/80-vpp.conf'
- state: 'link'
- become: yes
- tags: copy-80-vpp
-
-- name: Install DPDK
- include_tasks: 'dpdk.yaml'
- tags: install-dpdk
+++ /dev/null
----
-# file: roles/sut/tasks/ubuntu_bionic.yaml
-
-- name: Install CSIT dependencies
- apt:
- name:
- - 'dkms'
- - 'pkg-config'
- - 'libglib2.0-dev'
- - 'autoconf'
- - 'libtool'
- - 'screen'
- - 'libmbedcrypto1'
- - 'libmbedtls10'
- - 'libmbedx509-0'
- - 'lxc'
- - 'openjdk-8-jdk'
- - 'libpixman-1-dev'
- - 'python-cffi'
- - 'python-cffi-backend'
- - 'python3-cffi'
- - 'python3-cffi-backend'
- - 'libnuma-dev'
- - 'lzop'
- - 'lrzip'
- state: 'present'
- install_recommends: False
- tags: install-csit-dependencies
-
-- name: Kernel VM install
- include_tasks: '../../common/tasks/kernel_install.yaml'
- tags: install-kernel-image
#!/usr/bin/env bash
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"start" )
# Run TG
for cnt in $(seq 1 ${2:-1}); do
+ docker network create --driver bridge csit-nw-tg${cnt} || true
# If the IMAGE is not already loaded then docker run will pull the
# IMAGE, and all image dependencies, before it starts the container.
dcr_image="snergster/csit-sut:latest"
dcr_stc_params+="--volume /dev:/dev "
# Mount /opt/boot/ where VM kernel and initrd are located.
dcr_stc_params+="--volume /opt:/opt "
+ # Mount /usr/local/bin/wrk where WRK is located.
+ dcr_stc_params+="--volume /usr/local/bin/wrk:/usr/local/bin/wrk "
# Mount host hugepages for VMs.
dcr_stc_params+="--volume /dev/hugepages:/dev/hugepages "
params=(${dcr_stc_params} --name csit-tg-"${cnt}" "${dcr_image}")
- docker run "${params[@]}"
+ docker run --network=csit-nw-tg${cnt} "${params[@]}"
done
;;
"stop" )
- docker rm --force $(docker ps --all --quiet --filter name=csit-tg)
+ docker rm --force $(docker ps --all --quiet --filter name=csit)
+ docker network rm $(docker network ls --filter name=csit --quiet)
;;
esac
- name: Start csit-initialize-docker-tg.service
systemd:
enabled: yes
- state: 'started'
- name: 'csit-initialize-docker-tg.service'
- tags: docker-tg
+ state: "started"
+ name: "csit-initialize-docker-tg.service"
+ tags:
+ - docker-tg
---
# file: roles/tg/tasks/main.yaml
-- name: Install distribution - release - machine prerequisites
- include_tasks: '{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml'
-
-- name: Install WRK
- include_tasks: 'wrk.yaml'
- tags: install-wrk
-<<<<<<< HEAD (89d1c9 Report: Show TSH results)
-=======
-
-- name: Install TRex
- include_tasks: 'trex.yaml'
- tags: install-trex
-
-- name: Copy csit-initialize-docker-tg.sh
+- name: TG - Copy csit-initialize-docker-tg.sh
copy:
- src: 'files/csit-initialize-docker-tg.sh'
- dest: '/usr/local/bin/csit-initialize-docker-tg.sh'
- owner: 'root'
- group: 'root'
- mode: '744'
+ src: "files/csit-initialize-docker-tg.sh"
+ dest: "/usr/local/bin/csit-initialize-docker-tg.sh"
+ owner: "root"
+ group: "root"
+ mode: "744"
when: docker_tg is defined
- tags: docker-tg
+ tags:
+ - docker-tg
-- name: Start csit-initialize-docker-tg.service
+- name: TG - Start csit-initialize-docker-tg.service
copy:
- src: 'files/csit-initialize-docker-tg.service'
- dest: '/etc/systemd/system/'
- owner: 'root'
- group: 'root'
- mode: '644'
+ src: "files/csit-initialize-docker-tg.service"
+ dest: "/etc/systemd/system/"
+ owner: "root"
+ group: "root"
+ mode: "644"
notify:
- - 'Start csit-initialize-docker-tg.service'
+ - "Start csit-initialize-docker-tg.service"
when: docker_tg is defined
- tags: docker-tg
+ tags:
+ - docker-tg
- meta: flush_handlers
->>>>>>> CHANGE (96eaab Add: Use containers for shared TG)
+++ /dev/null
----
-# file: roles/tg/tasks/ubuntu_bionic.yaml
-
-- name: Install CSIT dependencies
- apt:
- name:
- - 'unzip'
- - 'libssl-dev'
- state: 'present'
- install_recommends: False
- tags: install-csit-dependencies
+++ /dev/null
----
-# file: roles/tg/tasks/wrk.yaml
-
-- name: Download WRK release archive
- get_url:
- url: '{{ wrk.url }}/{{ wrk.version }}.tar.gz'
- dest: '{{ wrk.target_dir }}/{{ wrk.version }}.tar.gz'
- mode: 0644
- register: 'linux__wrk_downloaded'
- tags: install-wrk
-
-- name: Extract WRK release archive
- become: yes
- unarchive:
- src: '{{ wrk.target_dir }}/{{ wrk.version }}.tar.gz'
- dest: '{{ wrk.target_dir }}/wrk-{{ wrk.version }}'
- remote_src: yes
- when: 'linux__wrk_downloaded'
- register: 'linux__wrk_extracted'
- tags: install-wrk
-
-- name: Compile WRK release
- become: yes
- shell: 'cd {{ wrk.target_dir }}/wrk-{{ wrk.version }}; make'
- when: 'linux__wrk_extracted'
- register: 'linux__wrk_compiled'
- tags: install-wrk
-
-- name: Move WRK binary
- become: yes
- command: 'mv {{ wrk.target_dir }}/wrk-{{ wrk.version }}/wrk /usr/local/bin/'
- when: 'linux__wrk_compiled'
- tags: install-wrk
+++ /dev/null
-# change the minimum size of the hugepage pool.
-# 2G VPP, 16x2G for Fullbox VM, 2G reserve
-vm.nr_hugepages={{ sysctl.vm.nr_hugepages }}
-
-# this file contains the maximum number of memory map areas a process
-# may have. memory map areas are used as a side-effect of calling
-# malloc, directly by mmap and mprotect, and also when loading shared
-# libraries.
-#
-# while most applications need less than a thousand maps, certain
-# programs, particularly malloc debuggers, may consume lots of them,
-# e.g., up to one or two maps per allocation.
-# must be greater than or equal to (2 * vm.nr_hugepages).
-vm.max_map_count={{ sysctl.vm.max_map_count }}
-
-# hugetlb_shm_group contains group id that is allowed to create sysv
-# shared memory segment using hugetlb page.
-vm.hugetlb_shm_group=0
-
-# this control is used to define how aggressive the kernel will swap
-# memory pages. higher values will increase agressiveness, lower values
-# decrease the amount of swap. a value of 0 instructs the kernel not to
-# initiate swap until the amount of free and file-backed pages is less
-# than the high water mark in a zone.
-vm.swappiness=0
-
-# this parameter can be used to control the nmi watchdog
-# (i.e. the hard lockup detector) on x86 systems.
-#
-# 0 - disable the hard lockup detector
-# 1 - enable the hard lockup detector
-#
-# the hard lockup detector monitors each cpu for its ability to respond to
-# timer interrupts. the mechanism utilizes cpu performance counter registers
-# that are programmed to generate non-maskable interrupts (nmis) periodically
-# while a cpu is busy. hence, the alternative name 'nmi watchdog'.
-#
-# the nmi watchdog is disabled by default if the kernel is running as a guest
-# in a kvm virtual machine. this default can be overridden by adding
-#kernel. nmi_watchdog=1
-
-# shared memory max must be greator or equal to the total size of hugepages.
-# for 2mb pages, totalhugepagesize = vm.nr_hugepages * 2 * 1024 * 1024
-# if the existing kernel.shmmax setting (cat /sys/proc/kernel/shmmax)
-# is greater than the calculated totalhugepagesize then set this parameter
-# to current shmmax value.
-kernel.shmmax=8589934592
-
-# this option can be used to select the type of process address
-# space randomization that is used in the system, for architectures
-# that support this feature.
-# 0 - turn the process address space randomization off. this is the
-# default for architectures that do not support this feature anyways,
-# and kernels that are booted with the "norandmaps" parameter.
-kernel.randomize_va_space=0
-
-# this parameter can be used to control the soft lockup detector.
-#
-# 0 - disable the soft lockup detector
-# 1 - enable the soft lockup detector
-#
-# the soft lockup detector monitors cpus for threads that are hogging the cpus
-# without rescheduling voluntarily, and thus prevent the 'watchdog/n' threads
-# from running. the mechanism depends on the cpus ability to respond to timer
-# interrupts which are needed for the 'watchdog/n' threads to be woken up by
-# the watchdog timer function, otherwise the nmi watchdog - if enabled - can
-# detect a hard lockup condition.
-#kernel.soft_watchdog=0
-
-# this value can be used to control on which cpus the watchdog may run.
-# the default cpumask is all possible cores, but if no_hz_full is
-# enabled in the kernel config, and cores are specified with the
-# nohz_full= boot argument, those cores are excluded by default.
-# offline cores can be included in this mask, and if the core is later
-# brought online, the watchdog will be started based on the mask value.
-#
-# typically this value would only be touched in the nohz_full case
-# to re-enable cores that by default were not running the watchdog,
-# if a kernel lockup was suspected on those cores.
-kernel.watchdog_cpumask={{ sysctl.kernel.watchdog_cpumask }}
+++ /dev/null
-#Configuration for the irqbalance daemon
-
-#Should irqbalance be enabled?
-ENABLED="0"
-#Balance the IRQs only once?
-ONESHOT="0"
+++ /dev/null
-# This file describes the network interfaces available on your system
-# For more information, see netplan(5).
-network:
- version: 2
- renderer: networkd
- ethernets:
- {{ ansible_default_ipv4["interface"] }}:
- addresses: [ {{ (ansible_default_ipv4.address + '/' + ansible_default_ipv4.netmask) | ipaddr('host/prefix') }} ]
- dhcp4: false
- gateway4: {{ ansible_default_ipv4["gateway"] }}
- nameservers:
- addresses: [ {{ name_servers }} ]
+++ /dev/null
----
-# file roles/tg_sut/handlers/main.yaml
-
-- name: Restart Docker
- service:
- name: 'docker'
- state: 'restarted'
- tags: restart-docker
-
-- name: Reboot server
- reboot:
- reboot_timeout: 3600
- tags: reboot-server
-
-- name: Wait for server to restart
- wait_for:
- host: '{{ inventory_hostname }}'
- search_regex: OpenSSH
- port: 22
- delay: 60
- timeout: 3600
- tags: reboot-server
+++ /dev/null
----
-# file: roles/tg_sut/tasks/aarch64.yaml
-
-- name: Configure aarch64 kernel parameters
- lineinfile:
- path: '/etc/default/grub'
- state: 'present'
- regexp: '^GRUB_CMDLINE_LINUX='
- line: 'GRUB_CMDLINE_LINUX="isolcpus={{ grub.isolcpus }} nohz_full={{ grub.nohz_full }} rcu_nocbs={{ grub.rcu_nocbs }} intel_iommu=on nmi_watchdog=0 audit=0 nosoftlockup processor.max_cstate=1"'
- notify: ['Update GRUB']
- tags: set-grub
-
-- name: Install SciPy dependencies
- apt:
- name:
- - 'gfortran'
- - 'libblas-dev'
- - 'liblapack-dev'
- state: 'present'
- install_recommends: False
- tags: install-pip
-
-- name: Install CSIT PIP requirements without SciPy
- pip:
- name:
- - 'docopt==0.6.2'
- - 'ecdsa==0.13'
- - 'enum34==1.1.2'
- - 'ipaddress==1.0.16'
- - 'paramiko==1.16.0'
- - 'pexpect==4.6.0'
- - 'pycrypto==2.6.1'
- - 'pykwalify==1.5.0'
- - 'pypcap==1.1.5'
- - 'python-dateutil==2.4.2'
- - 'PyYAML==3.11'
- - 'requests==2.9.1'
- - 'robotframework==2.9.2'
- - 'scapy==2.3.1'
- - 'scp==0.10.2'
- - 'six==1.12.0'
- - 'dill==0.2.8.2'
- - 'numpy==1.14.5'
- tags: install-pip
-
-- name: Install CSIT PIP requirements - SciPy workaround
- pip:
- name:
- - 'scipy==1.1.0'
- tags: install-pip
+++ /dev/null
----
-# file: roles/tg_sut/tasks/main.yaml
-
-- name: Install distribution - release - machine prerequisites
- include_tasks: '{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml'
-
-- name: Machine specifics
- include_tasks: '{{ ansible_machine }}.yaml'
-
-- name: Skylake specific
- import_tasks: skylake.yaml
- when: cpu_microarchitecture == "skylake"
-
-- name: Copy netplan network config file
- template:
- src: 'files/netplan_config'
- dest: '/etc/netplan/01-netcfg.yaml'
- owner: 'root'
- group: 'root'
- mode: '0644'
- tags: copy-interface-file
-
-- name: Copy CSIT sysctl file
- template:
- src: 'files/90-csit'
- dest: '/etc/sysctl.d/90-csit.conf'
- owner: 'root'
- group: 'root'
- mode: '0644'
- tags: copy-90-csit
-
-- name: Copy IRQ load balancing file
- copy:
- src: 'files/irqbalance'
- dest: '/etc/default/irqbalance'
- owner: 'root'
- group: 'root'
- mode: '0644'
- tags: copy-irq
-
-- name: Copy cpufrequtils file
- copy:
- src: 'files/cpufrequtils'
- dest: '/etc/default/cpufrequtils'
- owner: 'root'
- group: 'root'
- mode: '0644'
- tags: copy-cpufrequtils
-
-- name: Set ondemand service to disable
- service:
- name: 'ondemand'
- enabled: 'no'
- tags: set-ondemand
-
-- name: Install VPP PIP requirements
- pip:
- name:
- - 'aenum==2.1.2'
- tags: install-pip
-
-- name: Load vfio-pci by default
- lineinfile:
- path: '/etc/modules'
- state: 'present'
- line: 'vfio-pci'
- tags: load-vfio-pci
+++ /dev/null
----
-# file: roles/tg_sut/tasks/skylake.yaml
-
-- name: Install msr-tools
- apt:
- name:
- - 'msr-tools'
- state: 'present'
- install_recommends: False
- tags: install-msr
-
-- name: Disable Skylake Turbo-Boost by service
- copy:
- src: 'files/disable-turbo-boost.service'
- dest: '/etc/systemd/system/disable-turbo-boost.service'
- owner: 'root'
- group: 'root'
- mode: '0644'
- tags: disable-turbo-boost
-
-- name: Disable Skylake Turbo-Boost by service on startup
- service:
- name: disable-turbo-boost
- enabled: yes
- tags: disable-turbo-boost
+++ /dev/null
----
-# file: roles/tg_sut/tasks/ubuntu_bionic.yaml
-
-- name: Install CSIT dependencies
- apt:
- name:
- - 'python-dev'
- - 'python-virtualenv'
- - 'python-pip'
- - 'libpcap-dev'
- - 'cpufrequtils'
- - 'cgroup-bin'
- - 'zlib1g-dev'
- - 'apt-transport-https'
- - 'ca-certificates'
- - 'software-properties-common'
- - 'libnuma-dev'
- - 'lzop'
- - 'lrzip'
- state: 'present'
- install_recommends: False
- tags: install-csit-dependencies
-
-- name: Add an Apt signing key, for docker-ce repository
- apt_key:
- url: https://download.docker.com/linux/ubuntu/gpg
- state: 'present'
- tags: install-docker
-
-- name: Install Docker APT repository
- apt_repository:
- repo: '{{ docker_repository }}'
- state: 'present'
- update_cache: True
- tags: install-docker
-
-- name: Install Docker
- apt:
- name: 'docker-{{ docker_edition }}={{ docker_apt_package_name }}'
- state: 'present'
- force: yes
- tags: install-docker
-
-- name: Creates Docker service directory
- file:
- path: '/etc/systemd/system/docker.service.d'
- state: 'directory'
-
-- name: Setup Docker http proxy
- template:
- src: 'templates/docker.service.proxy.http'
- dest: '/etc/systemd/system/docker.service.d/http-proxy.conf'
- owner: 'root'
- group: 'root'
- mode: '0644'
- register: docker_register_systemd_service
- when: proxy_env is defined and proxy_env.http_proxy is defined
- tags: copy-docker
-
-- name: Setup Docker https proxy
- template:
- src: 'templates/docker.service.proxy.https'
- dest: '/etc/systemd/system/docker.service.d/https-proxy.conf'
- owner: 'root'
- group: 'root'
- mode: '0644'
- register: docker_register_systemd_service
- when: proxy_env is defined and proxy_env.https_proxy is defined
- tags: copy-docker
-
-- name: Reload systemd daemon
- command: 'systemctl daemon-reload'
- notify: ['Restart Docker']
- when: (docker_register_systemd_service and
- docker_register_systemd_service is changed)
- tags: restart-docker
-
-- name: Set specific users to docker group
- user:
- name: '{{ item }}'
- groups: 'docker'
- append: True
- with_items: '{{ docker_users }}'
- when: docker_users
- tags: set-docker
-
-- name: Add an Apt signing key, for Kubernetes repository
- apt_key:
- url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
- state: 'present'
- tags: install-kubernetes
-
-- name: Install kubernetes APT repository
- apt_repository:
- repo: '{{ kubernetes_repository }}'
- state: 'present'
- update_cache: True
- tags: install-kubernetes
-
-- name: Install Kubernetes
- apt:
- name:
- - 'kubernetes-cni=0.6.0-00'
- - 'kubeadm={{ kubernetes_apt_package_name }}'
- - 'kubectl={{ kubernetes_apt_package_name }}'
- - 'kubelet={{ kubernetes_apt_package_name }}'
- state: 'present'
- force: yes
- tags: install-kubernetes
-
-- name: Apply kubelet parameter
- lineinfile:
- path: '/etc/default/kubelet'
- state: 'present'
- regexp: '^KUBELET_EXTRA_ARGS=*'
- line: 'KUBELET_EXTRA_ARGS=--feature-gates HugePages=false'
- tags: install-kubernetes
+++ /dev/null
----
-# file: roles/tg_sut/tasks/x86_64.yaml
-
-- name: Load msr by default
- lineinfile:
- path: '/etc/modules'
- state: 'present'
- line: 'msr'
- tags: disable-turbo-boost
-
-- name: Configure x86_64 kernel parameters
- lineinfile:
- path: '/etc/default/grub'
- state: 'present'
- regexp: '^GRUB_CMDLINE_LINUX='
- line: 'GRUB_CMDLINE_LINUX="isolcpus={{ grub.isolcpus }} nohz_full={{ grub.nohz_full }} rcu_nocbs={{ grub.rcu_nocbs }} numa_balancing=disable intel_pstate=disable intel_iommu=on iommu=pt nmi_watchdog=0 audit=0 nosoftlockup processor.max_cstate=1 intel_idle.max_cstate=1 hpet=disable tsc=reliable mce=off"'
- notify:
- - 'Update GRUB'
- - 'Reboot server'
- tags: set-grub
-
-- meta: flush_handlers
-
-- name: Install CSIT PIP requirements
- pip:
- name:
- - 'docopt==0.6.2'
- - 'ecdsa==0.13'
- - 'enum34==1.1.2'
- - 'ipaddress==1.0.16'
- - 'paramiko==1.16.0'
- - 'pexpect==4.6.0'
- - 'pycrypto==2.6.1'
- - 'pykwalify==1.5.0'
- - 'pypcap==1.1.5'
- - 'python-dateutil==2.4.2'
- - 'PyYAML==3.11'
- - 'requests==2.9.1'
- - 'robotframework==2.9.2'
- - 'scapy==2.3.1'
- - 'scp==0.10.2'
- - 'six==1.12.0'
- - 'dill==0.2.8.2'
- - 'numpy==1.14.5'
- - 'scipy==1.1.0'
- tags: install-pip
-
--- /dev/null
+---
+# file: roles/topology/tasks/main.yaml
+
+- name: Create topology file
+ template:
+ src: 'templates/topology_{{ cloud_topology }}.j2'
+ dest: '../../../../topologies/available/{{ cloud_topology }}_3n_skx_testbed.yaml'
+ tags:
+ - create-topology-file
--- /dev/null
+---
+# file: roles/trex/defaults/main.yaml
+
+packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}"
+
+packages_base:
+ - []
+
+packages_by_distro:
+ ubuntu:
+ - "build-essential"
+ - "libmnl-dev"
+ - "libnuma-dev"
+ - "libpcap-dev"
+ - "librdmacm-dev"
+ - "librdmacm1"
+ - "libssl-dev"
+ - "pciutils"
+ - "python3-pip"
+ - "zlib1g-dev"
+
+packages_by_arch:
+ aarch64:
+ - []
+ x86_64:
+ - []
+
+trex_target_dir: "/opt"
+trex_url: "https://github.com/cisco-system-traffic-generator/trex-core/archive/"
+trex_version:
+ - "2.54"
+ - "2.73"
--- /dev/null
+diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py
+index e8d0cd51..a0c01adb 100755
+--- a/linux_dpdk/ws_main.py
++++ b/linux_dpdk/ws_main.py
+@@ -209,7 +209,7 @@ def check_ofed(ctx):
+
+ ofed_ver= 42
+ ofed_ver_show= '4.2'
+-
++ return True
+ if not os.path.isfile(ofed_info):
+ ctx.end_msg('not found', 'YELLOW')
+ return False
+@@ -1552,8 +1552,6 @@ class build_option:
+ flags += ['-DNDEBUG'];
+ else:
+ flags += ['-UNDEBUG'];
+- if bld.env.OFED_OK:
+- flags += ['-DHAVE_IBV_MLX4_WQE_LSO_SEG=1']
+ return (flags)
+
+ def get_bnxt_flags(self):
+diff --git a/src/dpdk/drivers/net/mlx4/mlx4_autoconf.h b/src/dpdk/drivers/net/mlx4/mlx4_autoconf.h
+index b3d68683..35474409 100644
+--- a/src/dpdk/drivers/net/mlx4/mlx4_autoconf.h
++++ b/src/dpdk/drivers/net/mlx4/mlx4_autoconf.h
+@@ -1,3 +1,6 @@
+-#ifndef HAVE_IBV_MLX4_WQE_LSO_SEG
+-#define HAVE_IBV_MLX4_WQE_LSO_SEG
+-#endif
++/* HAVE_IBV_MLX4_BUF_ALLOCATORS is not defined. */
++
++/* HAVE_IBV_MLX4_UAR_MMAP_OFFSET is not defined. */
++
++/* HAVE_IBV_MLX4_WQE_LSO_SEG is not defined. */
++
+diff --git a/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h
+index 8770fdde..75db5ae8 100644
+--- a/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h
++++ b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h
+@@ -1,54 +1,362 @@
+-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+-#define HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+-#endif
++/* HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT is not defined. */
+
+-#ifndef HAVE_IBV_FLOW_DV_SUPPORT
+-#define HAVE_IBV_FLOW_DV_SUPPORT
+-#endif
++#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
++#define HAVE_IBV_DEVICE_TUNNEL_SUPPORT 1
++#endif /* HAVE_IBV_DEVICE_TUNNEL_SUPPORT */
+
+-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+-#define HAVE_IBV_DEVICE_COUNTERS_SET_V45
+-#endif
++/* HAVE_IBV_DEVICE_MPLS_SUPPORT is not defined. */
+
+-#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
+-#define HAVE_IBV_FLOW_DEVX_COUNTERS
+-#endif
++#ifndef HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING
++#define HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING 1
++#endif /* HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING */
+
+-#ifndef HAVE_IBV_MLX4_WQE_LSO_SEG
+-#define HAVE_IBV_MLX4_WQE_LSO_SEG
+-#endif
++/* HAVE_IBV_WQ_FLAG_RX_END_PADDING is not defined. */
+
++#ifndef HAVE_IBV_MLX5_MOD_SWP
++#define HAVE_IBV_MLX5_MOD_SWP 1
++#endif /* HAVE_IBV_MLX5_MOD_SWP */
+
+-#ifdef SUPPORTED_40000baseKR4_Full
++#ifndef HAVE_IBV_MLX5_MOD_MPW
++#define HAVE_IBV_MLX5_MOD_MPW 1
++#endif /* HAVE_IBV_MLX5_MOD_MPW */
++
++#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
++#define HAVE_IBV_MLX5_MOD_CQE_128B_COMP 1
++#endif /* HAVE_IBV_MLX5_MOD_CQE_128B_COMP */
++
++#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
++#define HAVE_IBV_MLX5_MOD_CQE_128B_PAD 1
++#endif /* HAVE_IBV_MLX5_MOD_CQE_128B_PAD */
++
++/* HAVE_IBV_FLOW_DV_SUPPORT is not defined. */
++
++/* HAVE_MLX5DV_DR is not defined. */
++
++/* HAVE_MLX5DV_DR_ESWITCH is not defined. */
++
++/* HAVE_IBV_DEVX_OBJ is not defined. */
++
++/* HAVE_IBV_FLOW_DEVX_COUNTERS is not defined. */
++
++#ifndef HAVE_ETHTOOL_LINK_MODE_25G
++#define HAVE_ETHTOOL_LINK_MODE_25G 1
++#endif /* HAVE_ETHTOOL_LINK_MODE_25G */
++
++#ifndef HAVE_ETHTOOL_LINK_MODE_50G
++#define HAVE_ETHTOOL_LINK_MODE_50G 1
++#endif /* HAVE_ETHTOOL_LINK_MODE_50G */
++
++#ifndef HAVE_ETHTOOL_LINK_MODE_100G
++#define HAVE_ETHTOOL_LINK_MODE_100G 1
++#endif /* HAVE_ETHTOOL_LINK_MODE_100G */
++
++/* HAVE_IBV_DEVICE_COUNTERS_SET_V42 is not defined. */
++
++/* HAVE_IBV_DEVICE_COUNTERS_SET_V45 is not defined. */
++
++#ifndef HAVE_RDMA_NL_NLDEV
++#define HAVE_RDMA_NL_NLDEV 1
++#endif /* HAVE_RDMA_NL_NLDEV */
++
++#ifndef HAVE_RDMA_NLDEV_CMD_GET
++#define HAVE_RDMA_NLDEV_CMD_GET 1
++#endif /* HAVE_RDMA_NLDEV_CMD_GET */
++
++#ifndef HAVE_RDMA_NLDEV_CMD_PORT_GET
++#define HAVE_RDMA_NLDEV_CMD_PORT_GET 1
++#endif /* HAVE_RDMA_NLDEV_CMD_PORT_GET */
++
++#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_INDEX
++#define HAVE_RDMA_NLDEV_ATTR_DEV_INDEX 1
++#endif /* HAVE_RDMA_NLDEV_ATTR_DEV_INDEX */
++
++#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_NAME
++#define HAVE_RDMA_NLDEV_ATTR_DEV_NAME 1
++#endif /* HAVE_RDMA_NLDEV_ATTR_DEV_NAME */
++
++#ifndef HAVE_RDMA_NLDEV_ATTR_PORT_INDEX
++#define HAVE_RDMA_NLDEV_ATTR_PORT_INDEX 1
++#endif /* HAVE_RDMA_NLDEV_ATTR_PORT_INDEX */
++
++/* HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX is not defined. */
++
++#ifndef HAVE_IFLA_NUM_VF
++#define HAVE_IFLA_NUM_VF 1
++#endif /* HAVE_IFLA_NUM_VF */
++
++#ifndef HAVE_IFLA_EXT_MASK
++#define HAVE_IFLA_EXT_MASK 1
++#endif /* HAVE_IFLA_EXT_MASK */
++
++#ifndef HAVE_IFLA_PHYS_SWITCH_ID
++#define HAVE_IFLA_PHYS_SWITCH_ID 1
++#endif /* HAVE_IFLA_PHYS_SWITCH_ID */
++
++#ifndef HAVE_IFLA_PHYS_PORT_NAME
++#define HAVE_IFLA_PHYS_PORT_NAME 1
++#endif /* HAVE_IFLA_PHYS_PORT_NAME */
++
++#ifndef HAVE_IFLA_VXLAN_COLLECT_METADATA
++#define HAVE_IFLA_VXLAN_COLLECT_METADATA 1
++#endif /* HAVE_IFLA_VXLAN_COLLECT_METADATA */
++
++#ifndef HAVE_TCA_CHAIN
++#define HAVE_TCA_CHAIN 1
++#endif /* HAVE_TCA_CHAIN */
++
++#ifndef HAVE_TCA_FLOWER_ACT
++#define HAVE_TCA_FLOWER_ACT 1
++#endif /* HAVE_TCA_FLOWER_ACT */
++
++#ifndef HAVE_TCA_FLOWER_FLAGS
++#define HAVE_TCA_FLOWER_FLAGS 1
++#endif /* HAVE_TCA_FLOWER_FLAGS */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE
++#define HAVE_TCA_FLOWER_KEY_ETH_TYPE 1
++#endif /* HAVE_TCA_FLOWER_KEY_ETH_TYPE */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST
++#define HAVE_TCA_FLOWER_KEY_ETH_DST 1
++#endif /* HAVE_TCA_FLOWER_KEY_ETH_DST */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK
++#define HAVE_TCA_FLOWER_KEY_ETH_DST_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_ETH_DST_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC
++#define HAVE_TCA_FLOWER_KEY_ETH_SRC 1
++#endif /* HAVE_TCA_FLOWER_KEY_ETH_SRC */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK
++#define HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO
++#define HAVE_TCA_FLOWER_KEY_IP_PROTO 1
++#endif /* HAVE_TCA_FLOWER_KEY_IP_PROTO */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC
++#define HAVE_TCA_FLOWER_KEY_IPV4_SRC 1
++#endif /* HAVE_TCA_FLOWER_KEY_IPV4_SRC */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK
++#define HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST
++#define HAVE_TCA_FLOWER_KEY_IPV4_DST 1
++#endif /* HAVE_TCA_FLOWER_KEY_IPV4_DST */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK
++#define HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC
++#define HAVE_TCA_FLOWER_KEY_IPV6_SRC 1
++#endif /* HAVE_TCA_FLOWER_KEY_IPV6_SRC */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK
++#define HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST
++#define HAVE_TCA_FLOWER_KEY_IPV6_DST 1
++#endif /* HAVE_TCA_FLOWER_KEY_IPV6_DST */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK
++#define HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC
++#define HAVE_TCA_FLOWER_KEY_TCP_SRC 1
++#endif /* HAVE_TCA_FLOWER_KEY_TCP_SRC */
++
++#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK
++#define HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST
++#define HAVE_TCA_FLOWER_KEY_TCP_DST 1
++#endif /* HAVE_TCA_FLOWER_KEY_TCP_DST */
++
++#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK
++#define HAVE_TCA_FLOWER_KEY_TCP_DST_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_TCP_DST_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC
++#define HAVE_TCA_FLOWER_KEY_UDP_SRC 1
++#endif /* HAVE_TCA_FLOWER_KEY_UDP_SRC */
++
++#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK
++#define HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST
++#define HAVE_TCA_FLOWER_KEY_UDP_DST 1
++#endif /* HAVE_TCA_FLOWER_KEY_UDP_DST */
++
++#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK
++#define HAVE_TCA_FLOWER_KEY_UDP_DST_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_UDP_DST_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID
++#define HAVE_TCA_FLOWER_KEY_VLAN_ID 1
++#endif /* HAVE_TCA_FLOWER_KEY_VLAN_ID */
++
++#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO
++#define HAVE_TCA_FLOWER_KEY_VLAN_PRIO 1
++#endif /* HAVE_TCA_FLOWER_KEY_VLAN_PRIO */
++
++#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE
++#define HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE 1
++#endif /* HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE */
++
++#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS
++#define HAVE_TCA_FLOWER_KEY_TCP_FLAGS 1
++#endif /* HAVE_TCA_FLOWER_KEY_TCP_FLAGS */
++
++#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK
++#define HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS
++#define HAVE_TCA_FLOWER_KEY_IP_TOS 1
++#endif /* HAVE_TCA_FLOWER_KEY_IP_TOS */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS_MASK
++#define HAVE_TCA_FLOWER_KEY_IP_TOS_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_IP_TOS_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL
++#define HAVE_TCA_FLOWER_KEY_IP_TTL 1
++#endif /* HAVE_TCA_FLOWER_KEY_IP_TTL */
++
++#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL_MASK
++#define HAVE_TCA_FLOWER_KEY_IP_TTL_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_IP_TTL_MASK */
++
++#ifndef HAVE_TC_ACT_GOTO_CHAIN
++#define HAVE_TC_ACT_GOTO_CHAIN 1
++#endif /* HAVE_TC_ACT_GOTO_CHAIN */
++
++#ifndef HAVE_TC_ACT_VLAN
++#define HAVE_TC_ACT_VLAN 1
++#endif /* HAVE_TC_ACT_VLAN */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_KEY_ID
++#define HAVE_TCA_FLOWER_KEY_ENC_KEY_ID 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_KEY_ID */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC
++#define HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK
++#define HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST
++#define HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK
++#define HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC
++#define HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK
++#define HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST
++#define HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK
++#define HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT
++#define HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK
++#define HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT
++#define HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT */
++
++#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK
++#define HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK 1
++#endif /* HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK */
++
++/* HAVE_TCA_FLOWER_KEY_ENC_IP_TOS is not defined. */
++
++/* HAVE_TCA_FLOWER_KEY_ENC_IP_TOS_MASK is not defined. */
++
++/* HAVE_TCA_FLOWER_KEY_ENC_IP_TTL is not defined. */
++
++/* HAVE_TCA_FLOWER_KEY_ENC_IP_TTL_MASK is not defined. */
++
++#ifndef HAVE_TC_ACT_TUNNEL_KEY
++#define HAVE_TC_ACT_TUNNEL_KEY 1
++#endif /* HAVE_TC_ACT_TUNNEL_KEY */
++
++#ifndef HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT
++#define HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT 1
++#endif /* HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT */
++
++/* HAVE_TCA_TUNNEL_KEY_ENC_TOS is not defined. */
++
++/* HAVE_TCA_TUNNEL_KEY_ENC_TTL is not defined. */
++
++#ifndef HAVE_TCA_TUNNEL_KEY_NO_CSUM
++#define HAVE_TCA_TUNNEL_KEY_NO_CSUM 1
++#endif /* HAVE_TCA_TUNNEL_KEY_NO_CSUM */
++
++#ifndef HAVE_TC_ACT_PEDIT
++#define HAVE_TC_ACT_PEDIT 1
++#endif /* HAVE_TC_ACT_PEDIT */
++
++#ifndef HAVE_SUPPORTED_40000baseKR4_Full
+ #define HAVE_SUPPORTED_40000baseKR4_Full 1
+-#endif
++#endif /* HAVE_SUPPORTED_40000baseKR4_Full */
+
+-#ifdef SUPPORTED_40000baseCR4_Full
++#ifndef HAVE_SUPPORTED_40000baseCR4_Full
+ #define HAVE_SUPPORTED_40000baseCR4_Full 1
+-#endif
++#endif /* HAVE_SUPPORTED_40000baseCR4_Full */
+
+-#ifdef SUPPORTED_40000baseSR4_Full
++#ifndef HAVE_SUPPORTED_40000baseSR4_Full
+ #define HAVE_SUPPORTED_40000baseSR4_Full 1
+-#endif
++#endif /* HAVE_SUPPORTED_40000baseSR4_Full */
+
+-#ifdef SUPPORTED_40000baseLR4_Full
++#ifndef HAVE_SUPPORTED_40000baseLR4_Full
+ #define HAVE_SUPPORTED_40000baseLR4_Full 1
+-#endif
++#endif /* HAVE_SUPPORTED_40000baseLR4_Full */
+
+-#ifdef SUPPORTED_56000baseKR4_Full
++#ifndef HAVE_SUPPORTED_56000baseKR4_Full
+ #define HAVE_SUPPORTED_56000baseKR4_Full 1
+-#endif
++#endif /* HAVE_SUPPORTED_56000baseKR4_Full */
+
+-#ifdef SUPPORTED_56000baseCR4_Full
++#ifndef HAVE_SUPPORTED_56000baseCR4_Full
+ #define HAVE_SUPPORTED_56000baseCR4_Full 1
+-#endif
++#endif /* HAVE_SUPPORTED_56000baseCR4_Full */
+
+-#ifdef SUPPORTED_56000baseSR4_Full
++#ifndef HAVE_SUPPORTED_56000baseSR4_Full
+ #define HAVE_SUPPORTED_56000baseSR4_Full 1
+-#endif
++#endif /* HAVE_SUPPORTED_56000baseSR4_Full */
+
+-#ifdef SUPPORTED_56000baseLR4_Full
++#ifndef HAVE_SUPPORTED_56000baseLR4_Full
+ #define HAVE_SUPPORTED_56000baseLR4_Full 1
+-#endif
++#endif /* HAVE_SUPPORTED_56000baseLR4_Full */
+
++#ifndef HAVE_STATIC_ASSERT
++#define HAVE_STATIC_ASSERT 1
++#endif /* HAVE_STATIC_ASSERT */
+
+diff --git a/src/dpdk/drivers/net/tap/rte_eth_tap.c b/src/dpdk/drivers/net/tap/rte_eth_tap.c
+index bc889c19..47a2b68f 100644
+--- a/src/dpdk/drivers/net/tap/rte_eth_tap.c
++++ b/src/dpdk/drivers/net/tap/rte_eth_tap.c
+@@ -34,8 +34,8 @@
+ #include <unistd.h>
+ #include <arpa/inet.h>
+ #include <net/if.h>
+-#include <linux_tap/if_tun.h>
+-#include <linux_tap/if_ether.h>
++#include <linux/if_tun.h>
++#include <linux/if_ether.h>
+ #include <fcntl.h>
+ #include <ctype.h>
+
+diff --git a/src/dpdk/drivers/net/tap/rte_eth_tap.h b/src/dpdk/drivers/net/tap/rte_eth_tap.h
+index 66cd3441..dc3579ac 100644
+--- a/src/dpdk/drivers/net/tap/rte_eth_tap.h
++++ b/src/dpdk/drivers/net/tap/rte_eth_tap.h
+@@ -11,7 +11,7 @@
+ #include <inttypes.h>
+ #include <net/if.h>
+
+-#include <linux_tap/if_tun.h>
++#include <linux/if_tun.h>
+
+ #include <rte_ethdev_driver.h>
+ #include <rte_ether.h>
+diff --git a/src/dpdk/drivers/net/tap/tap_autoconf.h b/src/dpdk/drivers/net/tap/tap_autoconf.h
+index dddd4ae6..d5880608 100644
+--- a/src/dpdk/drivers/net/tap/tap_autoconf.h
++++ b/src/dpdk/drivers/net/tap/tap_autoconf.h
+@@ -1,14 +1,24 @@
+ #ifndef HAVE_TC_FLOWER
+ #define HAVE_TC_FLOWER 1
+-#endif
++#endif /* HAVE_TC_FLOWER */
+
++#ifndef HAVE_TC_VLAN_ID
++#define HAVE_TC_VLAN_ID 1
++#endif /* HAVE_TC_VLAN_ID */
+
+ #ifndef HAVE_TC_BPF
+ #define HAVE_TC_BPF 1
+-#endif
++#endif /* HAVE_TC_BPF */
+
+-#ifndef HAVE_TC_VLAN_ID
+-#define HAVE_TC_VLAN_ID 1
+-#endif
++#ifndef HAVE_TC_BPF_FD
++#define HAVE_TC_BPF_FD 1
++#endif /* HAVE_TC_BPF_FD */
++
++#ifndef HAVE_TC_ACT_BPF
++#define HAVE_TC_ACT_BPF 1
++#endif /* HAVE_TC_ACT_BPF */
+
++#ifndef HAVE_TC_ACT_BPF_FD
++#define HAVE_TC_ACT_BPF_FD 1
++#endif /* HAVE_TC_ACT_BPF_FD */
+
+diff --git a/src/dpdk/drivers/net/tap/tap_netlink.h b/src/dpdk/drivers/net/tap/tap_netlink.h
+index 900ce375..faa73ba1 100644
+--- a/src/dpdk/drivers/net/tap/tap_netlink.h
++++ b/src/dpdk/drivers/net/tap/tap_netlink.h
+@@ -8,8 +8,8 @@
+
+ #include <ctype.h>
+ #include <inttypes.h>
+-#include <linux_tap/rtnetlink.h>
+-#include <linux_tap/netlink.h>
++#include <linux/rtnetlink.h>
++#include <linux/netlink.h>
+ #include <stdio.h>
+
+ #include <rte_log.h>
+diff --git a/src/dpdk/drivers/net/tap/tap_tcmsgs.h b/src/dpdk/drivers/net/tap/tap_tcmsgs.h
+index 782de540..8cedea84 100644
+--- a/src/dpdk/drivers/net/tap/tap_tcmsgs.h
++++ b/src/dpdk/drivers/net/tap/tap_tcmsgs.h
+@@ -7,13 +7,13 @@
+ #define _TAP_TCMSGS_H_
+
+ #include <tap_autoconf.h>
+-#include <linux_tap/if_ether.h>
+-#include <linux_tap/rtnetlink.h>
+-#include <linux_tap/pkt_sched.h>
+-#include <linux_tap/pkt_cls.h>
+-#include <linux_tap/tc_act/tc_mirred.h>
+-#include <linux_tap/tc_act/tc_gact.h>
+-#include <linux_tap/tc_act/tc_skbedit.h>
++#include <linux/if_ether.h>
++#include <linux/rtnetlink.h>
++#include <linux/pkt_sched.h>
++#include <linux/pkt_cls.h>
++#include <linux/tc_act/tc_mirred.h>
++#include <linux/tc_act/tc_gact.h>
++#include <linux/tc_act/tc_skbedit.h>
+ #ifdef HAVE_TC_ACT_BPF
+ #include <linux/tc_act/tc_bpf.h>
+ #endif
--- /dev/null
+---
+# file: roles/trex/tasks/main.yaml
+
+- name: T-Rex - Distribution - Release - Machine Prerequisites
+ package:
+ name: "{{ packages | flatten(levels=1) }}"
+ state: latest
+ update_cache: true
+ tags:
+ - install-dependencies
+
+- name: T-Rex - Get Release Archive
+ get_url:
+ url: "{{ trex_url }}/v{{ item }}.tar.gz"
+ dest: "{{ trex_target_dir }}/trex-core-{{ item }}.tar.gz"
+ validate_certs: False
+ mode: 0644
+ loop: "{{ trex_version }}"
+ register: trex_downloaded
+ tags:
+ - install-trex
+
+- name: T-Rex - Ensure Directory Exists
+ file:
+ path: "{{ trex_target_dir }}/trex-core-{{ item }}"
+ state: "directory"
+ loop: "{{ trex_version }}"
+ tags:
+ - install-trex
+
+- name: T-Rex - Extract Release Archive
+ unarchive:
+ remote_src: true
+ src: "{{ trex_target_dir }}/trex-core-{{ item }}.tar.gz"
+ dest: "{{ trex_target_dir }}/"
+ creates: "{{ trex_target_dir }}/trex-core-{{ item }}/linux_dpdk/"
+ loop: "{{ trex_version }}"
+ register: trex_extracted
+ tags:
+ - install-trex
+
+- name: T-Rex - Azure patch I
+ patch:
+ src: "files/t-rex.patch"
+ basedir: "{{ trex_target_dir }}/trex-core-{{ item }}"
+ strip: 1
+ loop: "{{ trex_version }}"
+ when:
+ - azure is defined
+ tags:
+ - install-trex
+
+- name: T-Rex - Compile Release I
+ command: "./b configure"
+ args:
+ chdir: "{{ trex_target_dir }}/trex-core-{{ item }}/linux_dpdk/"
+ loop: "{{ trex_version }}"
+ when: trex_extracted
+ tags:
+ - install-trex
+
+- name: T-Rex - Compile Release II
+ command: "./b build"
+ args:
+ chdir: "{{ trex_target_dir }}/trex-core-{{ item }}/linux_dpdk/"
+ loop: "{{ trex_version }}"
+ when: trex_extracted
+ tags:
+ - install-trex
+
+- name: T-Rex - Compile Release III
+ command: "make"
+ args:
+ chdir: "{{ trex_target_dir }}/trex-core-{{ item }}/scripts/ko/src"
+ loop: "{{ trex_version }}"
+ when: trex_extracted
+ tags:
+ - install-trex
+
+- name: T-Rex - Compile Release IV
+ command: "make install"
+ args:
+ chdir: "{{ trex_target_dir }}/trex-core-{{ item }}/scripts/ko/src"
+ loop: "{{ trex_version }}"
+ when: trex_extracted
+ tags:
+ - install-trex
--- /dev/null
+---
+# file: roles/user_add/defaults/main.yaml
+
+user_pass: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+
--- /dev/null
+---
+# file: roles/user_add/handlers/main.yaml
+
+- name: Restart sshd
+ service:
+ name: sshd
+ state: restarted
+ tags:
+ - restart-sshd
+
--- /dev/null
+---
+# file: roles/user_add/tasks/main.yaml
+
+- name: Add testuser account
+ user:
+ name: "testuser"
+ state: present
+ shell: "/bin/bash"
+ password: "{{ user_pass }}"
+ tags:
+ - add-user
+
+- name: Allow password login
+ lineinfile:
+ dest: "/etc/ssh/sshd_config"
+ regexp: "^PasswordAuthentication no"
+ line: "PasswordAuthentication yes"
+ notify:
+ - "Restart sshd"
+ tags:
+ - allow-password-login
+
+- name: Add visudo entry
+ lineinfile:
+ dest: "/etc/sudoers"
+ state: present
+ line: "testuser ALL=(ALL) NOPASSWD: ALL"
+ validate: "visudo -cf %s"
+ tags:
+ - allow-sudo
+
--- /dev/null
+---
+# file: roles/vexx_hosts/defaults/main.yaml
+
+# provision via cobbler
+provision_enabled: False
+# name_servers is used in /etc/netplan/01-netcfg.yaml
+name_servers: "1.1.1.1, 8.8.8.8"
+
+# Proxy settings: Uncomment and fill the proper values. These variables will be
+# set globally by writing into /etc/environment file on target machine.
+#proxy_env:
+# http_proxy: http://proxy.com:80
+# HTTP_PROXY: http://proxy.com:80
+# https_proxy: http://proxy.com:80
+# HTTPS_PROXY: http://proxy.com:80
+# ftp_proxy: http://proxy.com:80
+# FTP_PROXY: http://proxy.com:80
+# no_proxy: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
+# NO_PROXY: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
---
-# file: roles/common/handlers/cimc.yaml
+# file: roles/vexx_hosts/handlers/cimc.yaml
- name: Boot from network
imc_rest:
- hostname: '{{ inventory_cimc_hostname }}'
- username: '{{ inventory_cimc_username }}'
- password: '{{ inventory_cimc_password }}'
+ hostname: "{{ inventory_cimc_hostname }}"
+ username: "{{ inventory_cimc_username }}"
+ password: "{{ inventory_cimc_password }}"
validate_certs: no
content: |
<!-- Configure PXE boot -->
<lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
</inConfig></configConfMo>
delegate_to: localhost
- tags: boot-network
+ tags:
+ - boot-network
- name: Boot from storage
imc_rest:
- hostname: '{{ inventory_cimc_hostname }}'
- username: '{{ inventory_cimc_username }}'
- password: '{{ inventory_cimc_password }}'
+ hostname: "{{ inventory_cimc_hostname }}"
+ username: "{{ inventory_cimc_username }}"
+ password: "{{ inventory_cimc_password }}"
validate_certs: no
content: |
<configConfMo><inConfig>
<lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
</inConfig></configConfMo>
delegate_to: localhost
- tags: boot-storage
+ tags:
+ - boot-storage
- name: Power up server
imc_rest:
- hostname: '{{ inventory_cimc_hostname }}'
- username: '{{ inventory_cimc_username }}'
- password: '{{ inventory_cimc_password }}'
+ hostname: "{{ inventory_cimc_hostname }}"
+ username: "{{ inventory_cimc_username }}"
+ password: "{{ inventory_cimc_password }}"
validate_certs: no
content: |
<configConfMo><inConfig>
<computeRackUnit dn="sys/rack-unit-1" adminPower="up"/>
</inConfig></configConfMo>
delegate_to: localhost
- tags: power-up
+ tags:
+ - power-up
- name: Power down server
imc_rest:
- hostname: '{{ inventory_cimc_hostname }}'
- username: '{{ inventory_cimc_username }}'
- password: '{{ inventory_cimc_password }}'
+ hostname: "{{ inventory_cimc_hostname }}"
+ username: "{{ inventory_cimc_username }}"
+ password: "{{ inventory_cimc_password }}"
validate_certs: no
content: |
<configConfMo><inConfig>
<computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
</inConfig></configConfMo>
delegate_to: localhost
- tags: power-down
+ tags:
+ - power-down
- name: Power cycle server
imc_rest:
- hostname: '{{ inventory_cimc_hostname }}'
- username: '{{ inventory_cimc_username }}'
- password: '{{ inventory_cimc_password }}'
+ hostname: "{{ inventory_cimc_hostname }}"
+ username: "{{ inventory_cimc_username }}"
+ password: "{{ inventory_cimc_password }}"
validate_certs: no
content: |
<!-- Power cycle server -->
<computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
</inConfig></configConfMo>
delegate_to: localhost
- tags: power-cycle
+ tags:
+ - power-cycle
--- /dev/null
+---
+# file: roles/vexx_hosts/handlers/ipmi.yaml
+
+- name: Boot from network
+ ipmi_boot:
+ name: "{{ inventory_ipmi_hostname }}"
+ user: "{{ inventory_ipmi_username }}"
+ password: "{{ inventory_ipmi_password }}"
+ bootdev: network
+ delegate_to: localhost
+ tags:
+ - boot-network
+
+- name: Boot from storage
+ ipmi_boot:
+ name: "{{ inventory_ipmi_hostname }}"
+ user: "{{ inventory_ipmi_username }}"
+ password: "{{ inventory_ipmi_password }}"
+ bootdev: hd
+ delegate_to: localhost
+ tags:
+ - boot-storage
+
+- name: Power up server
+ ipmi_power:
+ name: "{{ inventory_ipmi_hostname }}"
+ user: "{{ inventory_ipmi_username }}"
+ password: "{{ inventory_ipmi_password }}"
+ state: on
+ delegate_to: localhost
+ tags:
+ - power-up
+
+- name: Power down server
+ ipmi_power:
+ name: "{{ inventory_ipmi_hostname }}"
+ user: "{{ inventory_ipmi_username }}"
+ password: "{{ inventory_ipmi_password }}"
+ state: off
+ delegate_to: localhost
+ tags:
+ - power-down
+
+- name: Power cycle server
+ ipmi_power:
+ name: "{{ inventory_ipmi_hostname }}"
+ user: "{{ inventory_ipmi_username }}"
+ password: "{{ inventory_ipmi_password }}"
+ state: boot
+ delegate_to: localhost
+ tags:
+ - power-cycle
--- /dev/null
+---
+# file: roles/vexx_hosts/handlers/main.yaml
+
+- name: IPMI specific
+ import_tasks: ipmi.yaml
+ when: inventory_ipmi_hostname is defined
+ tags:
+ - ipmi-handlers
+
+- name: CIMC specific
+ import_tasks: cimc.yaml
+ when: inventory_cimc_hostname is defined
+ tags:
+ - cimc-handlers
+
+- name: Reboot server
+ reboot:
+ reboot_timeout: 3600
+ tags:
+ - reboot-server
+
+- name: Wait for server to restart
+ wait_for:
+ host: "{{ inventory_hostname }}"
+ search_regex: OpenSSH
+ port: 22
+ delay: 60
+ timeout: 3600
+ tags:
+ - reboot-server
--- /dev/null
+---
+# file: roles/vexx_hosts/tasks/main.yaml
+
+- name: Ensure the system exists in Cobbler
+ cobbler_system:
+ host: "{{ cobbler_hostname }}"
+ port: 60080
+ interfaces:
+ br1:
+ ipaddress: "{{ ansible_default_ipv4.address }}"
+ macaddress: "{{ ansible_default_ipv4.macaddress }}"
+ name: "{{ hostname }}"
+ password: "{{ cobbler_password }}"
+ properties:
+ hostname: "{{ hostname }}"
+ gateway: "{{ ansible_default_ipv4.gateway }}"
+ profile: "{{ cobbler_profile }}"
+ name_servers: "{{ name_servers }}"
+ kickstart: "/var/lib/cobbler/kickstarts/{{ cobbler_profile }}.seed"
+ kernel_options: '"interface={{ ansible_default_ipv4.interface }}"'
+ netboot_enabled: yes
+ username: "{{ cobbler_username }}"
+ use_ssl: no
+ validate_certs: no
+ when: provision_enabled
+ delegate_to: localhost
+ tags:
+ - cobbler-include
+
+- name: Commit Cobbler changes
+ cobbler_sync:
+ host: "{{ cobbler_hostname }}"
+ port: 60080
+ password: "{{ cobbler_password }}"
+ username: "{{ cobbler_username }}"
+ use_ssl: no
+ validate_certs: no
+ run_once: yes
+ when: provision_enabled
+ delegate_to: localhost
+ register: __included_in_cobbler
+ notify:
+ - "Boot from network"
+ - "Reboot server"
+ tags:
+ - cobbler-include
+
+- meta: flush_handlers
--- /dev/null
+---
+# file: roles/sut/defaults/main.yaml
+
+packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}"
+
+packages_base:
+ - "gdb"
+ - "libtool"
+ - "lxc"
+ - "pkg-config"
+ - "screen"
+
+packages_by_distro:
+ ubuntu:
+ - "build-essential"
+ - "libglib2.0-dev"
+ - "libmbedcrypto1"
+ - "libmbedtls10"
+ - "libmbedx509-0"
+ - "libnuma-dev"
+ - "libpixman-1-dev"
+
+packages_by_arch:
+ aarch64:
+ - []
+ x86_64:
+ - []
--- /dev/null
+---
+# file: roles/sut/tasks/main.yaml
+
+- name: SUT - Install Distribution - Release - Machine Prerequisites
+ package:
+ name: "{{ packages | flatten(levels=1) }}"
+ state: latest
+ update_cache: true
+ tags:
+ - install-dependencies
+
+- name: SUT - Copy 80-vpp.conf
+ file:
+ src: "/dev/null"
+ dest: "/etc/sysctl.d/80-vpp.conf"
+ state: "link"
+ become: yes
+ tags:
+ - copy-80-vpp
enabled: yes
state: started
name: csit-initialize-vfs.service
- tags: start-vf-service
+ tags:
+ - start-vf-service
- name: Update GRUB
command: update-grub
- tags: update-grub
+ tags:
+ - update-grub
- name: Reboot server
reboot:
reboot_timeout: 3600
- tags: reboot-server
-
-- name: Wait for server to restart
- wait_for:
- host: '{{ inventory_hostname }}'
- search_regex: OpenSSH
- port: 22
- delay: 60
- timeout: 3600
- tags: reboot-server
+ tags:
+ - reboot-server
---
# file: roles/vpp_device/tasks/main.yaml
-- name: Load vfio-pci by default
+- name: VPP_device - Load Kernel Modules By Default
lineinfile:
- path: '/etc/modules'
- state: 'present'
- line: 'vfio-pci'
- tags: load-vfio-pci
+ path: "/etc/modules"
+ state: "present"
+ line: "{{ item }}"
+ with_items:
+ - "vfio-pci"
+ tags:
+ - load-kernel-modules
-- name: Copy csit-initialize-vfs.sh
+- name: VPP_device - Copy csit-initialize-vfs.sh
copy:
- src: 'files/csit-initialize-vfs.sh'
- dest: '/usr/local/bin/'
- owner: 'root'
- group: 'root'
- mode: '744'
- tags: copy-vf-script
+ src: "files/csit-initialize-vfs.sh"
+ dest: "/usr/local/bin/"
+ owner: "root"
+ group: "root"
+ mode: "744"
+ tags:
+ - copy-vf-script
-- name: Copy csit-initialize-vfs-data.sh
+- name: VPP_device - Copy csit-initialize-vfs-data.sh
copy:
- src: 'files/{{ vfs_data_file }}'
- dest: '/usr/local/bin/csit-initialize-vfs-data.sh'
- owner: 'root'
- group: 'root'
- mode: '744'
+ src: "files/{{ vfs_data_file }}"
+ dest: "/usr/local/bin/csit-initialize-vfs-data.sh"
+ owner: "root"
+ group: "root"
+ mode: "744"
tags: copy-vf-data-script
- when: vfs_data_file is defined
+ when:
+ - vfs_data_file is defined
-- name: Copy default csit-initialize-vfs-data.sh
+- name: VPP_device - Copy default csit-initialize-vfs-data.sh
copy:
- src: 'files/csit-initialize-vfs-default.sh'
- dest: '/usr/local/bin/csit-initialize-vfs-data.sh'
- owner: 'root'
- group: 'root'
- mode: '744'
+ src: "files/csit-initialize-vfs-default.sh"
+ dest: "/usr/local/bin/csit-initialize-vfs-data.sh"
+ owner: "root"
+ group: "root"
+ mode: "744"
tags: copy-vf-data-script
- when: vfs_data_file is not defined
+ when:
+ - vfs_data_file is not defined
-- name: Start csit-initialize-vfs.service
+- name: VPP_device - Start csit-initialize-vfs.service
copy:
- src: 'files/csit-initialize-vfs.service'
- dest: '/etc/systemd/system/'
- owner: 'root'
- group: 'root'
- mode: '644'
+ src: "files/csit-initialize-vfs.service"
+ dest: "/etc/systemd/system/"
+ owner: "root"
+ group: "root"
+ mode: "644"
notify:
- - 'Start csit-initialize-vfs.service'
- tags: start-vf-service
+ - "Start csit-initialize-vfs.service"
+ tags:
+ - start-vf-service
- meta: flush_handlers
-- name: Set hugepages in GRUB
+- name: VPP_device - Set hugepages in GRUB
lineinfile:
- path: '/etc/default/grub'
- state: 'present'
- regexp: '^GRUB_CMDLINE_LINUX='
+ path: "/etc/default/grub"
+ state: "present"
+ regexp: "^GRUB_CMDLINE_LINUX="
line: 'GRUB_CMDLINE_LINUX="hugepagesz=2M hugepages={{ grub.nr_hugepages }}"'
notify:
- - 'Update GRUB'
- - 'Reboot server'
- tags: set-grub
+ - "Update GRUB"
+ - "Reboot server"
+ tags:
+ - set-grub
- meta: flush_handlers
-
-- name: Kernel VM install
- include_tasks: '../../common/tasks/kernel_install.yaml'
- tags: install-kernel-image
--- /dev/null
+---
+# file: roles/wrk/defaults/main.yaml
+
+packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}"
+
+packages_base:
+ - []
+
+packages_by_distro:
+ ubuntu:
+ - "build-essential"
+
+packages_by_arch:
+ aarch64:
+ - []
+ x86_64:
+ - []
+
+wrk_target_dir: "/opt"
+wrk_version:
+ - "4.0.2"
--- /dev/null
+---
+# file: roles/wrk/tasks/main.yaml
+
+- name: WRK - Install Distribution - Release - Machine Prerequisites
+ package:
+ name: "{{ packages | flatten(levels=1) }}"
+ state: latest
+ update_cache: true
+ tags:
+ - install-dependencies
+
+- name: WRK - Get Release Archive
+ get_url:
+ url: "https://github.com/wg/wrk/archive/{{ item }}.tar.gz"
+ dest: "{{ wrk_target_dir }}/wrk-{{ item }}.tar.gz"
+ mode: 0644
+ loop: "{{ wrk_version }}"
+ register: wrk_downloaded
+ tags:
+ - install-wrk
+
+- name: WRK - Extract Release Archive
+ unarchive:
+ remote_src: true
+ src: "{{ wrk_target_dir }}/wrk-{{ item }}.tar.gz"
+ dest: "{{ wrk_target_dir }}/"
+ creates: "{{ wrk_target_dir }}/wrk-{{ item }}/src"
+ loop: "{{ wrk_version }}"
+ register: wrk_extracted
+ tags:
+ - install-wrk
+
+- name: WRK - Compile Release I
+ command: "make"
+ args:
+ chdir: "{{ wrk_target_dir }}/wrk-{{ item }}"
+ loop: "{{ wrk_version }}"
+ when: wrk_extracted
+ register: wrk_compiled
+ tags:
+ - install-wrk
+
+- name: WRK - Copy Binary
+ command: "cp {{ wrk_target_dir }}/wrk-{{ item }}/wrk /usr/local/bin/"
+ loop: "{{ wrk_version }}"
+ when: wrk_compiled
+ tags:
+ - install-wrk
--- /dev/null
+---
+# file: site_aws.yaml
+
+- import_playbook: tg_aws.yaml
+ tags: tg
+
+- import_playbook: sut_aws.yaml
+ tags: sut
--- /dev/null
+---
+# file: site_azure.yaml
+
+- import_playbook: tg_azure.yaml
+ tags: tg
+
+- import_playbook: sut_azure.yaml
+ tags: sut
roles:
- role: common
tags: common
- - role: sut
- tags: sut
- - role: tg_sut
- tags: tg_sut
+ - role: vexx_hosts
+ tags: vexx_hosts
+ - role: vpp
+ tags: vpp
+ - role: dpdk
+ tags: dpdk
+ - role: docker
+ tags: docker
+ - role: kubernetes
+ tags: kubernetes
+ - role: mellanox
+ tags: mellanox
+ - role: kernel_vm
+ tags: kernel_vm
+ - role: performance_tuning
+ tags: performance_tuning
+ - role: cleanup
+ tags: cleanup
- role: calibration
tags: calibration
--- /dev/null
+---
+# file: sut_aws.yaml
+
+- hosts: sut
+ become: yes
+ become_user: root
+ roles:
+ - role: user_add
+ tags: user_add
+ - role: common
+ tags: common
+ - role: vpp
+ tags: vpp
+ - role: dpdk
+ tags: dpdk
+ - role: aws
+ tags: aws
+ - role: iperf
+ tags: iperf
+ - role: docker
+ tags: docker
+ - role: cleanup
+ tags: cleanup
+ - role: calibration
+ tags: calibration
--- /dev/null
+---
+# file: sut_azure.yaml
+
+- hosts: sut
+ become: yes
+ become_user: root
+ roles:
+ - role: user_add
+ tags: user_add
+ - role: common
+ tags: common
+ - role: vpp
+ tags: vpp
+ - role: azure
+ tags: azure
+ - role: iperf
+ tags: iperf
+ - role: docker
+ tags: docker
+ - role: dpdk
+ tags: dpdk
+ - role: cleanup
+ tags: cleanup
+ - role: calibration
+ tags: calibration
--- /dev/null
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/3_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 3-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "{{ tg_public_ip }}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 50GE port1 on ENA NIC.
+      mac_address: "{{ tg_if1_mac }}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-50G
+ port2:
+ # tg_instance/p2 - 50GE port2 on ENA NIC.
+      mac_address: "{{ tg_if2_mac }}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-50G
+ DUT1:
+ type: DUT
+ host: "{{ dut1_public_ip }}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 50GE port1 on ENA NIC.
+      mac_address: "{{ dut1_if1_mac }}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-50G
+ port2:
+ # dut1_instance/p2 - 50GE port2 on ENA NIC.
+      mac_address: "{{ dut1_if2_mac }}"
+ pci_address: "0000:00:07.0"
+ link: link21
+ model: Amazon-Nitro-50G
+ DUT2:
+ type: DUT
+ host: "{{ dut2_public_ip }}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
+ interfaces:
+ port1:
+ # dut2_instance/p1 - 50GE port1 on ENA NIC.
+      mac_address: "{{ dut2_if1_mac }}"
+ pci_address: "0000:00:06.0"
+ link: link21
+ model: Amazon-Nitro-50G
+ port2:
+      # dut2_instance/p2 - 50GE port2 on ENA NIC.
+      mac_address: "{{ dut2_if2_mac }}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-50G
+
--- /dev/null
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/3_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 3-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "{{ tg_public_ip }}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 40GE port1 on Mellanox NIC.
+ mac_address: "{{ tg_if1_mac | lower | replace('-',':') }}"
+ pci_address: "0002:00:02.0"
+ link: link1
+ model: azure-mlx-40g
+ port2:
+ # tg_instance/p2 - 40GE port2 on Mellanox NIC.
+ mac_address: "{{ tg_if2_mac | lower | replace('-',':') }}"
+ pci_address: "0003:00:02.0"
+ link: link2
+ model: azure-mlx-40g
+ DUT1:
+ type: DUT
+ host: "{{ dut1_public_ip }}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 40GE port1 on Mellanox NIC.
+ mac_address: "{{ dut1_if1_mac | lower | replace('-',':') }}"
+ pci_address: "0002:00:02.0"
+ link: link1
+ model: azure-mlx-40g
+ port2:
+      # dut1_instance/p2 - 40GE port2 on Mellanox NIC.
+ mac_address: "{{ dut1_if2_mac | lower | replace('-',':') }}"
+ pci_address: "0003:00:02.0"
+ link: link21
+ model: azure-mlx-40g
+ DUT2:
+ type: DUT
+ host: "{{ dut2_public_ip }}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
+ interfaces:
+ port1:
+      # dut2_instance/p1 - 40GE port1 on Mellanox NIC.
+ mac_address: "{{ dut2_if1_mac | lower | replace('-',':') }}"
+ pci_address: "0002:00:02.0"
+ link: link21
+ model: azure-mlx-40g
+ port2:
+      # dut2_instance/p2 - 40GE port2 on Mellanox NIC.
+ mac_address: "{{ dut2_if2_mac | lower | replace('-',':') }}"
+ pci_address: "0003:00:02.0"
+ link: link2
+ model: azure-mlx-40g
roles:
- role: common
tags: common
+ - role: vexx_hosts
+ tags: vexx_hosts
- role: tg
tags: tg
- - role: tg_sut
- tags: tg_sut
+ - role: iperf
+ tags: iperf
+ - role: trex
+ tags: trex
+ - role: wrk
+ tags: wrk
+ - role: docker
+ tags: docker
+ - role: mellanox
+ tags: mellanox
+ - role: performance_tuning
+ tags: performance_tuning
+ - role: cleanup
+ tags: cleanup
- role: calibration
tags: calibration
--- /dev/null
+---
+# file: tg_aws.yaml
+
+- hosts: tg
+ become: yes
+ become_user: root
+ roles:
+ - role: user_add
+ tags: user_add
+ - role: common
+ tags: common
+ - role: dpdk
+ tags: dpdk
+ - role: aws
+ tags: aws
+ - role: tg
+ tags: tg
+ - role: iperf
+ tags: iperf
+ - role: trex
+ tags: trex
+ - role: wrk
+ tags: wrk
+ - role: docker
+ tags: docker
+ - role: cleanup
+ tags: cleanup
+ - role: calibration
+ tags: calibration
--- /dev/null
+---
+# file: tg_azure.yaml
+
+- hosts: tg
+ become: yes
+ become_user: root
+ roles:
+ - role: user_add
+ tags: user_add
+ - role: common
+ tags: common
+ - role: azure
+ tags: azure
+ - role: tg
+ tags: tg
+ - role: iperf
+ tags: iperf
+ - role: trex
+ tags: trex
+ - role: wrk
+ tags: wrk
+ - role: docker
+ tags: docker
+ - role: cleanup
+ tags: cleanup
+ - role: calibration
+ tags: calibration
roles:
- role: common
tags: common
+ - role: vexx_hosts
+ tags: vexx_hosts
+ - role: docker
+ tags: docker
- role: vpp_device
tags: vpp_device
+ - role: kernel_vm
+ tags: kernel_vm
+ - role: cleanup
+ tags: cleanup
--- /dev/null
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/2_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 2-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "10.32.8.19"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # s34-t27-tg1-c2/p1 - 10GE port1 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e9:88"
+ pci_address: "0000:18:00.0"
+ ip4_address: "172.16.10.2"
+ driver: i40e
+ link: link1
+ model: Intel-X710
+ port2:
+ # s34-t27-tg1-c2/p2 - 10GE port2 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e9:89"
+ pci_address: "0000:18:00.1"
+ ip4_address: "172.16.20.2"
+ driver: i40e
+ link: link2
+ model: Intel-X710
+ port3:
+ # s34-t27-tg1-c2/p3 - 10GE port3 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e9:8a"
+ pci_address: "0000:18:00.2"
+ ip4_address: "172.16.30.2"
+ driver: i40e
+ link: link3
+ model: Intel-X710
+ port4:
+ # s34-t27-tg1-c2/p4 - 10GE port4 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e9:8b"
+ pci_address: "0000:18:00.3"
+ ip4_address: "172.16.40.2"
+ driver: i40e
+ link: link4
+ model: Intel-X710
+ port5:
+ # s34-t27-tg1-c4/p1 - 25GE port1 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:dd:d4:28"
+ pci_address: "0000:3b:00.0"
+ ip4_address: "172.16.50.2"
+ driver: i40e
+ link: link5
+ model: Intel-XXV710
+ port6:
+ # s34-t27-tg1-c4/p2 - 25GE port2 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:dd:d4:29"
+ pci_address: "0000:3b:00.1"
+ ip4_address: "172.16.60.2"
+ driver: i40e
+ link: link6
+ model: Intel-XXV710
+ port7:
+ # s34-t27-tg1-c9/p1 - 100GE-port1 ConnectX5-2p100GE.
+ mac_address: "ec:0d:9a:8c:c6:a6"
+ pci_address: "0000:5e:00.0"
+ ip4_address: "172.16.70.2"
+ driver: mlx5_core
+ link: link7
+ model: Mellanox-CX556A
+ port8:
+ # s34-t27-tg1-c9/p2 - 100GE-port2 ConnectX5-2p100GE.
+ mac_address: "ec:0d:9a:8c:c6:a7"
+ pci_address: "0000:5e:00.1"
+ ip4_address: "172.16.70.1"
+ driver: mlx5_core
+ link: link8
+ model: Mellanox-CX556A
+ DUT1:
+ type: DUT
+ host: "10.32.8.18"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
+ interfaces:
+ port1:
+ # s33-t27-sut1-c2/p1 - 10GE port1 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e9:98"
+ pci_address: "0000:18:00.0"
+ ip4_address: "172.16.10.1"
+ driver: i40e
+ link: link1
+ model: Intel-X710
+ port2:
+ # s33-t27-sut1-c2/p2 - 10GE port2 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e9:99"
+ pci_address: "0000:18:00.1"
+ ip4_address: "172.16.20.1"
+ driver: i40e
+ link: link2
+ model: Intel-X710
+ port3:
+ # s33-t27-sut1-c2/p3 - 10GE port3 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e9:9a"
+ pci_address: "0000:18:00.2"
+ ip4_address: "172.16.30.1"
+ driver: i40e
+ link: link3
+ model: Intel-X710
+ port4:
+ # s33-t27-sut1-c2/p4 - 10GE port4 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e9:9b"
+ pci_address: "0000:18:00.3"
+ ip4_address: "172.16.40.1"
+ driver: i40e
+ link: link4
+ model: Intel-X710
+ port5:
+      # s33-t27-sut1-c4/p1 - 25GE port1 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:cf:69:e8"
+ pci_address: "0000:3b:00.0"
+ ip4_address: "172.16.50.1"
+ driver: i40e
+ link: link5
+ model: Intel-XXV710
+ port6:
+      # s33-t27-sut1-c4/p2 - 25GE port2 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:cf:69:e9"
+ pci_address: "0000:3b:00.1"
+ ip4_address: "172.16.60.1"
+ driver: i40e
+ link: link6
+ model: Intel-XXV710
+ port7:
+ # s33-t27-sut1-c9/p1 - 100GE-port1 ConnectX5-2p100GE.
+ mac_address: "ec:0d:9a:8c:c6:96"
+ pci_address: "0000:5e:00.0"
+ ip4_address: "172.16.70.2"
+ driver: mlx5_core
+ link: link7
+ model: Mellanox-CX556A
+ port8:
+ # s33-t27-sut1-c9/p2 - 100GE-port2 ConnectX5-2p100GE.
+ mac_address: "ec:0d:9a:8c:c6:96"
+ pci_address: "0000:5e:00.1"
+ ip4_address: "172.16.70.1"
+ driver: mlx5_core
+ link: link8
+ model: Mellanox-CX556A
--- /dev/null
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/2_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 2-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "10.32.8.21"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # s36-t28-tg1-c2/p1 - 10GE port1 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e8:28"
+ pci_address: "0000:18:00.0"
+ ip4_address: "172.16.10.2"
+ driver: i40e
+ link: link1
+ model: Intel-X710
+ port2:
+ # s36-t28-tg1-c2/p2 - 10GE port2 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e8:29"
+ pci_address: "0000:18:00.1"
+ ip4_address: "172.16.20.2"
+ driver: i40e
+ link: link2
+ model: Intel-X710
+ port3:
+ # s36-t28-tg1-c2/p3 - 10GE port3 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e8:2a"
+ pci_address: "0000:18:00.2"
+ ip4_address: "172.16.30.2"
+ driver: i40e
+ link: link3
+ model: Intel-X710
+ port4:
+ # s36-t28-tg1-c2/p4 - 10GE port4 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e8:2b"
+ pci_address: "0000:18:00.3"
+ ip4_address: "172.16.40.2"
+ driver: i40e
+ link: link4
+ model: Intel-X710
+ port5:
+ # s36-t28-tg1-c4/p1 - 25GE port1 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:cf:69:ec"
+ pci_address: "0000:3b:00.0"
+ ip4_address: "172.16.50.2"
+ driver: i40e
+ link: link5
+ model: Intel-XXV710
+ port6:
+ # s36-t28-tg1-c4/p2 - 25GE port2 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:cf:69:ed"
+ pci_address: "0000:3b:00.1"
+ ip4_address: "172.16.60.2"
+ driver: i40e
+ link: link6
+ model: Intel-XXV710
+ port7:
+ # s36-t28-tg1-c9/p1 - 100GE-port1 ConnectX5-2p100GE.
+ mac_address: "ec:0d:9a:8c:c7:f6"
+ pci_address: "0000:5e:00.0"
+ ip4_address: "172.16.70.2"
+ driver: mlx5_core
+ link: link7
+ model: Mellanox-CX556A
+ port8:
+ # s36-t28-tg1-c9/p2 - 100GE-port2 ConnectX5-2p100GE.
+ mac_address: "ec:0d:9a:8c:c7:f7"
+ pci_address: "0000:5e:00.1"
+ ip4_address: "172.16.70.1"
+ driver: mlx5_core
+ link: link8
+ model: Mellanox-CX556A
+ DUT1:
+ type: DUT
+ host: "10.32.8.20"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
+ interfaces:
+ port1:
+ # s35-t28-sut1-c2/p1 - 10GE port1 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e3:b0"
+ pci_address: "0000:18:00.0"
+ ip4_address: "172.16.10.1"
+ driver: i40e
+ link: link1
+ model: Intel-X710
+ port2:
+ # s35-t28-sut1-c2/p2 - 10GE port2 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e3:b1"
+ pci_address: "0000:18:00.1"
+ ip4_address: "172.16.20.1"
+ driver: i40e
+ link: link2
+ model: Intel-X710
+ port3:
+ # s35-t28-sut1-c2/p3 - 10GE port3 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e3:b2"
+ pci_address: "0000:18:00.2"
+ ip4_address: "172.16.30.1"
+ driver: i40e
+ link: link3
+ model: Intel-X710
+ port4:
+ # s35-t28-sut1-c2/p4 - 10GE port4 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:e3:b3"
+ pci_address: "0000:18:00.3"
+ ip4_address: "172.16.40.1"
+ driver: i40e
+ link: link4
+ model: Intel-X710
+ port5:
+      # s35-t28-sut1-c4/p1 - 25GE port1 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:dd:d1:90"
+ pci_address: "0000:3b:00.0"
+ ip4_address: "172.16.50.1"
+ driver: i40e
+ link: link5
+ model: Intel-XXV710
+ port6:
+      # s35-t28-sut1-c4/p2 - 25GE port2 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:dd:d1:91"
+ pci_address: "0000:3b:00.1"
+ ip4_address: "172.16.60.1"
+ driver: i40e
+ link: link6
+ model: Intel-XXV710
+ port7:
+ # s35-t28-sut1-c9/p1 - 100GE-port1 ConnectX5-2p100GE.
+ mac_address: "ec:0d:9a:8c:c7:b6"
+ pci_address: "0000:5e:00.0"
+ ip4_address: "172.16.70.2"
+ driver: mlx5_core
+ link: link7
+ model: Mellanox-CX556A
+ port8:
+ # s35-t28-sut1-c9/p2 - 100GE-port2 ConnectX5-2p100GE.
+ mac_address: "ec:0d:9a:8c:c7:b7"
+ pci_address: "0000:5e:00.1"
+ ip4_address: "172.16.70.1"
+ driver: mlx5_core
+ link: link8
+ model: Mellanox-CX556A
--- /dev/null
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/2_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 2-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "10.32.8.23"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # s38-t29-tg1-c2/p1 - 10GE port1 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:a8:b1:90"
+ pci_address: "0000:18:00.0"
+ ip4_address: "172.16.10.2"
+ driver: i40e
+ link: link1
+ model: Intel-X710
+ port2:
+ # s38-t29-tg1-c2/p2 - 10GE port2 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:a8:b1:91"
+ pci_address: "0000:18:00.1"
+ ip4_address: "172.16.20.2"
+ driver: i40e
+ link: link2
+ model: Intel-X710
+ port3:
+ # s38-t29-tg1-c2/p3 - 10GE port3 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:a8:b1:92"
+ pci_address: "0000:18:00.2"
+ ip4_address: "172.16.30.2"
+ driver: i40e
+ link: link3
+ model: Intel-X710
+ port4:
+ # s38-t29-tg1-c2/p4 - 10GE port4 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:a8:b1:93"
+ pci_address: "0000:18:00.3"
+ ip4_address: "172.16.40.2"
+ driver: i40e
+ link: link4
+ model: Intel-X710
+ port5:
+ # s38-t29-tg1-c4/p1 - 25GE port1 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:cf:6c:bc"
+ pci_address: "0000:3b:00.0"
+ ip4_address: "172.16.50.2"
+ driver: i40e
+ link: link5
+ model: Intel-XXV710
+ port6:
+ # s38-t29-tg1-c4/p2 - 25GE port2 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:cf:6c:bd"
+ pci_address: "0000:3b:00.1"
+ ip4_address: "172.16.60.2"
+ driver: i40e
+ link: link6
+ model: Intel-XXV710
+ port7:
+ # s38-t29-tg1-c9/p1 - 100GE-port1 ConnectX5-2p100GE.
+ mac_address: "b8:59:9f:fe:4a:a8"
+ pci_address: "0000:5e:00.0"
+ ip4_address: "172.16.70.2"
+ driver: mlx5_core
+ link: link7
+ model: Mellanox-CX556A
+ port8:
+ # s38-t29-tg1-c9/p2 - 100GE-port2 ConnectX5-2p100GE.
+ mac_address: "b8:59:9f:fe:4a:a9"
+ pci_address: "0000:5e:00.1"
+ ip4_address: "172.16.70.2"
+ driver: mlx5_core
+ link: link8
+ model: Mellanox-CX556A
+ DUT1:
+ type: DUT
+ host: "10.32.8.22"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
+ interfaces:
+ port1:
+ # s37-t29-sut1-c2/p1 - 10GE port1 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:eb:10"
+ pci_address: "0000:18:00.0"
+ ip4_address: "172.16.10.1"
+ driver: i40e
+ link: link1
+ model: Intel-X710
+ port2:
+ # s37-t29-sut1-c2/p2 - 10GE port2 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:eb:11"
+ pci_address: "0000:18:00.1"
+ ip4_address: "172.16.20.1"
+ driver: i40e
+ link: link2
+ model: Intel-X710
+ port3:
+ # s37-t29-sut1-c2/p3 - 10GE port3 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:eb:12"
+ pci_address: "0000:18:00.2"
+ ip4_address: "172.16.30.1"
+ driver: i40e
+ link: link3
+ model: Intel-X710
+ port4:
+ # s37-t29-sut1-c2/p4 - 10GE port4 on Intel NIC x710 4p10GE.
+ mac_address: "3c:fd:fe:ca:eb:13"
+ pci_address: "0000:18:00.3"
+ ip4_address: "172.16.40.1"
+ driver: i40e
+ link: link4
+ model: Intel-X710
+ port5:
+      # s37-t29-sut1-c4/p1 - 25GE port1 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:dd:d3:48"
+ pci_address: "0000:3b:00.0"
+ ip4_address: "172.16.50.1"
+ driver: i40e
+ link: link5
+ model: Intel-XXV710
+ port6:
+      # s37-t29-sut1-c4/p2 - 25GE port2 on Intel NIC xxv710 2p25GE.
+ mac_address: "3c:fd:fe:dd:d3:49"
+ pci_address: "0000:3b:00.1"
+ ip4_address: "172.16.60.1"
+ driver: i40e
+ link: link6
+ model: Intel-XXV710
+ port7:
+ # s37-t29-sut1-c9/p1 - 100GE-port1 ConnectX5-2p100GE.
+ mac_address: "b8:59:9f:fe:4a:c8"
+ pci_address: "0000:5e:00.0"
+ ip4_address: "172.16.70.1"
+ driver: mlx5_core
+ link: link7
+ model: Mellanox-CX556A
+ port8:
+ # s37-t29-sut1-c9/p2 - 100GE-port2 ConnectX5-2p100GE.
+ mac_address: "b8:59:9f:fe:4a:c9"
+ pci_address: "0000:5e:00.1"
+ ip4_address: "172.16.70.1"
+ driver: mlx5_core
+ link: link8
+ model: Mellanox-CX556A
link: link1
model: Intel-X553
port2:
- mac_address: "b4:96:91:46:b3:9c"
+ mac_address: "b4:96:91:46:b6:48"
pci_address: "0000:5e:00.0"
driver: ixgbe
link: link2
password: Csit1234
interfaces:
port1:
- mac_address: "b4:96:91:46:b2:28"
+ mac_address: "b4:96:91:46:ae:3c"
pci_address: "0000:af:00.0"
driver: ixgbe
link: link1
model: Intel-X553
port2:
- mac_address: "b4:96:91:46:ae:3c"
+ mac_address: "b4:96:91:46:b2:e4"
pci_address: "0000:d8:00.0"
driver: ixgbe
link: link2
-# Copyright (c) 2019 PANTHEON.tech and/or its affiliates.
+# Copyright (c) 2020 PANTHEON.tech and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
link: link1
driver: i40e
model: Intel-X710
+ port2:
+ mac_address: "3c:fd:fe:a8:aa:c2"
+ pci_address: "0000:18:00.2"
+ link: link10
+ driver: i40e
+ model: Intel-X710
+ port3:
+ mac_address: "3c:fd:fe:a8:aa:c3"
+ pci_address: "0000:18:00.3"
+ link: link11
+ driver: i40e
+ model: Intel-X710
DUT1:
type: DUT
host: 10.30.51.36
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# will execute only checks defined in "pylint" tox environment.
[tox]
-envlist = new_line_length, line_length, autogen, pylint, tc_naming, tc_coverage
+# Fast and brief checkers to front, slow or verbose checkers to back.
+envlist = tc_naming, tc_coverage, copyright_year, new_line_length, line_length,
+ autogen, pylint, doc_verify
+
# The following is needed as tox requires setup.py by default.
skipsdist = true
# Just a shorthand to avoid long lines.
# TODO: Tox prints various warnings. Figure them out and fix them.
-[testenv:pylint]
-deps =
- pylint==1.5.4
- -r ./requirements.txt
+# Keep testenvs sorted alphabetically, please.
+
+[testenv:autogen]
whitelist_externals = /bin/bash
setenv = PYTHONPATH = {toxinidir}
-# Run pylint, but hide its return value until python warnings are cleared.
-commands = bash -c "bash {[tox]checker_dir}/pylint.sh || true"
+commands = bash {[tox]checker_dir}/autogen.sh
-# TODO: See FIXME in https://gerrit.fd.io/r/16423
+[testenv:copyright_year]
+whitelist_externals = /bin/bash
+setenv = PYTHONPATH = {toxinidir}
+commands = bash {[tox]checker_dir}/copyright_year.sh
+
+[testenv:doc_verify]
+# Fix all documentation errors before enabling voting.
+whitelist_externals = /bin/bash
+commands = bash -c "bash {[tox]checker_dir}/doc_verify.sh || true"
[testenv:line_length]
whitelist_externals = /bin/bash
# the checker has to remain non-voting.
commands = bash -c "bash {[tox]checker_dir}/new_line.sh || true"
-[testenv:autogen]
+[testenv:pylint]
+basepython = python2
+deps =
+ pylint==1.5.4
+ -r ./requirements.txt
whitelist_externals = /bin/bash
setenv = PYTHONPATH = {toxinidir}
-commands = bash {[tox]checker_dir}/autogen.sh
+# Run pylint, but hide its return value until python warnings are cleared.
+commands = bash -c "bash {[tox]checker_dir}/pylint.sh || true"
-[testenv:tc_naming]
-whitelist_externals = /bin/bash
-# Fix all TC namings and remove the " || true" workaround.
-commands = bash -c "bash {[tox]checker_dir}/tc_naming.sh || true"
+# TODO: See FIXME in https://gerrit.fd.io/r/16423
[testenv:tc_coverage]
whitelist_externals = /bin/bash
# Coverage is not needed to be voting.
commands = bash -c "bash {[tox]checker_dir}/tc_coverage.sh || true"
+[testenv:tc_naming]
+whitelist_externals = /bin/bash
+commands = bash {[tox]checker_dir}/tc_naming.sh
+
+# Keep testenvs sorted alphabetically, please.
+
# TODO: Migrate current docs check here.
# TODO: Create voting "pylint violations should not increase" checker.
# TODO: Create voting checker to reject suites with Force Tags of other suite.