if unset, the arch variable will default to "x86_64"
* Note on "arm64" vs "aarch64"
debian-based distributions use arm64
rhel-based distributions use aarch64
qemu binaries of both distributions use aarch64
dpdk uses arm64
vpp uses aarch64
python machine modules use aarch64
=> prefer aarch64 to use the same nomenclature as vpp
* add ARCH argument to:
init_dpdk.sh, install_dpdk.sh, run_l2fwd.sh, install_tldk.sh, run_tldk.sh.
defaults to x86_64
converts "aarch64" if needed to match the dpdk naming convention
* fixed terminal prompt end detection to also allow "~]# " (root prompt)
add the dut node arch as a parameter to all robot "set bin" calls
* add --target-list flag to qemu_build.sh
defaults to x86_64-softmmu
* add arch flag to all the topology files
* topologies/available/ (and example file)
* resources/tools/virl/topologies/
* set _qemu_bin path using node['arch'] in qemu_set_node()
Change-Id: If46d88d064d213d3e4c6fc584bb8e0d4b6428cb8
Signed-off-by: Gabriel Ganne <gabriel.ganne@enea.com>
23 files changed:
QEMU_PACKAGE_URL="${QEMU_DOWNLOAD_REPO}${QEMU_DOWNLOAD_PACKAGE}"
QEMU_INSTALL_DIR="/opt/${QEMU_VERSION}"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
QEMU_PACKAGE_URL="${QEMU_DOWNLOAD_REPO}${QEMU_DOWNLOAD_PACKAGE}"
QEMU_INSTALL_DIR="/opt/${QEMU_VERSION}"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+TARGET_LIST="x86_64-softmmu"
for i in "$@"; do
case $i in
for i in "$@"; do
case $i in
--force)
FORCE=1
shift ;;
--force)
FORCE=1
shift ;;
+ --target-list)
+ TARGET_LIST="${i#*=}"
+ shift ;;
-./configure --target-list=x86_64-softmmu --prefix=${QEMU_INSTALL_DIR} || \
+./configure --target-list=${TARGET_LIST} --prefix=${QEMU_INSTALL_DIR} || \
{ echo "Failed to configure ${QEMU_VERSION}"; exit 1; }
make -j`nproc` || \
{ echo "Failed to compile ${QEMU_VERSION}"; exit 1; }
{ echo "Failed to configure ${QEMU_VERSION}"; exit 1; }
make -j`nproc` || \
{ echo "Failed to compile ${QEMU_VERSION}"; exit 1; }
ssh = SSH()
ssh.connect(dut_node)
ssh = SSH()
ssh.connect(dut_node)
- cmd = 'cd {0}/tests/dpdk/dpdk_scripts/ && sudo ./init_dpdk.sh {1} {2}' \
- .format(con.REMOTE_FW_DIR, pci_address1, pci_address2)
+ arch = Topology.get_node_arch(dut_node)
+ cmd = 'cd {0}/tests/dpdk/dpdk_scripts/ &&'\
+ 'sudo ./init_dpdk.sh {1} {2} {3}' .format(con.REMOTE_FW_DIR,
+ pci_address1,
+ pci_address2,
+ arch)
(ret_code, _, _) = ssh.exec_command(cmd, timeout=600)
if ret_code != 0:
(ret_code, _, _) = ssh.exec_command(cmd, timeout=600)
if ret_code != 0:
from resources.libraries.python.ssh import SSH
from resources.libraries.python.constants import Constants as con
from resources.libraries.python.ssh import SSH
from resources.libraries.python.constants import Constants as con
+from resources.libraries.python.topology import Topology
ssh.connect(dut_node)
cmd = 'cd {0}/tests/dpdk/dpdk_scripts/ && sudo ./run_l2fwd.sh {1} ' \
ssh.connect(dut_node)
cmd = 'cd {0}/tests/dpdk/dpdk_scripts/ && sudo ./run_l2fwd.sh {1} ' \
- '{2} {3} {4}'.format(con.REMOTE_FW_DIR, cpu_cores, nb_cores,
- queue_nums, jumbo_frames)
+ '{2} {3} {4} {5}'.format(con.REMOTE_FW_DIR, cpu_cores, nb_cores,
+ queue_nums, jumbo_frames,
+ Topology.get_node_arch(dut_node))
(ret_code, _, _) = ssh.exec_command(cmd, timeout=600)
if ret_code != 0:
(ret_code, _, _) = ssh.exec_command(cmd, timeout=600)
if ret_code != 0:
from resources.libraries.python.ssh import SSH
from resources.libraries.python.constants import Constants as con
from resources.libraries.python.topology import NodeType
from resources.libraries.python.ssh import SSH
from resources.libraries.python.constants import Constants as con
from resources.libraries.python.topology import NodeType
+from resources.libraries.python.topology import Topology
__all__ = ["SetupDPDKTest"]
__all__ = ["SetupDPDKTest"]
:type node: dict
:returns: nothing
"""
:type node: dict
:returns: nothing
"""
- logger.console('Install the DPDK on {0}'.format(node['host']))
+ arch = Topology.get_node_arch(node)
+ logger.console('Install the DPDK on {0} ({1})'.format(node['host'],
+ arch))
ssh = SSH()
ssh.connect(node)
(ret_code, _, stderr) = ssh.exec_command(
ssh = SSH()
ssh.connect(node)
(ret_code, _, stderr) = ssh.exec_command(
- 'cd {0}/tests/dpdk/dpdk_scripts/ && ./install_dpdk.sh'
- .format(con.REMOTE_FW_DIR), timeout=600)
+ 'cd {0}/tests/dpdk/dpdk_scripts/ && ./install_dpdk.sh {1}'
+ .format(con.REMOTE_FW_DIR, arch), timeout=600)
if ret_code != 0:
logger.error('Install the DPDK error: {0}'.format(stderr))
if ret_code != 0:
logger.error('Install the DPDK error: {0}'.format(stderr))
:rtype: bool
"""
tarball, remote_tarball, node = args
:rtype: bool
"""
tarball, remote_tarball, node = args
+
+ # if unset, arch defaults to x86_64
+ if 'arch' not in node or not node['arch']:
+ node['arch'] = 'x86_64'
+
try:
copy_tarball_to_node(tarball, node)
extract_tarball_at_node(remote_tarball, node)
try:
copy_tarball_to_node(tarball, node)
extract_tarball_at_node(remote_tarball, node)
else:
logger.console('Setup of node {0} done'.format(node['host']))
return True
else:
logger.console('Setup of node {0} done'.format(node['host']))
return True
+#pylint: enable=broad-except
def delete_local_tarball(tarball):
"""Delete local tarball to prevent disk pollution.
def delete_local_tarball(tarball):
"""Delete local tarball to prevent disk pollution.
'Executed node setups in parallel, waiting for processes to end')
result.wait()
'Executed node setups in parallel, waiting for processes to end')
result.wait()
- logger.info('Results: {0}'.format(result.get()))
+ results = result.get()
+ node_setup_success = all(results)
+ logger.info('Results: {0}'.format(results))
logger.trace('Test framework copied to all topology nodes')
delete_local_tarball(tarball)
logger.trace('Test framework copied to all topology nodes')
delete_local_tarball(tarball)
- logger.console('All nodes are ready')
-
+ if node_setup_success:
+ logger.console('All nodes are ready')
+ else:
+ logger.console('Failed to setup dpdk on all the nodes')
from resources.libraries.python.ssh import SSH, SSHTimeout
from resources.libraries.python.constants import Constants
from resources.libraries.python.ssh import SSH, SSHTimeout
from resources.libraries.python.constants import Constants
-from resources.libraries.python.topology import NodeType
+from resources.libraries.python.topology import NodeType, Topology
def __init__(self, qemu_id=1):
self._qemu_id = qemu_id
def __init__(self, qemu_id=1):
self._qemu_id = qemu_id
+ # Path to QEMU binary. Use x86_64 by default
self._qemu_bin = '/usr/bin/qemu-system-x86_64'
# QEMU Machine Protocol socket
self._qmp_sock = '/tmp/qmp{0}.sock'.format(self._qemu_id)
self._qemu_bin = '/usr/bin/qemu-system-x86_64'
# QEMU Machine Protocol socket
self._qmp_sock = '/tmp/qmp{0}.sock'.format(self._qemu_id)
self._ssh.connect(node)
self._vm_info['host'] = node['host']
self._ssh.connect(node)
self._vm_info['host'] = node['host']
+ arch = Topology.get_node_arch(node)
+ self._qemu_bin = '/usr/bin/qemu-system-{0}'.format(arch)
+
def qemu_add_vhost_user_if(self, socket, server=True, mac=None):
"""Add Vhost-user interface.
def qemu_add_vhost_user_if(self, socket, server=True, mac=None):
"""Add Vhost-user interface.
version = ' --version={0}'.format(Constants.QEMU_INSTALL_VERSION)
force = ' --force' if force_install else ''
patch = ' --patch' if apply_patch else ''
version = ' --version={0}'.format(Constants.QEMU_INSTALL_VERSION)
force = ' --force' if force_install else ''
patch = ' --patch' if apply_patch else ''
+ arch = Topology.get_node_arch(node)
+ target_list = ' --target-list={0}-softmmu'.format(arch)
(ret_code, stdout, stderr) = \
ssh.exec_command(
(ret_code, stdout, stderr) = \
ssh.exec_command(
- "sudo -E sh -c '{0}/{1}/qemu_build.sh{2}{3}{4}{5}'"\
+ "sudo -E sh -c '{0}/{1}/qemu_build.sh{2}{3}{4}{5}{6}'"\
.format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH,
.format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH,
- version, directory, force, patch), 1000)
+ version, directory, force, patch, target_list), 1000)
if int(ret_code) != 0:
logger.debug('QEMU build failed {0}'.format(stdout + stderr))
if int(ret_code) != 0:
logger.debug('QEMU build failed {0}'.format(stdout + stderr))
from resources.libraries.python.ssh import SSH
from resources.libraries.python.TLDK.TLDKConstants import TLDKConstants as con
from resources.libraries.python.ssh import SSH
from resources.libraries.python.TLDK.TLDKConstants import TLDKConstants as con
-from resources.libraries.python.topology import NodeType
+from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.TLDK.gen_pcap import gen_all_pcap
__all__ = ["SetupTLDKTest"]
from resources.libraries.python.TLDK.gen_pcap import gen_all_pcap
__all__ = ["SetupTLDKTest"]
:returns: nothing.
:raises RuntimeError: If install tldk failed.
"""
:returns: nothing.
:raises RuntimeError: If install tldk failed.
"""
- logger.console('Install the TLDK on {0}'.format(node['host']))
+
+ arch = Topology.get_node_arch(node)
+ logger.console('Install the TLDK on {0} ({1})'.format(node['host'],
+ arch))
ssh = SSH()
ssh.connect(node)
(ret_code, _, stderr) = ssh.exec_command(
ssh = SSH()
ssh.connect(node)
(ret_code, _, stderr) = ssh.exec_command(
- 'cd {0}/{1} && ./install_tldk.sh'
- .format(con.REMOTE_FW_DIR, con.TLDK_SCRIPTS), timeout=600)
+ 'cd {0}/{1} && ./install_tldk.sh {2}'
+ .format(con.REMOTE_FW_DIR, con.TLDK_SCRIPTS, arch), timeout=600)
if ret_code != 0:
logger.error('Install the TLDK error: {0}'.format(stderr))
if ret_code != 0:
logger.error('Install the TLDK error: {0}'.format(stderr))
:raises RuntimeError: If node setup failed.
"""
tarball, remote_tarball, node = args
:raises RuntimeError: If node setup failed.
"""
tarball, remote_tarball, node = args
+
+ # if unset, arch defaults to x86_64
+ Topology.get_node_arch(node)
+
try:
copy_tarball_to_node(tarball, node)
extract_tarball_at_node(remote_tarball, node)
try:
copy_tarball_to_node(tarball, node)
extract_tarball_at_node(remote_tarball, node)
'Executed node setups in parallel, waiting for processes to end')
result.wait()
'Executed node setups in parallel, waiting for processes to end')
result.wait()
- logger.info('Results: {0}'.format(result.get()))
+ results = result.get()
+ node_setup_success = all(results)
+ logger.info('Results: {0}'.format(results))
logger.trace('Test framework copied to all topology nodes')
delete_local_tarball(tarball)
logger.trace('Test framework copied to all topology nodes')
delete_local_tarball(tarball)
- logger.console('All nodes are ready')
+ if node_setup_success:
+ logger.console('All nodes are ready')
+ else:
+ logger.console('Failed to setup dpdk on all the nodes')
"""
__VAT_PROMPT = ("vat# ", )
"""
__VAT_PROMPT = ("vat# ", )
- __LINUX_PROMPT = (":~$ ", "~]$ ")
+ __LINUX_PROMPT = (":~$ ", "~]$ ", "~]# ")
def __init__(self, node, json_param=True):
json_text = ' json' if json_param else ''
def __init__(self, node, json_param=True):
json_text = ' json' if json_param else ''
chan.set_combine_stderr(True)
buf = ''
chan.set_combine_stderr(True)
buf = ''
- while not buf.endswith((":~$ ", "~]$ ")):
+ while not buf.endswith((":~$ ", "~]$ ", "~]# ")):
try:
chunk = chan.recv(self.__MAX_RECV_BUF)
if not chunk:
try:
chunk = chan.recv(self.__MAX_RECV_BUF)
if not chunk:
+ @staticmethod
+ def get_node_arch(node):
+ """Return arch of the node.
+ Default to x86_64 if no arch present
+
+ :param node: Node created from topology.
+ :type node: dict
+ :returns: Node architecture
+ :rtype: str
+ """
+ try:
+ return node['arch']
+ except KeyError:
+ node['arch'] = 'x86_64'
+ return 'x86_64'
+
@staticmethod
def get_cryptodev(node):
"""Return Crytodev configuration of the node.
@staticmethod
def get_cryptodev(node):
"""Return Crytodev configuration of the node.
TG:
type: TG
host: "{topology[tg1][nic-management][ip-addr]}"
TG:
type: TG
host: "{topology[tg1][nic-management][ip-addr]}"
port: 22
username: cisco
priv_key: |
port: 22
username: cisco
priv_key: |
DUT1:
type: DUT
host: "{topology[sut1][nic-management][ip-addr]}"
DUT1:
type: DUT
host: "{topology[sut1][nic-management][ip-addr]}"
port: 22
username: cisco
honeycomb:
port: 22
username: cisco
honeycomb:
DUT2:
type: DUT
host: "{topology[sut2][nic-management][ip-addr]}"
DUT2:
type: DUT
host: "{topology[sut2][nic-management][ip-addr]}"
port: 22
username: cisco
honeycomb:
port: 22
username: cisco
honeycomb:
TG:
type: TG
host: "{topology[tg1][nic-management][ip-addr]}"
TG:
type: TG
host: "{topology[tg1][nic-management][ip-addr]}"
port: 22
username: cisco
priv_key: |
port: 22
username: cisco
priv_key: |
DUT1:
type: DUT
host: "{topology[sut1][nic-management][ip-addr]}"
DUT1:
type: DUT
host: "{topology[sut1][nic-management][ip-addr]}"
port: 22
username: cisco
honeycomb:
port: 22
username: cisco
honeycomb:
DUT2:
type: DUT
host: "{topology[sut2][nic-management][ip-addr]}"
DUT2:
type: DUT
host: "{topology[sut2][nic-management][ip-addr]}"
port: 22
username: cisco
honeycomb:
port: 22
username: cisco
honeycomb:
TG:
type: TG
host: "{topology[tg1][nic-management][ip-addr]}"
TG:
type: TG
host: "{topology[tg1][nic-management][ip-addr]}"
port: 22
username: cisco
priv_key: |
port: 22
username: cisco
priv_key: |
DUT1:
type: DUT
host: "{topology[sut1][nic-management][ip-addr]}"
DUT1:
type: DUT
host: "{topology[sut1][nic-management][ip-addr]}"
port: 22
username: cisco
honeycomb:
port: 22
username: cisco
honeycomb:
DUT2:
type: DUT
host: "{topology[sut2][nic-management][ip-addr]}"
DUT2:
type: DUT
host: "{topology[sut2][nic-management][ip-addr]}"
port: 22
username: cisco
honeycomb:
port: 22
username: cisco
honeycomb:
DPDK_VERSION=dpdk-17.11
ROOTDIR=/tmp/openvpp-testing
PWDDIR=$(pwd)
DPDK_VERSION=dpdk-17.11
ROOTDIR=/tmp/openvpp-testing
PWDDIR=$(pwd)
+
+# set arch, default to x86_64 if none given
+ARCH=${3:-"x86_64"}
+
+# dpdk prefers "arm64" to "aarch64" and does not allow arm64 native target
+if [ $ARCH == "aarch64" ]; then
+ ARCH="arm64"
+ MACHINE="armv8a"
+else
+ MACHINE="native"
+fi
+
cd ${ROOTDIR}/${DPDK_VERSION}/
modprobe uio
cd ${ROOTDIR}/${DPDK_VERSION}/
modprobe uio
{ echo "Failed to remove uio_pci_generic module"; exit 1; }
fi
{ echo "Failed to remove uio_pci_generic module"; exit 1; }
fi
-insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko || \
+insmod ./${ARCH}-${MACHINE}-linuxapp-gcc/kmod/igb_uio.ko || \
{ echo "Failed to insert igb_uio module"; exit 1; }
# Binding
{ echo "Failed to insert igb_uio module"; exit 1; }
# Binding
set -x
# Setting variables
set -x
# Setting variables
+
+# set arch, default to x86_64 if none given
+ARCH=${1:-"x86_64"}
+
+
+# dpdk prefers "arm64" to "aarch64" and does not allow arm64 native target
+if [ $ARCH == "aarch64" ]; then
+ ARCH="arm64"
+ MACHINE="armv8a"
+else
+ MACHINE="native"
+fi
+
DPDK_VERSION=dpdk-17.11
DPDK_DIR=${DPDK_VERSION}
DPDK_PACKAGE=${DPDK_DIR}.tar.xz
DPDK_VERSION=dpdk-17.11
DPDK_DIR=${DPDK_VERSION}
DPDK_PACKAGE=${DPDK_DIR}.tar.xz
# Compile the DPDK
cd ./${DPDK_DIR}
sudo sed -i 's/^CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=y/g' ./config/common_base
# Compile the DPDK
cd ./${DPDK_DIR}
sudo sed -i 's/^CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n/CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=y/g' ./config/common_base
-make install T=x86_64-native-linuxapp-gcc -j || \
+make install T=${ARCH}-${MACHINE}-linuxapp-gcc -j || \
{ echo "Failed to compile $DPDK_VERSION"; exit 1; }
cd ${PWDDIR}
# Compile the l3fwd
export RTE_SDK=${ROOTDIR}/${DPDK_DIR}/
{ echo "Failed to compile $DPDK_VERSION"; exit 1; }
cd ${PWDDIR}
# Compile the l3fwd
export RTE_SDK=${ROOTDIR}/${DPDK_DIR}/
-export RTE_TARGET=x86_64-native-linuxapp-gcc
+export RTE_TARGET=${ARCH}-${MACHINE}-linuxapp-gcc
cd ${RTE_SDK}/examples/l3fwd
sudo sed -i 's/^#define RTE_TEST_RX_DESC_DEFAULT 128/#define RTE_TEST_RX_DESC_DEFAULT 2048/g' ./main.c
sudo sed -i 's/^#define RTE_TEST_TX_DESC_DEFAULT 512/#define RTE_TEST_TX_DESC_DEFAULT 2048/g' ./main.c
cd ${RTE_SDK}/examples/l3fwd
sudo sed -i 's/^#define RTE_TEST_RX_DESC_DEFAULT 128/#define RTE_TEST_RX_DESC_DEFAULT 2048/g' ./main.c
sudo sed -i 's/^#define RTE_TEST_TX_DESC_DEFAULT 512/#define RTE_TEST_TX_DESC_DEFAULT 2048/g' ./main.c
nb_cores=$2
queue_nums=$3
jumbo_frames=$4
nb_cores=$2
queue_nums=$3
jumbo_frames=$4
+arch=${5:-"x86_64"}
+
+# dpdk prefers "arm64" to "aarch64" and does not allow arm64 native target
+if [ $arch == "aarch64" ]; then
+ arch="arm64"
+ machine="armv8a"
+else
+ machine="native"
+fi
# Try to kill the testpmd
sudo pgrep testpmd
# Try to kill the testpmd
sudo pgrep testpmd
cd ${ROOTDIR}/${DPDK_VERSION}/
rm -f ${TESTPMDLOG}
cd ${ROOTDIR}/${DPDK_VERSION}/
rm -f ${TESTPMDLOG}
+TESTPMD_BIN=./${arch}-${machine}-linuxapp-gcc/app/testpmd
+
if [ "$jumbo_frames" = "yes" ]; then
if [ "$jumbo_frames" = "yes" ]; then
- sudo sh -c "screen -dmSL DPDK-test ./x86_64-native-linuxapp-gcc/app/testpmd \
+ sudo sh -c "screen -dmSL DPDK-test $TESTPMD_BIN \
-l ${cpu_corelist} -n 4 -- \
--numa \
--nb-ports=2 \
-l ${cpu_corelist} -n 4 -- \
--numa \
--nb-ports=2 \
--disable-link-check \
--auto-start"
else
--disable-link-check \
--auto-start"
else
- sudo sh -c "screen -dmSL DPDK-test ./x86_64-native-linuxapp-gcc/app/testpmd \
+ sudo sh -c "screen -dmSL DPDK-test $TESTPMD_BIN \
-l ${cpu_corelist} -n 4 -- \
--numa \
--nb-ports=2 \
-l ${cpu_corelist} -n 4 -- \
--numa \
--nb-ports=2 \
+# set arch, default to x86_64 if none given
+ARCH=${1:-"x86_64"}
+
+# dpdk prefers "arm64" to "aarch64" and does not allow arm64 native target
+if [ $ARCH == "aarch64" ]; then
+ ARCH="arm64"
+ MACHINE="armv8a"
+else
+ MACHINE="native"
+fi
+
DPDK_VERSION=16.11.1
ROOTDIR=/tmp/TLDK-testing
DPDK_VERSION=16.11.1
ROOTDIR=/tmp/TLDK-testing
echo ${DPDK_PACKAGE}
cd ./${DPDK_DIR}
sudo sed -i 's/^CONFIG_RTE_LIBRTE_PMD_PCAP=n/CONFIG_RTE_LIBRTE_PMD_PCAP=y/g' ./config/common_base
echo ${DPDK_PACKAGE}
cd ./${DPDK_DIR}
sudo sed -i 's/^CONFIG_RTE_LIBRTE_PMD_PCAP=n/CONFIG_RTE_LIBRTE_PMD_PCAP=y/g' ./config/common_base
-sudo make install T=x86_64-native-linuxapp-gcc
+sudo make install T=${ARCH}-${MACHINE}-linuxapp-gcc
cd ${PWDDIR}
# compile the TLDK
export RTE_SDK=${ROOTDIR}/${DPDK_DIR}/
cd ${PWDDIR}
# compile the TLDK
export RTE_SDK=${ROOTDIR}/${DPDK_DIR}/
-export RTE_TARGET=x86_64-native-linuxapp-gcc
+export RTE_TARGET=${ARCH}-${MACHINE}-linuxapp-gcc
cd ${ROOTDIR}/tldk
make all
cd ${PWDDIR}
cd ${ROOTDIR}/tldk
make all
cd ${PWDDIR}
+# set arch, default to x86_64 if none given
+ARCH=${1:-"x86_64"}
+
+# dpdk prefers "arm64" to "aarch64" and does not allow arm64 native target
+if [ $ARCH == "aarch64" ]; then
+ ARCH="arm64"
+ MACHINE="armv8a"
+else
+ MACHINE="native"
+fi
+
ROOTDIR=/tmp/TLDK-testing
PWDDIR=$(pwd)
ROOTDIR=/tmp/TLDK-testing
PWDDIR=$(pwd)
# need to install libpcap, libpcap-dev to use --vdev
cd ${ROOTDIR}
if [ "$IPv6_addr" == "NONE" ]; then
# need to install libpcap, libpcap-dev to use --vdev
cd ${ROOTDIR}
if [ "$IPv6_addr" == "NONE" ]; then
-sudo sh -c "nohup ./tldk/x86_64-native-linuxapp-gcc/app/l4fwd --lcore='0' \
+sudo sh -c "nohup ./tldk/${ARCH}-${MACHINE}-linuxapp-gcc/app/l4fwd --lcore='0' \
-n 2 --vdev 'eth_pcap1,rx_pcap=${rx_file},tx_pcap=${tx_file}' \
-b ${nic_pci} -- -P -U -R 0x1000 -S 0x1000 -s 0x20 -f ${fe_cfg} -b ${be_cfg} \
port=0,lcore=0,rx_offload=0,tx_offload=0,ipv4=${IPv4_addr} &"
elif [ "$IPv4_addr" == "NONE" ]; then
-n 2 --vdev 'eth_pcap1,rx_pcap=${rx_file},tx_pcap=${tx_file}' \
-b ${nic_pci} -- -P -U -R 0x1000 -S 0x1000 -s 0x20 -f ${fe_cfg} -b ${be_cfg} \
port=0,lcore=0,rx_offload=0,tx_offload=0,ipv4=${IPv4_addr} &"
elif [ "$IPv4_addr" == "NONE" ]; then
-sudo sh -c "nohup ./tldk/x86_64-native-linuxapp-gcc/app/l4fwd --lcore='0' \
+sudo sh -c "nohup ./tldk/${ARCH}-${MACHINE}-linuxapp-gcc/app/l4fwd --lcore='0' \
-n 2 --vdev 'eth_pcap1,rx_pcap=${rx_file},tx_pcap=${tx_file}' \
-b ${nic_pci} -- -P -U -R 0x1000 -S 0x1000 -s 0x20 -f ${fe_cfg} -b ${be_cfg} \
port=0,lcore=0,rx_offload=0,tx_offload=0,ipv6=${IPv6_addr} &"
-n 2 --vdev 'eth_pcap1,rx_pcap=${rx_file},tx_pcap=${tx_file}' \
-b ${nic_pci} -- -P -U -R 0x1000 -S 0x1000 -s 0x20 -f ${fe_cfg} -b ${be_cfg} \
port=0,lcore=0,rx_offload=0,tx_offload=0,ipv6=${IPv6_addr} &"
TG:
type: TG
host: 10.0.0.3
TG:
type: TG
host: 10.0.0.3
port: 22
username: lab
password: lab
port: 22
username: lab
password: lab
DUT1:
type: DUT
host: 10.0.0.1
DUT1:
type: DUT
host: 10.0.0.1
port: 22
username: lab
password: lab
port: 22
username: lab
password: lab
DUT2:
type: DUT
host: 10.0.0.2
DUT2:
type: DUT
host: 10.0.0.2
port: 22
username: lab
password: lab
port: 22
username: lab
password: lab
type: TG
subtype: TREX
host: "10.30.51.16"
type: TG
subtype: TREX
host: "10.30.51.16"
port: 22
username: testuser
password: Csit1234
port: 22
username: testuser
password: Csit1234
DUT1:
type: DUT
host: "10.30.51.17"
DUT1:
type: DUT
host: "10.30.51.17"
port: 22
username: testuser
password: Csit1234
port: 22
username: testuser
password: Csit1234
DUT2:
type: DUT
host: "10.30.51.18"
DUT2:
type: DUT
host: "10.30.51.18"
port: 22
username: testuser
password: Csit1234
port: 22
username: testuser
password: Csit1234
type: TG
subtype: TREX
host: "10.30.51.20"
type: TG
subtype: TREX
host: "10.30.51.20"
port: 22
username: testuser
password: Csit1234
port: 22
username: testuser
password: Csit1234
DUT1:
type: DUT
host: "10.30.51.21"
DUT1:
type: DUT
host: "10.30.51.21"
port: 22
username: testuser
password: Csit1234
port: 22
username: testuser
password: Csit1234
DUT2:
type: DUT
host: "10.30.51.22"
DUT2:
type: DUT
host: "10.30.51.22"
port: 22
username: testuser
password: Csit1234
port: 22
username: testuser
password: Csit1234
type: TG
subtype: TREX
host: "10.30.51.24"
type: TG
subtype: TREX
host: "10.30.51.24"
port: 22
username: testuser
password: Csit1234
port: 22
username: testuser
password: Csit1234
DUT1:
type: DUT
host: "10.30.51.25"
DUT1:
type: DUT
host: "10.30.51.25"
port: 22
username: testuser
password: Csit1234
port: 22
username: testuser
password: Csit1234
DUT2:
type: DUT
host: "10.30.51.26"
DUT2:
type: DUT
host: "10.30.51.26"
port: 22
username: testuser
password: Csit1234
port: 22
username: testuser
password: Csit1234
TG:
type: TG
host: "192.168.255.100"
TG:
type: TG
host: "192.168.255.100"
port: 22
username: csit
password: csit
port: 22
username: csit
password: csit
DUT1:
type: DUT
host: "192.168.255.101"
DUT1:
type: DUT
host: "192.168.255.101"
port: 22
username: csit
password: csit
port: 22
username: csit
password: csit
DUT2:
type: DUT
host: "192.168.255.102"
DUT2:
type: DUT
host: "192.168.255.102"
port: 22
username: csit
password: csit
port: 22
username: csit
password: csit
TG:
type: TG
host: "10.30.51.73"
TG:
type: TG
host: "10.30.51.73"
port: 22
username: cisco
priv_key: |
port: 22
username: cisco
priv_key: |
DUT1:
type: DUT
host: "10.30.51.72"
DUT1:
type: DUT
host: "10.30.51.72"
port: 22
username: cisco
priv_key: |
port: 22
username: cisco
priv_key: |
DUT2:
type: DUT
host: "10.30.51.71"
DUT2:
type: DUT
host: "10.30.51.71"
port: 22
username: cisco
priv_key: |
port: 22
username: cisco
priv_key: |