Test VIRL connection.
author    Stefan Kobza <skobza@cisco.com>
Wed, 10 Feb 2016 18:41:09 +0000 (19:41 +0100)
committer Gerrit Code Review <gerrit@fd.io>
Wed, 17 Feb 2016 16:29:03 +0000 (16:29 +0000)
Change-Id: I812ff9c8c9669b63907469c643c839e8bd3b419a
Signed-off-by: Stefan Kobza <skobza@cisco.com>
17 files changed:
README
bootstrap.sh
resources/libraries/bash/dut_setup.sh
resources/libraries/python/DUTSetup.py
resources/libraries/python/IPv6Setup.py
resources/libraries/python/PacketVerifier.py
resources/libraries/python/ssh.py
resources/libraries/python/topology.py
resources/libraries/robot/ipv4.robot
resources/libraries/robot/ipv6.robot
resources/traffic_scripts/ipv4_ping_ttl_check.py
tests/suites/__init__.robot
tests/suites/bridge_domain/test.robot
tests/suites/ipv6/ipv6_untagged.robot
tests/suites/l2_xconnect/l2_xconnect_untagged.robot
tests/suites/performance/short.robot
topologies/available/virl.yaml [new file with mode: 0644]

diff --git a/README b/README
index f3d9397..f8c478e 100644 (file)
--- a/README
+++ b/README
@@ -7,7 +7,7 @@
  - install python requirements for this project by executing:
     # pip install -r requirements.txt
  - make sure user mentioned in topology.py has NOPASSWD sudo access to
-    vpe_api_test
+    vpp_api_test
 
 
  Done.
diff --git a/bootstrap.sh b/bootstrap.sh
index 4a91121..01b7968 100755 (executable)
@@ -1,5 +1,42 @@
 #!/bin/bash
-set -euf -o pipefail
+#set -xeuf -o pipefail
+set -x
+
+#sudo apt-get -y install libpython2.7-dev
+
+rm -f priv_key
+cat > priv_key <<EOF
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb
+v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd
+vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2
+4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc
+1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs
+8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7
+0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN
+EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN
+0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU
+MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD
+p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW
+79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG
+OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD
+GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef
+bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg
+8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu
+f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u
+UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/
++m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT
+D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/
+sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn
+g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY
+qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8
+EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD
+BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN
+-----END RSA PRIVATE KEY-----
+EOF
+
+chmod 600 priv_key
+ls -la
 
 #git clone ssh://rotterdam-jobbuilder@gerrit.fd.io:29418/vpp
 #
@@ -9,45 +46,58 @@ set -euf -o pipefail
 #
 #ls -la
 
-#set -x
+#VIRL_VMS="10.30.51.53,10.30.51.51,10.30.51.52"
+#IFS=',' read -ra ADDR <<< "${VIRL_VMS}"
 #
-#ping 10.30.51.17 -w 3 || true
-#ping 10.30.51.18 -w 3 || true
-#ping 10.30.51.16 -w 3 || true
-#ping 10.30.51.21 -w 3 || true
-#ping 10.30.51.22 -w 3 || true
-#ping 10.30.51.20 -w 3 || true
-#ping 10.30.51.25 -w 3 || true
-#ping 10.30.51.26 -w 3 || true
-#ping 10.30.51.24 -w 3 || true
-
-
-exit 0
+function ssh_do() {
+    echo
+    echo "### "  ssh $@
+    ssh -i priv_key -o StrictHostKeyChecking=no $@
+}
 
-#IFS=',' read -ra ADDR <<< "${JCLOUDS_IPS}"
-#
-#function ssh_do() {
-#    echo
-#    echo "### "  ssh $@
-#    ssh $@
-#}
-#
-#
-#set
-#
 #for addr in "${ADDR[@]}"; do
 #    echo
 #    echo ${addr}
 #    echo
 #
-#    ssh_do localadmin@${addr} hostname || true
-#    ssh_do localadmin@${addr} ifconfig -a || true
-#    ssh_do localadmin@${addr} lspci -Dnn || true
-#    ssh_do localadmin@${addr} "lspci -Dnn | grep 0200" || true
-#    ssh_do localadmin@${addr} free -m || true
-#    ssh_do localadmin@${addr} cat /proc/meminfo || true
+#    ssh_do cisco@${addr} hostname || true
+#    ssh_do cisco@${addr} "ifconfig -a" || true
+#    ssh_do cisco@${addr} "lspci -Dnn | grep 0200" || true
+#    ssh_do cisco@${addr} "free -m" || true
+#    ssh_do cisco@${addr} "cat /proc/meminfo" || true
+#    ssh_do cisco@${addr} "dpkg -l vpp\*" || true
+#    ssh_do cisco@${addr} "lshw -c network" || true
+#    ssh_do cisco@${addr} "sudo -S sh -c 'echo exec show  hardware | vpp_api_test '"
 #done
 
 
+#ssh_do cisco@10.30.51.73 "sudo apt-get -y install python-virtualenv python-dev"
+#ssh_do cisco@10.30.51.73 "lspci -vmmks 0000:00:04.0"
+
+#ssh_do cisco@10.30.51.72 "sudo -S sh -c 'echo exec show  hardware | vpp_api_test '"
+#ssh_do cisco@10.30.51.71 "sudo -S sh -c 'echo exec show  hardware | vpp_api_test '"
+
+
+#echo Virtualenv install
+#VE_DIR=`pwd`/build
+#export PYTHONPATH=${VE_DIR}/lib/python2.7/site-packages
+#
+#curl -O https://pypi.python.org/packages/source/v/virtualenv/virtualenv-14.0.6.tar.gz
+#tar -zxf virtualenv-14.0.6.tar.gz
+#cd virtualenv-14.0.6
+#
+#python setup.py install --prefix=${VE_DIR}
+#
+#cd ..
+#${VE_DIR}/bin/virtualenv env
+
+virtualenv env
+. env/bin/activate
+
+echo pip install
+pip install -r requirements.txt
+
+#PYTHONPATH=`pwd` pybot -L TRACE -v TOPOLOGY_PATH:topologies/available/virl.yaml --exitonfailure --exitonerror --skipteardownonexit tests
+PYTHONPATH=`pwd` pybot -L TRACE -v TOPOLOGY_PATH:topologies/available/virl.yaml --exclude PERFTEST tests || true
 
 
diff --git a/resources/libraries/bash/dut_setup.sh b/resources/libraries/bash/dut_setup.sh
index 0309228..077fdcc 100644 (file)
@@ -37,3 +37,8 @@ echo See free memory
 echo
 free -m
 
+echo UUID
+sudo dmidecode | grep UUID
+
+echo Add dpdk-input trace
+sudo vpp_api_test <<< "exec trace add dpdk-input 100"
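For reference, the same trace command can be driven from Python through the framework's SSH helper; a minimal sketch (not part of this change), assuming a reachable DUT and the priv_key file written by bootstrap.sh:

    # Sketch only: issue the vpp_api_test command over SSH the way the
    # here-string above does, via exec_command_sudo(cmd, cmd_input) from
    # resources/libraries/python/ssh.py. The node values are placeholders
    # taken from this change, not a real setup.
    from resources.libraries.python.ssh import SSH

    node = {'host': '10.30.51.72', 'port': 22, 'username': 'cisco',
            'priv_key': open('priv_key').read()}

    ssh = SSH()
    ssh.connect(node)
    (ret_code, stdout, stderr) = ssh.exec_command_sudo(
        'vpp_api_test', 'exec trace add dpdk-input 100')
    print ret_code
    print stdout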
diff --git a/resources/libraries/python/DUTSetup.py b/resources/libraries/python/DUTSetup.py
index 76f76ae..e5e5678 100644 (file)
@@ -20,6 +20,20 @@ class DUTSetup(object):
     def __init__(self):
         pass
 
+    def start_vpp_service_on_all_duts(self, nodes):
+        """Start up the VPP service on all nodes."""
+        ssh = SSH()
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                ssh.connect(node)
+                (ret_code, stdout, stderr) = \
+                        ssh.exec_command_sudo('service vpp restart')
+                if 0 != int(ret_code):
+                    logger.debug('stdout: {0}'.format(stdout))
+                    logger.debug('stderr: {0}'.format(stderr))
+                    raise Exception('DUT {0} failed to start VPP service'.
+                            format(node['host']))
+
     def setup_all_duts(self, nodes):
         """Prepare all DUTs in given topology for test execution."""
         for node in nodes.values():
@@ -35,7 +49,7 @@ class DUTSetup(object):
                 Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH))
         logger.trace(stdout)
         if 0 != int(ret_code):
-            logger.error('DUT {0} setup script failed: "{1}"'.
+            logger.debug('DUT {0} setup script failed: "{1}"'.
                     format(node['host'], stdout + stderr))
             raise Exception('DUT test setup script failed at node {}'.
                     format(node['host']))
diff --git a/resources/libraries/python/IPv6Setup.py b/resources/libraries/python/IPv6Setup.py
index db49775..edd5d2c 100644 (file)
@@ -18,6 +18,7 @@ from ipaddress import IPv6Network
 from topology import NodeType
 from topology import Topology
 from constants import Constants
+from robot.api import logger
 
 
 class IPv6Networks(object):
@@ -177,6 +178,12 @@ class IPv6Setup(object):
             raise Exception('VPP sw_interface_set_flags failed on {h}'.format(
                 h=node['host']))
 
+        cmd_input = 'exec show int'
+        (ret_code, stdout, stderr) = ssh.exec_command_sudo(cmd, cmd_input)
+        logger.debug('ret: {0}'.format(ret_code))
+        logger.debug('stdout: {0}'.format(stdout))
+        logger.debug('stderr: {0}'.format(stderr))
+
     @staticmethod
     def vpp_del_if_ipv6_addr(node, interface, addr, prefix):
         """Delete IPv6 address on VPP.
diff --git a/resources/libraries/python/PacketVerifier.py b/resources/libraries/python/PacketVerifier.py
index 566ea95..19d6aaf 100644 (file)
@@ -232,6 +232,7 @@ class RxQueue(PacketVerifier):
             return None
 
         pkt = self._sock.recv(0x7fff)
+        pkt_pad = auto_pad(pkt)
         print 'Received packet on {0} of len {1}'.format(self._ifname, len(pkt))
         Ether(pkt).show2()
         print
@@ -241,7 +242,7 @@ class RxQueue(PacketVerifier):
                 # Auto pad all packets in ignore list
                 ignore[i] = auto_pad(ig_pkt)
             for ig_pkt in ignore:
-                if ig_pkt == pkt:
+                if ig_pkt == pkt_pad:
                     # Found the packet in ignore list, get another one
                     # TODO: subtract timeout - time_spent in here
                     ignore.remove(ig_pkt)
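The padding fix matters because Ethernet frames shorter than 60 bytes come back zero-padded from the wire, so comparing a sent packet against a received one byte-for-byte fails unless both sides are padded the same way. A minimal sketch of that behaviour (the real auto_pad lives in PacketVerifier.py and may differ in detail):

    # Sketch only: pad a frame to the 60-byte Ethernet minimum before
    # comparing raw bytes; illustrative stand-in for PacketVerifier's auto_pad.
    from scapy.all import Ether, IP, ICMP

    def auto_pad(packet):
        raw = str(packet)
        if len(raw) < 60:
            raw += '\0' * (60 - len(raw))
        return raw

    pkt = Ether(src='02:00:00:00:00:01', dst='02:00:00:00:00:02') / IP() / ICMP()
    print len(str(pkt)), len(auto_pad(pkt))  # 42 bytes unpadded vs. 60 padded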
diff --git a/resources/libraries/python/ssh.py b/resources/libraries/python/ssh.py
index 385619c..a2bb9b1 100644 (file)
@@ -11,6 +11,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import paramiko
+from paramiko import RSAKey
+import StringIO
 from scp import SCPClient
 from time import time
 from robot.api import logger
@@ -28,9 +30,7 @@ class SSH(object):
     __existing_connections = {}
 
     def __init__(self):
-        self._ssh = paramiko.SSHClient()
-        self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        self._hostname = None
+        pass
 
     def _node_hash(self, node):
         return hash(frozenset([node['host'], node['port']]))
@@ -40,31 +40,47 @@ class SSH(object):
 
         If there already is a connection to the node, this method reuses it.
         """
-        self._hostname = node['host']
         node_hash = self._node_hash(node)
-        if node_hash in self.__existing_connections:
-            self._ssh = self.__existing_connections[node_hash]
+        if node_hash in SSH.__existing_connections:
+            self._ssh = SSH.__existing_connections[node_hash]
+            logger.debug('reusing ssh: {0}'.format(self._ssh))
         else:
             start = time()
+            pkey = None
+            if 'priv_key' in node:
+                pkey = RSAKey.from_private_key(
+                        StringIO.StringIO(node['priv_key']))
+
+            self._ssh = paramiko.SSHClient()
+            self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
             self._ssh.connect(node['host'], username=node['username'],
-                              password=node['password'])
-            self.__existing_connections[node_hash] = self._ssh
+                              password=node.get('password'), pkey=pkey)
+
+            SSH.__existing_connections[node_hash] = self._ssh
+
             logger.trace('connect took {} seconds'.format(time() - start))
+            logger.debug('new ssh: {0}'.format(self._ssh))
+
+        logger.debug('Connect peer: {0}'.
+                format(self._ssh.get_transport().getpeername()))
+        logger.debug('Connections: {0}'.format(str(SSH.__existing_connections)))
 
     def exec_command(self, cmd, timeout=10):
         """Execute SSH command on a new channel on the connected Node.
 
         Returns (return_code, stdout, stderr).
         """
-        logger.trace('exec_command on {0}: {1}'.format(self._hostname, cmd))
+        logger.trace('exec_command on {0}: {1}'
+                .format(self._ssh.get_transport().getpeername(), cmd))
         start = time()
         chan = self._ssh.get_transport().open_session()
         if timeout is not None:
             chan.settimeout(int(timeout))
         chan.exec_command(cmd)
         end = time()
-        logger.trace('exec_command "{0}" on {1} took {2} seconds'.format(
-            cmd, self._hostname, end-start))
+        logger.trace('exec_command on {0} took {1} seconds'.format(
+            self._ssh.get_transport().getpeername(), end-start))
 
         stdout = ""
         while True:
@@ -99,9 +115,9 @@ class SSH(object):
             >>> ssh = SSH()
             >>> ssh.connect(node)
             >>> #Execute command without input (sudo -S cmd)
-            >>> ssh.exex_command_sudo("ifconfig eth0 down")
+            >>> ssh.exec_command_sudo("ifconfig eth0 down")
             >>> #Execute command with input (sudo -S cmd <<< "input")
-            >>> ssh.exex_command_sudo("vpp_api_test", "dump_interface_table")
+            >>> ssh.exec_command_sudo("vpp_api_test", "dump_interface_table")
         """
         if cmd_input is None:
             command = 'sudo -S {c}'.format(c=cmd)
@@ -182,7 +198,7 @@ class SSH(object):
         connect() method has to be called first!
         """
         logger.trace('SCP {0} to {1}:{2}'.format(
-            local_path, self._hostname, remote_path))
+            local_path, self._ssh.get_transport().getpeername(), remote_path))
         # SCPCLient takes a paramiko transport as its only argument
         scp = SCPClient(self._ssh.get_transport())
         start = time()
@@ -229,7 +245,7 @@ def exec_cmd_no_error(node, cmd, timeout=None, sudo=False):
 
     Returns (stdout, stderr).
     """
-    (rc, stdout, stderr) = exec_cmd(node,cmd, timeout=timeout, sudo=sudo)
+    (rc, stdout, stderr) = exec_cmd(node, cmd, timeout=timeout, sudo=sudo)
     assert_equal(rc, 0, 'Command execution failed: "{}"\n{}'.
                  format(cmd, stderr))
     return (stdout, stderr)
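The connect() change above lets a node authenticate with an in-memory RSA key instead of a password. A minimal sketch (not part of this change) of the path taken when a node dict carries a 'priv_key' entry; host and user are placeholders:

    # Sketch only: mirror of the key-based branch in SSH.connect().
    import StringIO

    import paramiko

    node = {'host': '10.30.51.72', 'username': 'cisco',
            'priv_key': open('priv_key').read()}  # PEM text, e.g. from the topology YAML

    pkey = None
    if 'priv_key' in node:
        # Build an RSA key object from the PEM string without touching disk.
        pkey = paramiko.RSAKey.from_private_key(StringIO.StringIO(node['priv_key']))

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # password stays None when only a key is given, matching node.get('password').
    client.connect(node['host'], username=node['username'],
                   password=node.get('password'), pkey=pkey)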
diff --git a/resources/libraries/python/topology.py b/resources/libraries/python/topology.py
index 8b6905d..1034160 100644 (file)
@@ -195,7 +195,7 @@ class Topology(object):
 
         interface_dict = {}
         list_mac_address = self.convert_mac_to_number_list(mac_address)
-        logger.trace(list_mac_address.__str__())
+        logger.trace(str(list_mac_address))
         for interface in interfaces_list:
             # TODO: create vat json integrity checking and move there
             if "l2_address" not in interface:
@@ -254,6 +254,9 @@ class Topology(object):
             if_mac = ifc['mac_address']
             interface_dict = self._extract_vpp_interface_by_mac(interface_list,
                                                                 if_mac)
+            if not interface_dict:
+                raise Exception('Interface {0} not found by MAC {1}'.
+                        format(ifc, if_mac))
             ifc['name'] = interface_dict["interface_name"]
             ifc['vpp_sw_index'] = interface_dict["sw_if_index"]
 
diff --git a/resources/libraries/robot/ipv4.robot b/resources/libraries/robot/ipv4.robot
index a5745a8..c271de5 100644 (file)
@@ -45,6 +45,7 @@
 | | Setup IPv4 adresses on all DUT nodes in topology | ${nodes} | ${nodes_ipv4_addr}
 | | Setup ARP on all DUTs | ${nodes} | ${nodes_ipv4_addr}
 | | Routes are set up for IPv4 testing
+| | Sleep | 10
 
 | TG interface "${tg_port}" can route to node "${node}" interface "${port}" "${hops}" hops away using IPv4
 | | Node "${nodes['TG']}" interface "${tg_port}" can route to node "${node}" interface "${port}" "${hops}" hops away using IPv4
@@ -74,7 +75,7 @@
 | # TODO: end_size is currently minimum MTU size for Ethernet minus IPv4 and
 | # ICMP echo header size (1500 - 20 - 8),
 | # MTU info is not in VAT sw_interface_dump output
-| | ${args}= | Set Variable | ${args} --start_size 0 --end_size 1472 --step 1
+| | ${args}= | Set Variable | ${args} --start_size 1 --end_size 1472 --step 1
 | | Run Traffic Script On Node | ipv4_sweep_ping.py | ${src_node} | ${args}
 
 | Send ARP request and validate response
diff --git a/resources/libraries/robot/ipv6.robot b/resources/libraries/robot/ipv6.robot
index 3d8a2ea..6cd6640 100644 (file)
 | | [Arguments] | ${nodes} | ${nodes_addr}
 | | Setup all DUTs before test
 | | Nodes Setup Ipv6 Addresses | ${nodes} | ${nodes_addr}
+| | Sleep | 10
 
 | Clear ipv6 on all dut in topology
 | | [Documentation] | Remove IPv6 address on all DUTs
diff --git a/resources/traffic_scripts/ipv4_ping_ttl_check.py b/resources/traffic_scripts/ipv4_ping_ttl_check.py
index 2fd9d55..54b6aa1 100755 (executable)
@@ -30,8 +30,8 @@ def check_ttl(ttl_begin, ttl_end, ttl_diff):
 
 
 def ckeck_packets_equal(pkt_send, pkt_recv):
-    pkt_send_raw = str(pkt_send)
-    pkt_recv_raw = str(pkt_recv)
+    pkt_send_raw = auto_pad(pkt_send)
+    pkt_recv_raw = auto_pad(pkt_recv)
     if pkt_send_raw != pkt_recv_raw:
         print "Sent:     {}".format(pkt_send_raw.encode('hex'))
         print "Received: {}".format(pkt_recv_raw.encode('hex'))
@@ -63,20 +63,21 @@ if dst_if_defined and (src_if_name == dst_if_name):
     raise Exception("Source interface name equals destination interface name")
 
 src_if = Interface(src_if_name)
-src_if.send_pkt(create_gratuitous_arp_request(src_mac, src_ip))
+src_if.send_pkt(str(create_gratuitous_arp_request(src_mac, src_ip)))
 if dst_if_defined:
     dst_if = Interface(dst_if_name)
-    dst_if.send_pkt(create_gratuitous_arp_request(dst_mac, dst_ip))
+    dst_if.send_pkt(str(create_gratuitous_arp_request(dst_mac, dst_ip)))
 
-pkt_req_send = auto_pad(Ether(src=src_mac, dst=first_hop_mac) /
-                        IP(src=src_ip, dst=dst_ip) /
-                        ICMP())
-pkt_req_send = Ether(pkt_req_send)
+pkt_req_send = (Ether(src=src_mac, dst=first_hop_mac) /
+                      IP(src=src_ip, dst=dst_ip) /
+                      ICMP())
 src_if.send_pkt(pkt_req_send)
 
 if dst_if_defined:
     try:
         pkt_req_recv = dst_if.recv_pkt()
+        if pkt_req_recv is None:
+            raise Exception('Timeout waiting for packet')
     except:
         src_if.close()
         if dst_if_defined:
@@ -89,14 +90,15 @@ if dst_if_defined:
     del pkt_req_send_mod[IP].chksum  # update checksum
     ckeck_packets_equal(pkt_req_send_mod[IP], pkt_req_recv[IP])
 
-    pkt_resp_send = auto_pad(Ether(src=dst_mac, dst=pkt_req_recv.src) /
-                             IP(src=dst_ip, dst=src_ip) /
-                             ICMP(type=0))  # echo-reply
-    pkt_resp_send = Ether(pkt_resp_send)
+    pkt_resp_send = (Ether(src=dst_mac, dst=pkt_req_recv.src) /
+                           IP(src=dst_ip, dst=src_ip) /
+                           ICMP(type=0))  # echo-reply
     dst_if.send_pkt(pkt_resp_send)
 
 try:
     pkt_resp_recv = src_if.recv_pkt()
+    if pkt_resp_recv is None:
+        raise Exception('Timeout waiting for packet')
 except:
     src_if.close()
     if dst_if_defined:
diff --git a/tests/suites/__init__.robot b/tests/suites/__init__.robot
index fc3c810..64f4a99 100644 (file)
@@ -16,5 +16,6 @@
 | Library | resources/libraries/python/SetupFramework.py
 | Library | resources.libraries.python.topology.Topology
 | Suite Setup | Run Keywords | Setup Framework | ${nodes}
+| ...         | AND          | Start VPP Service On All DUTs | ${nodes}
 | ...         | AND          | Update All Interface Data On All Nodes | ${nodes}
 
diff --git a/tests/suites/bridge_domain/test.robot b/tests/suites/bridge_domain/test.robot
index 108e2aa..4c88cae 100644 (file)
@@ -17,7 +17,6 @@
 | Test Setup | Setup all DUTs before test
 | Library | resources.libraries.python.topology.Topology
 | Variables | resources/libraries/python/topology.py
-| Force Tags | 3_NODE_DOUBLE_LINK_TOPO
 | Suite Setup | Setup all TGs before traffic script
 
 *** Test Cases ***
@@ -37,7 +36,7 @@
 | | ${dut1}= | Set Variable | ${nodes['DUT1']}
 | | ${dut2}= | Set Variable | ${nodes['DUT2']}
 | | ${tg_links}= | Setup TG "${tg}" DUT1 "${dut1}" and DUT2 "${dut2}" for 3 node l2 bridge domain test
-| | Sleep | 5 | Workaround for interface still in down state after vpp restart
+| | Sleep | 10 | Workaround for interface still in down state after vpp restart
 | | Send traffic on node "${nodes['TG']}" from link "${tg_links[0]}" to link "${tg_links[1]}"
 
 | Vpp forwards packets via L2 bridge domain in circular topology with static L2FIB entries
@@ -46,5 +45,5 @@
 | | ${dut1}= | Set Variable | ${nodes['DUT1']}
 | | ${dut2}= | Set Variable | ${nodes['DUT2']}
 | | ${tg_links}= | Setup TG "${tg}" DUT1 "${dut1}" and DUT2 "${dut2}" for 3 node static l2fib test
-| | Sleep | 5 | Workaround for interface still in down state after vpp restart
+| | Sleep | 10 | Workaround for interface still in down state after vpp restart
 | | Send traffic on node "${nodes['TG']}" from link "${tg_links[0]}" to link "${tg_links[1]}"
diff --git a/tests/suites/ipv6/ipv6_untagged.robot b/tests/suites/ipv6/ipv6_untagged.robot
index 6f8b9c0..e804add 100644 (file)
@@ -23,7 +23,6 @@
 | ...         | AND          | Vpp nodes ra supress link layer | ${nodes}
 | ...         | AND          | Vpp nodes setup ipv6 routing | ${nodes} | ${nodes_ipv6_addr}
 | ...         | AND          | Setup all TGs before traffic script
-| Suite Teardown | Clear ipv6 on all dut in topology | ${nodes} | ${nodes_ipv6_addr}
 | Test Setup | Clear interface counters on all vpp nodes in topology | ${nodes}
 
 *** Test Cases ***
diff --git a/tests/suites/l2_xconnect/l2_xconnect_untagged.robot b/tests/suites/l2_xconnect/l2_xconnect_untagged.robot
index 36f49b3..286a63e 100644 (file)
@@ -28,5 +28,6 @@
 | | ${dut1}= | Set Variable | ${nodes['DUT1']}
 | | ${dut2}= | Set Variable | ${nodes['DUT2']}
 | | ${tg_links}= | Get traffic links between TG "${tg}" and DUT1 "${dut1}" and DUT2 "${dut2}"
+| | Sleep | 10 | Work around VPP interface up taking too long.
 | | Send traffic on node "${nodes['TG']}" from link "${tg_links[0]}" to link "${tg_links[1]}"
 
diff --git a/tests/suites/performance/short.robot b/tests/suites/performance/short.robot
index 3d29485..1514377 100644 (file)
@@ -17,7 +17,7 @@
 | Resource | resources/libraries/robot/ipv4.robot
 | Library | resources/libraries/python/TrafficGenerator.py
 | Library | resources/libraries/python/CrossConnectSetup.py
-| Force Tags | topo-3node
+| Force Tags | topo-3node | PERFTEST
 | Test Setup | Setup all DUTs before test
 | Suite Setup | Initialize traffic generator | ${nodes['TG']}
 | ... | ${nodes['TG']['interfaces']['port3']['pci_address']}
diff --git a/topologies/available/virl.yaml b/topologies/available/virl.yaml
new file mode 100644 (file)
index 0000000..36d45fe
--- /dev/null
@@ -0,0 +1,137 @@
+---
+metadata:
+  version: 0.1
+  schema:
+    - resources/topology_schemas/3_node_topology.sch.yaml
+    - resources/topology_schemas/topology.sch.yaml
+  tags: [hw, 3-node]
+
+nodes:
+  TG:
+    type: TG
+    host: "10.30.51.73"
+    port: 22
+    username: cisco
+    priv_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb
+      v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd
+      vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2
+      4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc
+      1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs
+      8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7
+      0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN
+      EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN
+      0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU
+      MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD
+      p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW
+      79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG
+      OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD
+      GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef
+      bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg
+      8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu
+      f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u
+      UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/
+      +m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT
+      D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/
+      sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn
+      g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY
+      qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8
+      EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD
+      BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN
+      -----END RSA PRIVATE KEY-----
+    interfaces:
+      port3:
+        mac_address: "fa:16:3e:c6:df:f2"
+        pci_address: "0000:00:04.0"
+        link: link1
+        driver: virtio-pci
+      port5:
+        mac_address: "fa:16:3e:ab:ef:db"
+        pci_address: "0000:00:05.0"
+        link: link2
+        driver: virtio-pci
+  DUT1:
+    type: DUT
+    host: "10.30.51.72"
+    port: 22
+    username: cisco
+    priv_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb
+      v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd
+      vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2
+      4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc
+      1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs
+      8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7
+      0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN
+      EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN
+      0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU
+      MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD
+      p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW
+      79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG
+      OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD
+      GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef
+      bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg
+      8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu
+      f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u
+      UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/
+      +m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT
+      D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/
+      sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn
+      g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY
+      qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8
+      EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD
+      BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN
+      -----END RSA PRIVATE KEY-----
+    interfaces:
+      port1:
+        mac_address: "fa:16:3e:f1:61:90"
+        pci_address: "0000:00:05.0"
+        link: link1
+      port3:
+        mac_address: "fa:16:3e:2b:eb:53"
+        pci_address: "0000:00:04.0"
+        link: link3
+  DUT2:
+    type: DUT
+    host: "10.30.51.71"
+    port: 22
+    username: cisco
+    priv_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb
+      v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd
+      vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2
+      4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc
+      1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs
+      8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7
+      0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN
+      EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN
+      0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU
+      MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD
+      p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW
+      79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG
+      OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD
+      GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef
+      bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg
+      8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu
+      f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u
+      UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/
+      +m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT
+      D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/
+      sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn
+      g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY
+      qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8
+      EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD
+      BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN
+      -----END RSA PRIVATE KEY-----
+    interfaces:
+      port1:
+        mac_address: "fa:16:3e:5c:b8:f6"
+        pci_address: "0000:00:04.0"
+        link: link2
+      port3:
+        mac_address: "fa:16:3e:b3:d2:6f"
+        pci_address: "0000:00:05.0"
+        link: link3
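The new virl.yaml keeps the SSH private key inline under each node, so the key reaches SSH.connect() straight from the parsed topology. A minimal sketch of reading it with PyYAML; the load_topology helper below is illustrative only, as the framework itself loads the file through topology.py and the TOPOLOGY_PATH variable:

    # Sketch only: parse the topology file and show that priv_key travels with the node.
    import yaml

    def load_topology(path):
        with open(path) as stream:
            return yaml.safe_load(stream)['nodes']

    nodes = load_topology('topologies/available/virl.yaml')
    tg = nodes['TG']
    print tg['host'], tg['username'], 'priv_key' in tg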