Add DHCP test 99/6699/2
author Filip Tehlar <ftehlar@cisco.com>
Mon, 15 May 2017 12:05:24 +0000 (14:05 +0200)
committer Filip Tehlar <ftehlar@cisco.com>
Tue, 16 May 2017 07:41:13 +0000 (09:41 +0200)
Change-Id: I22d7ca6cd58b377f1c0a166022411d21f6c966e8
Signed-off-by: Filip Tehlar <ftehlar@cisco.com>
tests/data_plane/configs/vpp_lite_config/dhcp/vpp1.config [new file with mode: 0644]
tests/data_plane/configs/vpp_lite_config/dhcp/vpp2.config [new file with mode: 0644]
tests/data_plane/vpp_lite_topo/scripts/dhcp_client.py [new file with mode: 0644]
tests/data_plane/vpp_lite_topo/scripts/dhcp_server.py [new file with mode: 0644]
tests/data_plane/vpp_lite_topo/test_driver/dhcp.sh [new file with mode: 0644]
tests/data_plane/vpp_lite_topo/tests/test_dhcp.sh [new file with mode: 0755]
tests/data_plane/vpp_lite_topo/topologies/3_node_snake_topo.sh [new file with mode: 0644]

diff --git a/tests/data_plane/configs/vpp_lite_config/dhcp/vpp1.config b/tests/data_plane/configs/vpp_lite_config/dhcp/vpp1.config
new file mode 100644 (file)
index 0000000..54d8448
--- /dev/null
@@ -0,0 +1,8 @@
+create_host_iface vpp1 6.0.1.1/24
+create_host_iface intervpp1 6.0.3.1/24
+
+lisp_state enable
+lisp_locator_set_with_locator ls1 host-intervpp1 1 1
+lisp_local_eid eid 6.0.1.0/24 locator-set ls1
+lisp_remote_mapping eid 6.0.2.0/24 rloc 6.0.3.2
+lisp_adjacency leid 6.0.1.0/24 reid 6.0.2.0/24
diff --git a/tests/data_plane/configs/vpp_lite_config/dhcp/vpp2.config b/tests/data_plane/configs/vpp_lite_config/dhcp/vpp2.config
new file mode 100644 (file)
index 0000000..34d7151
--- /dev/null
@@ -0,0 +1,8 @@
+create_host_iface intervpp2 6.0.3.2/24
+create_host_iface vpp2 6.0.2.1/24
+
+lisp_state enable
+lisp_locator_set_with_locator ls1 host-intervpp2 1 1
+lisp_local_eid eid 6.0.2.0/24 locator-set ls1
+lisp_remote_mapping eid 6.0.1.0/24 rloc 6.0.3.1
+lisp_adjacency leid 6.0.2.0/24 reid 6.0.1.0/24
diff --git a/tests/data_plane/vpp_lite_topo/scripts/dhcp_client.py b/tests/data_plane/vpp_lite_topo/scripts/dhcp_client.py
new file mode 100644 (file)
index 0000000..8e49426
--- /dev/null
@@ -0,0 +1,33 @@
+import sys
+from scapy.all import *
+
+def p(s):
+    print 'DHCP client: {}'.format(s)
+
+def main(argv):
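+  # argv[1] is the source MAC for the discover, argv[2] the IP address the
+  # DHCP reply is expected to come from (the proxy address)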
+  src_mac = argv[1]
+  dhcp_src = argv[2]
+
+  # disable scapy's reply matching on IP addresses; DHCP uses broadcast
+  # addresses, so strict matching would discard the replies
+  conf.checkIPaddr = False
+
+  while True:
+    discover = Ether(dst='ff:ff:ff:ff:ff:ff', src=src_mac)/ \
+      IP(src='0.0.0.0', dst='255.255.255.255')/ \
+      UDP(dport=67,sport=68)/ \
+      BOOTP(op=1, chaddr=src_mac)/ \
+      DHCP(options=[('message-type', 'discover'), ('end')])
+
+    ans,unans = srp(discover, timeout=3)
+    for snd,rcv in ans:
+      if rcv[IP].src == dhcp_src:
+        exit(0)
+      else:
+        p('Unexpected DHCP packet source address! ({})'.format(rcv[IP].src))
+        exit(1)
+
+if __name__ == "__main__":
+  main(sys.argv)
diff --git a/tests/data_plane/vpp_lite_topo/scripts/dhcp_server.py b/tests/data_plane/vpp_lite_topo/scripts/dhcp_server.py
new file mode 100644 (file)
index 0000000..cd6f43e
--- /dev/null
@@ -0,0 +1,34 @@
+from scapy.all import *
+from scapy.layers import *
+
+server_ip="6.0.2.2"
+client_ip="6.0.1.2"
+server_mac="00:0B:CD:AE:9F:C6"
+client_mac="aa:a2:a5:ea:54:20"
+subnet_mask="255.255.255.0"
+gateway="6.0.1.1"
+
+# suboption 1 Agent circuit ID; len:4; val:0x00000001
+# suboption 5 Link selection; len:4; val:6.0.1.1
+option82 = '\x01\x04\x00\x00\x00\x01\x05\x04\x06\x00\x01\x01'
+
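+# reply to any DHCP discover with a hard-coded offer: it is sent to the relay
+# address (6.0.1.1), echoes the request xid and carries the option 82 payload above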
+def detect_dhcp(pkt):
+  # if a DHCP discover (message-type 1) arrives, answer with an offer
+  if DHCP in pkt and pkt[DHCP].options[0][1] == 1:
+    sendp(Ether(src=server_mac,dst="ff:ff:ff:ff:ff:ff")/
+          IP(src=server_ip,dst="6.0.1.1")/
+          UDP(sport=67,dport=68)/
+          BOOTP(op=2, yiaddr=client_ip, siaddr=server_ip, giaddr=gateway,
+                chaddr=client_mac, xid=pkt[BOOTP].xid)/
+          DHCP(options=[('message-type', 'offer'),
+                        ('subnet_mask', subnet_mask),
+                        ('server_id', server_ip),
+                        ('relay_agent_Information', option82), ('end')]))
+
+#sniff DHCP requests
+def start():
+    sniff(filter="udp and (port 67 or 68)", prn=detect_dhcp, store=0)
+
+start()
diff --git a/tests/data_plane/vpp_lite_topo/test_driver/dhcp.sh b/tests/data_plane/vpp_lite_topo/test_driver/dhcp.sh
new file mode 100644 (file)
index 0000000..e988b2a
--- /dev/null
@@ -0,0 +1,53 @@
+source config.sh
+source odl_utils.sh
+source topologies/2_node_topo.sh
+
+if [ "$1" == "clean" ] ; then
+  2_node_topo_clean no_odl
+  exit 0
+fi
+
+if [[ $(id -u) != 0 ]]; then
+  echo "Error: run this as root."
+  exit 1
+fi
+
+function start_dhcp_server
+{
+  echo "starting DHCP server from namespace $1"
+  ip netns exec "$1" python scripts/dhcp_server.py &
+  dhcp_id=$!
+}
+
+function send_dhcp_discovery
+{
+  src_mac="`sudo ip netns exec vppns1 ifconfig veth_vpp1 | grep HWaddr | awk '{print $5}'`"
+  ip netns exec "$1" python scripts/dhcp_client.py "$src_mac" "$2"
+  rc=$?
+}
+
+function test_dhcp
+{
+  2_node_topo_setup no_odl
+  test_result=1
+
+  # configure the DHCP proxy on vpp1: relay to the server 6.0.2.2, source address 6.0.1.1
+  echo "set dhcp proxy server 6.0.2.2 src-address 6.0.1.1" | nc 0 5002
+
+  maybe_pause
+
+  # run DHCP server from namespace
+  start_dhcp_server vppns2
+
+  # send a DHCP discover from the client namespace and check that the reply
+  # (the DHCP offer) comes from the DHCP proxy address
+  send_dhcp_discovery vppns1 "6.0.1.1"
+
+  maybe_pause
+  2_node_topo_clean no_odl
+  kill $dhcp_id
+
+  print_status $rc "DHCP test failed!"
+  exit $test_result
+}
+
diff --git a/tests/data_plane/vpp_lite_topo/tests/test_dhcp.sh b/tests/data_plane/vpp_lite_topo/tests/test_dhcp.sh
new file mode 100755 (executable)
index 0000000..851c454
--- /dev/null
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+# Requires the scapy Python package.
+
+VPP_LITE_CONF=`pwd`/../configs/vpp_lite_config/dhcp
+
+source test_driver/dhcp.sh
+
+test_dhcp
diff --git a/tests/data_plane/vpp_lite_topo/topologies/3_node_snake_topo.sh b/tests/data_plane/vpp_lite_topo/topologies/3_node_snake_topo.sh
new file mode 100644 (file)
index 0000000..366bcbc
--- /dev/null
@@ -0,0 +1,115 @@
+
+function 3_node_snake_topo_clean
+{
+  echo "Clearing all VPP instances.."
+  pkill vpp --signal 9
+  rm /dev/shm/*
+
+  echo "Cleaning topology.."
+  ip netns exec xtr-ns12 ifconfig br12 down
+  ip netns exec xtr-ns23 ifconfig br23 down
+
+  ip netns exec xtr-ns12 brctl delbr br12
+  ip netns exec xtr-ns23 brctl delbr br23
+
+  ip link del dev veth_vpp1 &> /dev/null
+  ip link del dev veth_vpp2 &> /dev/null
+  ip link del dev veth_xtr1_xtr2 &> /dev/null
+  ip link del dev veth_xtr2_xtr1 &> /dev/null
+  ip link del dev veth_xtr2_xtr3 &> /dev/null
+  ip link del dev veth_xtr3_xtr2 &> /dev/null
+
+  ip netns del vppns1 &> /dev/null
+  ip netns del vppns2 &> /dev/null
+  ip netns del xtr-ns12 &> /dev/null
+  ip netns del xtr-ns23 &> /dev/null
+}
+
+function 3_node_snake_topo_setup
+{
+  ip netns add vppns1
+  ip netns add vppns2
+  ip netns add xtr-ns12
+  ip netns add xtr-ns23
+
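+  # each xTR-to-xTR link is a veth pair; the xtrN_xtrM end stays in the default
+  # namespace (where the VPP instances run), the veth_ end is bridged in a per-link namespace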
+  ip link add veth_xtr1_xtr2 type veth peer name xtr1_xtr2
+  ip link add veth_xtr2_xtr1 type veth peer name xtr2_xtr1
+  ip link add veth_xtr2_xtr3 type veth peer name xtr2_xtr3
+  ip link add veth_xtr3_xtr2 type veth peer name xtr3_xtr2
+
+  # enable peer interfaces
+  ip link set dev xtr1_xtr2 up
+  ip link set dev xtr2_xtr1 up
+  ip link set dev xtr2_xtr3 up
+  ip link set dev xtr3_xtr2 up
+
+  ip link set dev veth_xtr1_xtr2 up netns xtr-ns12
+  ip link set dev veth_xtr2_xtr1 up netns xtr-ns12
+  ip link set dev veth_xtr2_xtr3 up netns xtr-ns23
+  ip link set dev veth_xtr3_xtr2 up netns xtr-ns23
+
+  ip netns exec xtr-ns12 brctl addbr br12
+  ip netns exec xtr-ns23 brctl addbr br23
+
+  ip netns exec xtr-ns12 brctl addif br12 veth_xtr1_xtr2
+  ip netns exec xtr-ns12 brctl addif br12 veth_xtr2_xtr1
+  ip netns exec xtr-ns12 ifconfig br12 up
+  ip netns exec xtr-ns23 brctl addif br23 veth_xtr2_xtr3
+  ip netns exec xtr-ns23 brctl addif br23 veth_xtr3_xtr2
+  ip netns exec xtr-ns23 ifconfig br23 up
+
+  # create and configure 1st veth client to vpp pair
+  ip link add veth_vpp1 type veth peer name vpp1
+  ip link set dev vpp1 up
+  ip link set dev veth_vpp1 up netns vppns1
+
+  # create and configure 2nd veth client to vpp pair
+  ip link add veth_vpp2 type veth peer name vpp2
+  ip link set dev vpp2 up
+  ip link set dev veth_vpp2 up netns vppns2
+
+  ip netns exec vppns1 \
+  bash -c "
+    ip link set dev lo up
+    ip addr add 6.0.1.2/24 dev veth_vpp1
+    ip route add 6.0.2.0/24 via 6.0.1.1
+    ip addr add 6:0:1::2/64 dev veth_vpp1
+    ip route add 6:0:2::0/64 via 6:0:1::1
+  "
+
+  ip netns exec vppns2 \
+  bash -c "
+    ip link set dev lo up
+    ip addr add 6.0.2.2/24 dev veth_vpp2
+    ip route add 6.0.1.0/24 via 6.0.2.1
+    ip addr add 6:0:2::2/64 dev veth_vpp2
+    ip route add 6:0:1::0/64 via 6:0:2::1
+  "
+
+  # generate config files
+  ./scripts/generate_config.py ${VPP_LITE_CONF} ${CFG_METHOD}
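+  # the generated vppN.cli / vppN.vat files are loaded below according to CFG_METHOD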
+
+  start_vpp 5002 vpp1
+  start_vpp 5003 vpp2
+  start_vpp 5004 vpp3
+
+  sleep 2
+  echo "* Selected configuration method: $CFG_METHOD"
+  if [ "$CFG_METHOD" == "cli" ] ; then
+    echo "exec ${VPP_LITE_CONF}/vpp1.cli" | nc 0 5002
+    echo "exec ${VPP_LITE_CONF}/vpp2.cli" | nc 0 5003
+    echo "exec ${VPP_LITE_CONF}/vpp3.cli" | nc 0 5004
+  elif [ "$CFG_METHOD" == "vat" ] ; then
+    ${VPP_API_TEST} chroot prefix vpp1 script in ${VPP_LITE_CONF}/vpp1.vat
+    ${VPP_API_TEST} chroot prefix vpp2 script in ${VPP_LITE_CONF}/vpp2.vat
+    ${VPP_API_TEST} chroot prefix vpp3 script in ${VPP_LITE_CONF}/vpp3.vat
+  else
+    echo "=== WARNING:"
+    echo "=== Invalid configuration method selected!"
+    echo "=== To resolve this set env variable CFG_METHOD to vat or cli."
+    echo "==="
+  fi
+}