--- /dev/null
+{
+ "input": {
+ "mapping-record": {
+ "recordTtl": 1440,
+ "action": "NoAction",
+ "authoritative": true,
+ "eid": {
+ "address-type": "ietf-lisp-address-types:ipv4-prefix-afi",
+ "ipv4-prefix": "6.0.2.0/24"
+ },
+ "LocatorRecord": [
+ {
+ "locator-id": "ISP1",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.3.20"
+ }
+ }
+ ]
+ }
+ }
+}
--- /dev/null
+{
+ "input": {
+ "mapping-record": {
+ "recordTtl": 1440,
+ "action": "NoAction",
+ "authoritative": true,
+ "eid": {
+ "address-type": "ietf-lisp-address-types:ipv4-prefix-afi",
+ "ipv4-prefix": "6.0.1.0/24"
+ },
+ "LocatorRecord": [
+ {
+ "locator-id": "ISP1",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.3.1"
+ }
+ },
+ {
+ "locator-id": "ISP2",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.4.1"
+ }
+ }
+ ]
+ }
+ }
+}
--- /dev/null
+{
+ "input": {
+ "mapping-record": {
+ "recordTtl": 1440,
+ "action": "NoAction",
+ "authoritative": true,
+ "eid": {
+ "address-type": "ietf-lisp-address-types:ipv4-prefix-afi",
+ "ipv4-prefix": "6.0.2.0/24"
+ },
+ "LocatorRecord": [
+ {
+ "locator-id": "ISP1",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.3.2"
+ }
+ },
+ {
+ "locator-id": "ISP2",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.4.2"
+ }
+ }
+ ]
+ }
+ }
+}
--- /dev/null
+{
+ "input": {
+ "mapping-record": {
+ "recordTtl": 1440,
+ "action": "NoAction",
+ "authoritative": true,
+ "eid": {
+ "address-type": "ietf-lisp-address-types:ipv6-prefix-afi",
+ "ipv6-prefix": "6:0:2::0/64"
+ },
+ "LocatorRecord": [
+ {
+ "locator-id": "ISP1",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv6-afi",
+ "ipv6": "6:0:3::20"
+ }
+ }
+ ]
+ }
+ }
+}
--- /dev/null
+{
+ "input": {
+ "mapping-record": {
+ "recordTtl": 1440,
+ "action": "NoAction",
+ "authoritative": true,
+ "eid": {
+ "address-type": "ietf-lisp-address-types:ipv6-prefix-afi",
+ "ipv6-prefix": "6:0:1::0/64"
+ },
+ "LocatorRecord": [
+ {
+ "locator-id": "ISP1",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv6-afi",
+ "ipv6": "6:0:3::1"
+ }
+ },
+ {
+ "locator-id": "ISP2",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv6-afi",
+ "ipv6": "6:0:4::1"
+ }
+ }
+ ]
+ }
+ }
+}
--- /dev/null
+{
+ "input": {
+ "mapping-record": {
+ "recordTtl": 1440,
+ "action": "NoAction",
+ "authoritative": true,
+ "eid": {
+ "address-type": "ietf-lisp-address-types:ipv6-prefix-afi",
+ "ipv6-prefix": "6:0:2::0/64"
+ },
+ "LocatorRecord": [
+ {
+ "locator-id": "ISP1",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv6-afi",
+ "ipv6": "6:0:3::2"
+ }
+ },
+ {
+ "locator-id": "ISP2",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv6-afi",
+ "ipv6": "6:0:4::2"
+ }
+ }
+ ]
+ }
+ }
+}
--- /dev/null
+{
+ "input": {
+ "mapping-record": {
+ "recordTtl": 1440,
+ "action": "NoAction",
+ "authoritative": true,
+ "eid": {
+ "virtual-network-id": 10,
+ "address-type": "ietf-lisp-address-types:mac-afi",
+ "mac": "08:22:22:22:22:22"
+ },
+ "LocatorRecord": [
+ {
+ "locator-id": "ISP1",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": true,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.3.20"
+ }
+ }
+ ]
+ }
+ }
+}
--- /dev/null
+{
+ "input": {
+ "mapping-record": {
+ "recordTtl": 1440,
+ "action": "NoAction",
+ "authoritative": true,
+ "eid": {
+ "virtual-network-id": 10,
+ "address-type": "ietf-lisp-address-types:mac-afi",
+ "mac": "08:11:11:11:11:11"
+ },
+ "LocatorRecord": [
+ {
+ "locator-id": "ISP1",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": false,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.3.1"
+ }
+ },
+ {
+ "locator-id": "ISP2",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": false,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.4.1"
+ }
+ }
+ ]
+ }
+ }
+}
--- /dev/null
+{
+ "input": {
+ "mapping-record": {
+ "recordTtl": 1440,
+ "action": "NoAction",
+ "authoritative": true,
+ "eid": {
+ "virtual-network-id": 10,
+ "address-type": "ietf-lisp-address-types:mac-afi",
+ "mac": "08:22:22:22:22:22"
+ },
+ "LocatorRecord": [
+ {
+ "locator-id": "ISP1",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": false,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.3.2"
+ }
+ },
+ {
+ "locator-id": "ISP2",
+ "priority": 1,
+ "weight": 1,
+ "multicastPriority": 255,
+ "multicastWeight": 0,
+ "localLocator": true,
+ "rlocProbed": false,
+ "routed": false,
+ "rloc": {
+ "address-type": "ietf-lisp-address-types:ipv4-afi",
+ "ipv4": "6.0.4.2"
+ }
+ }
+ ]
+ }
+ }
+}
--- /dev/null
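+comment { xTR vpp1: IPv4 EID 6.0.1.0/24, multihomed over RLOCs 6.0.3.1 and 6.0.4.1 }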
+create host-interface name vpp1
+set int state host-vpp1 up
+set int ip address host-vpp1 6.0.1.1/24
+
+create host-interface name intervpp11
+set int state host-intervpp11 up
+set int ip address host-intervpp11 6.0.3.1/24
+
+create host-interface name intervpp21
+set int state host-intervpp21 up
+set int ip address host-intervpp21 6.0.4.1/24
+
+lisp enable
+
+lisp locator-set add ls1 iface host-intervpp11 p 1 w 1 iface host-intervpp21 p 1 w 1
+lisp eid-table add eid 6.0.1.0/24 locator-set ls1
+lisp map-resolver add 6.0.3.100
--- /dev/null
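+comment { xTR vpp2: IPv4 EID 6.0.2.0/24, multihomed over RLOCs 6.0.3.2 and 6.0.4.2 }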
+create host-interface name vpp2
+set int state host-vpp2 up
+set int ip address host-vpp2 6.0.2.1/24
+
+create host-interface name intervpp12
+set int state host-intervpp12 up
+set int ip address host-intervpp12 6.0.3.2/24
+
+create host-interface name intervpp22
+set int state host-intervpp22 up
+set int ip address host-intervpp22 6.0.4.2/24
+
+lisp enable
+
+lisp locator-set add ls1 iface host-intervpp12 p 1 w 1 iface host-intervpp22 p 1 w 1
+lisp eid-table add eid 6.0.2.0/24 locator-set ls1
+lisp map-resolver add 6.0.3.100
--- /dev/null
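+comment { xTR vpp1: IPv6 EID 6:0:1::0/64, multihomed over RLOCs 6:0:3::1 and 6:0:4::1 }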
+create host-interface name vpp1
+set int state host-vpp1 up
+set int ip address host-vpp1 6:0:1::1/64
+
+create host-interface name intervpp11
+set int state host-intervpp11 up
+set int ip address host-intervpp11 6:0:3::1/64
+
+create host-interface name intervpp21
+set int state host-intervpp21 up
+set int ip address host-intervpp21 6:0:4::1/64
+
+lisp enable
+
+lisp locator-set add ls1 iface host-intervpp11 p 1 w 1 iface host-intervpp21 p 1 w 1
+lisp eid-table add eid 6:0:1::0/64 locator-set ls1
+lisp map-resolver add 6:0:3::100
--- /dev/null
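+comment { xTR vpp2: IPv6 EID 6:0:2::0/64, multihomed over RLOCs 6:0:3::2 and 6:0:4::2 }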
+create host-interface name vpp2
+set int state host-vpp2 up
+set int ip address host-vpp2 6:0:2::1/64
+
+create host-interface name intervpp12
+set int state host-intervpp12 up
+set int ip address host-intervpp12 6:0:3::2/64
+
+create host-interface name intervpp22
+set int state host-intervpp22 up
+set int ip address host-intervpp22 6:0:4::2/64
+
+lisp enable
+
+lisp locator-set add ls1 iface host-intervpp12 p 1 w 1 iface host-intervpp22 p 1 w 1
+lisp eid-table add eid 6:0:2::0/64 locator-set ls1
+lisp map-resolver add 6:0:3::100
--- /dev/null
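+comment { xTR vpp1: L2 EID 08:11:11:11:11:11 in vni 10 / bridge domain 10, multihomed over RLOCs 6.0.3.1 and 6.0.4.1 }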
+create host-interface name vpp1
+set int state host-vpp1 up
+set int ip address host-vpp1 6.0.1.1/24
+
+create host-interface name intervpp11
+set int state host-intervpp11 up
+set int ip address host-intervpp11 6.0.3.1/24
+
+create host-interface name intervpp21
+set int state host-intervpp21 up
+set int ip address host-intervpp21 6.0.4.1/24
+
+lisp enable
+
+lisp locator-set add ls1 iface host-intervpp11 p 1 w 1 iface host-intervpp21 p 1 w 1
+lisp map-resolver add 6.0.3.100
+
+lisp eid-table map vni 10 bd 10
+set interface l2 bridge host-vpp1 10
+lisp eid-table add vni 10 eid 08:11:11:11:11:11 locator-set ls1
--- /dev/null
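+comment { xTR vpp2: L2 EID 08:22:22:22:22:22 in vni 10 / bridge domain 10, multihomed over RLOCs 6.0.3.2 and 6.0.4.2 }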
+create host-interface name vpp2
+set int state host-vpp2 up
+set int ip address host-vpp2 6.0.1.2/24
+
+create host-interface name intervpp12
+set int state host-intervpp12 up
+set int ip address host-intervpp12 6.0.3.2/24
+
+create host-interface name intervpp22
+set int state host-intervpp22 up
+set int ip address host-intervpp22 6.0.4.2/24
+
+lisp enable
+
+lisp locator-set add ls1 iface host-intervpp12 p 1 w 1 iface host-intervpp22 p 1 w 1
+lisp map-resolver add 6.0.3.100
+
+lisp eid-table map vni 10 bd 10
+set interface l2 bridge host-vpp2 10
+lisp eid-table add vni 10 eid 08:22:22:22:22:22 locator-set ls1
lisp locator-set add ls1 iface host-rtr_vpp1 p 1 w 1 iface host-rtr_vpp2 p 1 w 1
lisp pitr ls ls1
comment { lisp pitr disable }
-lisp remote-mapping deid 6:0:0::0/32 action send-map-request
+lisp remote-mapping eid 6:0:0::0/32 action send-map-request
lisp map-resolver add 6.0.3.100
lisp pitr ls ls1
lisp eid-table map vni 100 vrf 100
lisp eid-table map vni 200 vrf 200
-lisp remote-mapping vni 100 deid 6:0:0::0/32 action send-map-request
-lisp remote-mapping vni 200 deid 6:0:0::0/32 action send-map-request
+lisp remote-mapping vni 100 eid 6:0:0::0/32 action send-map-request
+lisp remote-mapping vni 200 eid 6:0:0::0/32 action send-map-request
lisp locator-set add ls1 iface host-rtr_wan4 p 1 w 1 iface host-rtr_vpp2 p 1 w 1
lisp pitr ls ls1
comment { lisp pitr disable }
-lisp remote-mapping deid 6:0:0::0/32 action send-map-request
+lisp remote-mapping eid 6:0:0::0/32 action send-map-request
lisp map-resolver add 6.0.3.100
--- /dev/null
+source config.sh
+source odl_utils.sh
+source topologies/multihoming_topo.sh
+
+ODL_CONFIG_FILE1="vpp1.json"
+ODL_CONFIG_FILE2="vpp2.json"
+ODL_CONFIG_FILE3="update_vpp2.json"
+
+if [ "$1" == "clean" ] ; then
+ multihoming_topo_clean
+ exit 0
+fi
+
+if [[ $(id -u) != 0 ]]; then
+    echo "Error: run this as root."
+ exit 1
+fi
+
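+# test_multihoming <ping-binary> <dest-eid> [no_setup|wait]
+#   $1 - ping executable to run from vppns1 (ping or ping6)
+#   $2 - destination EID to reach across the LISP tunnel
+#   $3 - "no_setup" skips topology creation, "wait" pauses between steps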
+function test_multihoming
+{
+ if [ "$3" != "no_setup" ] ; then
+ multihoming_topo_setup
+ fi
+
+ if [ "$3" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+
+    test_result=1
+
+    ip netns exec vppns1 "${1}" -w 15 -c 1 "${2}"
+    rc=$?
+    if [ $rc -ne 0 ] ; then
+        echo "No response received!"
+        multihoming_topo_clean
+        exit $test_result
+    fi
+
+    # do some port sweeping to see that load balancing works
+    ip netns exec vppns1 nc -n -z "${2}" 1-1000 > /dev/null 2>&1
+
+    # check that traffic was split roughly evenly between the two paths:
+    # read the rx packet count of host-intervpp11 from vpp1's CLI (localhost:5002)
+    pkts=$(echo "show int" | nc 0 5002 | grep host-intervpp11 | awk '{print $6}' | tr -d '\r')
+
+    if [ "$pkts" -gt 450 ] && [ "$pkts" -lt 550 ] ; then
+ rc=0
+ else
+ rc=1
+ fi
+
+ if [ $rc -ne 0 ] ; then
+ echo "Load balancing doesn't work!"
+
+ if [ "$3" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+
+ multihoming_topo_clean
+ exit $test_result
+ fi
+
+ if [ "$3" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+
+ # change IP addresses of destination RLOC
+ echo "set int ip address del host-intervpp12 6.0.3.2/24" | nc 0 5003
+ echo "set int ip address host-intervpp12 6.0.3.20/24" | nc 0 5003
+ echo "set int ip address del host-intervpp12 6:0:3::2/64" | nc 0 5003
+ echo "set int ip address host-intervpp12 6:0:3::20/64" | nc 0 5003
+ post_curl "update-mapping" ${ODL_CONFIG_FILE3}
+
+ ip netns exec vppns1 "${1}" -w 15 -c 1 "${2}"
+ rc=$?
+
+ # test done
+
+ if [ "$3" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+
+ multihoming_topo_clean
+ if [ $rc -ne 0 ] ; then
+ echo "Test failed: No ICMP response received within specified timeout limit!"
+ else
+ echo "Test passed."
+ test_result=0
+ fi
+
+ exit $test_result
+}
--- /dev/null
+source config.sh
+source odl_utils.sh
+source topologies/multihoming_topo_l2.sh
+
+ODL_CONFIG_FILE1="vpp1.json"
+ODL_CONFIG_FILE2="vpp2.json"
+ODL_CONFIG_FILE3="update_vpp2.json"
+
+function maybe_pause
+{
+ if [ "$1" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+}
+
+if [ "$1" == "clean" ] ; then
+ multihoming_topo_clean
+ exit 0
+fi
+
+if [[ $(id -u) != 0 ]]; then
+    echo "Error: run this as root."
+ exit 1
+fi
+
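+# test_multihoming <ping-binary> <dest-eid> [no_setup|wait]
+#   $1 - ping executable to run from vppns1 (ping or ping6)
+#   $2 - destination EID to reach across the LISP tunnel
+#   $3 - "no_setup" skips topology creation, "wait" pauses between steps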
+function test_multihoming
+{
+ if [ "$3" != "no_setup" ] ; then
+ multihoming_topo_setup
+ fi
+
+ maybe_pause $3
+
+ test_result=1
+
+ ip netns exec vppns1 "${1}" -w 15 -c 1 "${2}"
+ rc=$?
+ if [ $rc -ne 0 ] ; then
+ echo "No response received!"
+
+ maybe_pause $3
+ multihoming_topo_clean
+ exit $test_result
+ fi
+
+ maybe_pause $3
+
+ # do some port sweeping to see that load balancing works
+ ip netns exec vppns1 nc -n -z "${2}" 1-1000 > /dev/null 2>&1
+
+    # check that traffic was split roughly evenly between the two paths:
+    # read the rx packet count of host-intervpp11 from vpp1's CLI (localhost:5002)
+    pkts=$(echo "show int" | nc 0 5002 | grep host-intervpp11 | awk '{print $6}' | tr -d '\r')
+
+    if [ "$pkts" -gt 450 ] && [ "$pkts" -lt 550 ] ; then
+ rc=0
+ else
+ rc=1
+ fi
+
+ if [ $rc -ne 0 ] ; then
+ echo "Load balancing doesn't work!"
+
+ maybe_pause $3
+
+ multihoming_topo_clean
+ exit $test_result
+ fi
+
+ maybe_pause $3
+
+ # change IP addresses of destination RLOC
+ echo "set int ip address del host-intervpp12 6.0.3.2/24" | nc 0 5003
+ echo "set int ip address host-intervpp12 6.0.3.20/24" | nc 0 5003
+ echo "set int ip address del host-intervpp12 6:0:3::2/64" | nc 0 5003
+ echo "set int ip address host-intervpp12 6:0:3::20/64" | nc 0 5003
+ post_curl "update-mapping" ${ODL_CONFIG_FILE3}
+
+ ip netns exec vppns1 "${1}" -w 15 -c 1 "${2}"
+ rc=$?
+
+ # test done
+
+ maybe_pause $3
+
+ multihoming_topo_clean
+ if [ $rc -ne 0 ] ; then
+ echo "Test failed: No ICMP response received within specified timeout limit!"
+ else
+ echo "Test passed."
+ test_result=0
+ fi
+
+ exit $test_result
+}
--- /dev/null
+#!/usr/bin/env bash
+
+# Test LISP multihoming functionality (ip4 over ip4)
+
+VPP_LITE_CONF=`pwd`/../configs/vpp_lite_config/multihoming/4o4
+ODL_CONFIG_DIR=`pwd`/../configs/odl/multihoming/4o4
+
+source test_driver/multihoming.sh
+
+test_multihoming ping "6.0.2.2"
--- /dev/null
+#!/usr/bin/env bash
+
+# Test LISP multihoming functionality (ip6 over ip6)
+
+VPP_LITE_CONF=`pwd`/../configs/vpp_lite_config/multihoming/6o6
+ODL_CONFIG_DIR=`pwd`/../configs/odl/multihoming/6o6
+
+source test_driver/multihoming.sh
+
+test_multihoming ping6 "6:0:2::2"
--- /dev/null
+#!/usr/bin/env bash
+
+# Test LISP multihoming functionality (l2 over ip4)
+
+VPP_LITE_CONF=`pwd`/../configs/vpp_lite_config/multihoming/l2o4
+ODL_CONFIG_DIR=`pwd`/../configs/odl/multihoming/l2o4
+
+source test_driver/multihoming_l2.sh
+
+test_multihoming ping "6.0.1.12"
--- /dev/null
+#!/usr/bin/env bash
+
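+# Multihoming topology: two vpp-lite xTRs (vpp1, vpp2), each dual-homed
+# through two bridged inter-vpp namespaces (intervppns1, intervppns2)
+# that emulate two independent ISP paths. Clients live in vppns1/vppns2;
+# ODL (map server/resolver) attaches to the first segment via the "odl"
+# interface at 6.0.3.100 / 6:0:3::100.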
+function multihoming_topo_clean
+{
+ echo "Clearing all VPP instances.."
+ pkill vpp --signal 9
+    rm -f /dev/shm/*
+
+ echo "Cleaning topology.."
+ ip netns exec intervppns1 ifconfig vppbr down
+ ip netns exec intervppns1 brctl delbr vppbr
+ ip link del dev veth_vpp1 &> /dev/null
+ ip link del dev veth_vpp2 &> /dev/null
+ ip link del dev veth_intervpp11 &> /dev/null
+ ip link del dev veth_intervpp12 &> /dev/null
+ ip link del dev veth_odl &> /dev/null
+ ip netns del vppns1 &> /dev/null
+ ip netns del vppns2 &> /dev/null
+ ip netns del intervppns1 &> /dev/null
+
+ ip netns exec intervppns2 ifconfig vppbr down
+ ip netns exec intervppns2 brctl delbr vppbr
+ ip link del dev veth_intervpp21 &> /dev/null
+ ip link del dev veth_intervpp22 &> /dev/null
+ ip netns del intervppns2 &> /dev/null
+
+ if [ "$1" != "no_odl" ] ; then
+ odl_clear_all
+ fi
+}
+
+function multihoming_topo_setup
+{
+
+ # create vpp to clients and inter-vpp namespaces
+ ip netns add vppns1
+ ip netns add vppns2
+ ip netns add intervppns1
+ ip netns add intervppns2
+
+ # create vpp and odl interfaces and set them in intervppns1
+ ip link add veth_intervpp11 type veth peer name intervpp11
+ ip link add veth_intervpp12 type veth peer name intervpp12
+ ip link add veth_odl type veth peer name odl
+ ip link set dev intervpp11 up
+ ip link set dev intervpp12 up
+ ip link set dev odl up
+ ip link set dev veth_intervpp11 up netns intervppns1
+ ip link set dev veth_intervpp12 up netns intervppns1
+ ip link set dev veth_odl up netns intervppns1
+
+ ip link add veth_intervpp21 type veth peer name intervpp21
+ ip link add veth_intervpp22 type veth peer name intervpp22
+ ip link set dev intervpp21 up
+ ip link set dev intervpp22 up
+ ip link set dev veth_intervpp21 up netns intervppns2
+ ip link set dev veth_intervpp22 up netns intervppns2
+
+ # create bridge in intervppns1 and add vpp and odl interfaces
+ ip netns exec intervppns1 brctl addbr vppbr
+ ip netns exec intervppns1 brctl addif vppbr veth_intervpp11
+ ip netns exec intervppns1 brctl addif vppbr veth_intervpp12
+ ip netns exec intervppns1 brctl addif vppbr veth_odl
+ ip netns exec intervppns1 ifconfig vppbr up
+
+    # create bridge in intervppns2 and add vpp interfaces
+    # (the odl interface lives in intervppns1 only)
+    ip netns exec intervppns2 brctl addbr vppbr
+    ip netns exec intervppns2 brctl addif vppbr veth_intervpp21
+    ip netns exec intervppns2 brctl addif vppbr veth_intervpp22
+    ip netns exec intervppns2 ifconfig vppbr up
+
+ # create and configure 1st veth client to vpp pair
+ ip link add veth_vpp1 type veth peer name vpp1
+ ip link set dev vpp1 up
+ ip link set dev veth_vpp1 up netns vppns1
+
+ # create and configure 2nd veth client to vpp pair
+ ip link add veth_vpp2 type veth peer name vpp2
+ ip link set dev vpp2 up
+ ip link set dev veth_vpp2 up netns vppns2
+
+ ip netns exec vppns1 \
+ bash -c "
+ ip link set dev lo up
+ ip addr add 6.0.1.2/24 dev veth_vpp1
+ ip route add 6.0.2.0/24 via 6.0.1.1
+ ip addr add 6:0:1::2/64 dev veth_vpp1
+ ip route add 6:0:2::0/64 via 6:0:1::1
+ "
+
+ ip netns exec vppns2 \
+ bash -c "
+ ip link set dev lo up
+ ip addr add 6.0.2.2/24 dev veth_vpp2
+ ip route add 6.0.1.0/24 via 6.0.2.1
+ ip addr add 6:0:2::2/64 dev veth_vpp2
+ ip route add 6:0:1::0/64 via 6:0:2::1
+ "
+
+ # set odl iface ip and disable checksum offloading
+ ip addr add 6.0.3.100/24 dev odl
+ ip addr add 6:0:3::100/64 dev odl
+ ethtool --offload odl rx off tx off
+
+ # start vpp1 and vpp2 in separate chroot
+ ${VPP_LITE_BIN} \
+ unix { log /tmp/vpp1.log cli-listen \
+ localhost:5002 full-coredump \
+ exec ${VPP_LITE_CONF}/vpp1.config } \
+ api-trace { on } api-segment {prefix xtr1}
+
+ ${VPP_LITE_BIN} \
+ unix { log /tmp/vpp2.log cli-listen \
+ localhost:5003 full-coredump \
+ exec ${VPP_LITE_CONF}/vpp2.config } \
+ api-trace { on } api-segment {prefix xtr2}
+
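+    # push both EID-to-RLOC mappings to ODL; post_curl is assumed to be
+    # provided by the odl_utils.sh sourced in the test driver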
+ if [ "$1" != "no_odl" ] ; then
+ post_curl "add-mapping" ${ODL_CONFIG_FILE1}
+ post_curl "add-mapping" ${ODL_CONFIG_FILE2}
+ fi
+}
+
--- /dev/null
+#!/usr/bin/env bash
+
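+# L2 multihoming topology: same layout as multihoming_topo.sh, but both
+# clients share one subnet (6.0.1.0/24, 6:0:1::/64) and hang off bridge
+# domain 10 on the xTRs, so frames are carried L2-over-IP4.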
+function multihoming_topo_clean
+{
+ echo "Clearing all VPP instances.."
+ pkill vpp --signal 9
+    rm -f /dev/shm/*
+
+ echo "Cleaning topology.."
+ ip netns exec intervppns1 ifconfig vppbr down
+ ip netns exec intervppns1 brctl delbr vppbr
+ ip link del dev veth_vpp1 &> /dev/null
+ ip link del dev veth_vpp2 &> /dev/null
+ ip link del dev veth_intervpp11 &> /dev/null
+ ip link del dev veth_intervpp12 &> /dev/null
+ ip link del dev veth_odl &> /dev/null
+ ip netns del vppns1 &> /dev/null
+ ip netns del vppns2 &> /dev/null
+ ip netns del intervppns1 &> /dev/null
+
+ ip netns exec intervppns2 ifconfig vppbr down
+ ip netns exec intervppns2 brctl delbr vppbr
+ ip link del dev veth_intervpp21 &> /dev/null
+ ip link del dev veth_intervpp22 &> /dev/null
+ ip netns del intervppns2 &> /dev/null
+
+ if [ "$1" != "no_odl" ] ; then
+ odl_clear_all
+ fi
+}
+
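+# install static ARP entries in each client namespace for the peer's
+# MAC, so the ping test does not depend on ARP resolution across the
+# not-yet-populated L2 overlay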
+function set_arp
+{
+ mac1=`ip netns exec vppns1 ip a show dev veth_vpp1 | grep "link/ether" | awk '{print $2}'`
+ ip netns exec vppns2 arp -s 6.0.1.11 $mac1
+
+ mac2=`ip netns exec vppns2 ip a show dev veth_vpp2 | grep "link/ether" | awk '{print $2}'`
+ ip netns exec vppns1 arp -s 6.0.1.12 $mac2
+}
+
+function multihoming_topo_setup
+{
+
+ # create vpp to clients and inter-vpp namespaces
+ ip netns add vppns1
+ ip netns add vppns2
+ ip netns add intervppns1
+ ip netns add intervppns2
+
+ # create vpp and odl interfaces and set them in intervppns1
+ ip link add veth_intervpp11 type veth peer name intervpp11
+ ip link add veth_intervpp12 type veth peer name intervpp12
+ ip link add veth_odl type veth peer name odl
+ ip link set dev intervpp11 up
+ ip link set dev intervpp12 up
+ ip link set dev odl up
+ ip link set dev veth_intervpp11 up netns intervppns1
+ ip link set dev veth_intervpp12 up netns intervppns1
+ ip link set dev veth_odl up netns intervppns1
+
+ ip link add veth_intervpp21 type veth peer name intervpp21
+ ip link add veth_intervpp22 type veth peer name intervpp22
+ ip link set dev intervpp21 up
+ ip link set dev intervpp22 up
+ ip link set dev veth_intervpp21 up netns intervppns2
+ ip link set dev veth_intervpp22 up netns intervppns2
+
+ # create bridge in intervppns1 and add vpp and odl interfaces
+ ip netns exec intervppns1 brctl addbr vppbr
+ ip netns exec intervppns1 brctl addif vppbr veth_intervpp11
+ ip netns exec intervppns1 brctl addif vppbr veth_intervpp12
+ ip netns exec intervppns1 brctl addif vppbr veth_odl
+ ip netns exec intervppns1 ifconfig vppbr up
+
+    # create bridge in intervppns2 and add vpp interfaces
+    # (the odl interface lives in intervppns1 only)
+    ip netns exec intervppns2 brctl addbr vppbr
+    ip netns exec intervppns2 brctl addif vppbr veth_intervpp21
+    ip netns exec intervppns2 brctl addif vppbr veth_intervpp22
+    ip netns exec intervppns2 ifconfig vppbr up
+
+ # create and configure 1st veth client to vpp pair
+ ip link add veth_vpp1 type veth peer name vpp1
+ ip link set dev vpp1 up
+ ip link set dev veth_vpp1 address 08:11:11:11:11:11
+ ip link set dev veth_vpp1 up netns vppns1
+
+ # create and configure 2nd veth client to vpp pair
+ ip link add veth_vpp2 type veth peer name vpp2
+ ip link set dev vpp2 up
+ ip link set dev veth_vpp2 address 08:22:22:22:22:22
+ ip link set dev veth_vpp2 up netns vppns2
+
+ ip netns exec vppns1 \
+ bash -c "
+ ip link set dev lo up
+ ip addr add 6.0.1.11/24 dev veth_vpp1
+ ip addr add 6:0:1::11/64 dev veth_vpp1
+ "
+
+ ip netns exec vppns2 \
+ bash -c "
+ ip link set dev lo up
+ ip addr add 6.0.1.12/24 dev veth_vpp2
+ ip addr add 6:0:1::12/64 dev veth_vpp2
+ "
+
+ # set odl iface ip and disable checksum offloading
+ ip addr add 6.0.3.100/24 dev odl
+ ip addr add 6:0:3::100/64 dev odl
+ ethtool --offload odl rx off tx off
+
+ # start vpp1 and vpp2 in separate chroot
+ ${VPP_LITE_BIN} \
+ unix { log /tmp/vpp1.log cli-listen \
+ localhost:5002 full-coredump \
+ exec ${VPP_LITE_CONF}/vpp1.config } \
+ api-trace { on } api-segment {prefix xtr1}
+
+ ${VPP_LITE_BIN} \
+ unix { log /tmp/vpp2.log cli-listen \
+ localhost:5003 full-coredump \
+ exec ${VPP_LITE_CONF}/vpp2.config } \
+ api-trace { on } api-segment {prefix xtr2}
+
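+    # push both EID-to-RLOC mappings to ODL; post_curl is assumed to be
+    # provided by the odl_utils.sh sourced in the test driver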
+ if [ "$1" != "no_odl" ] ; then
+ post_curl "add-mapping" ${ODL_CONFIG_FILE1}
+ post_curl "add-mapping" ${ODL_CONFIG_FILE2}
+ fi
+
+ set_arp
+}
+