Refactor IP input checks for re-use at MPLS disposition
diff --git a/test/test_mpls.py b/test/test_mpls.py
index 700b709..d265e85 100644
--- a/test/test_mpls.py
+++ b/test/test_mpls.py
@@ -6,7 +6,7 @@ import socket
 from framework import VppTestCase, VppTestRunner
 from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
     VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
-    MRouteItfFlags, MRouteEntryFlags
+    MRouteItfFlags, MRouteEntryFlags, DpoProto, VppIpTable, VppMplsTable
 from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
 
 from scapy.packet import Raw
@@ -16,6 +16,38 @@ from scapy.layers.inet6 import IPv6
 from scapy.contrib.mpls import MPLS
 
 
+def verify_filter(capture, sent):
+    if not len(capture) == len(sent):
+        # filter out any IPv6 RAs from the capture; build a new list so we
+        # don't remove items from the list while iterating over it
+        capture = [p for p in capture if not p.haslayer(IPv6)]
+    return capture
+
+
+def verify_mpls_stack(tst, rx, mpls_labels, ttl=255, num=0):
+    # the rx'd packet has the MPLS label popped
+    eth = rx[Ether]
+    tst.assertEqual(eth.type, 0x8847)
+
+    rx_mpls = rx[MPLS]
+
+    for ii in range(len(mpls_labels)):
+        tst.assertEqual(rx_mpls.label, mpls_labels[ii])
+        tst.assertEqual(rx_mpls.cos, 0)
+        if ii == num:
+            tst.assertEqual(rx_mpls.ttl, ttl)
+        else:
+            tst.assertEqual(rx_mpls.ttl, 255)
+        if ii == len(mpls_labels) - 1:
+            tst.assertEqual(rx_mpls.s, 1)
+        else:
+            # not end of stack
+            tst.assertEqual(rx_mpls.s, 0)
+            # pop the label to expose the next
+            rx_mpls = rx_mpls[MPLS].payload
+
+
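verify_filter() and verify_mpls_stack() are now plain module-level functions, so any test class in this file (not just TestMPLS) can reuse them. A minimal sketch of the calling pattern, with illustrative label and TTL values and the scapy layers already imported at the top of the file:

    rx = self.pg0.get_capture()
    rx = verify_filter(rx, tx)      # discard any stray IPv6 RAs first
    for p in rx:
        # expect labels [44, 46]; the bottom-of-stack label carries the
        # copied IP TTL (63), the outer label stays at 255
        verify_mpls_stack(self, p, [44, 46], ttl=63, num=1)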
 class TestMPLS(VppTestCase):
     """ MPLS Test Case """
 
@@ -28,9 +60,23 @@ class TestMPLS(VppTestCase):
         # setup both interfaces
         # assign them different tables.
         table_id = 0
+        self.tables = []
+
+        tbl = VppMplsTable(self, 0)
+        tbl.add_vpp_config()
+        self.tables.append(tbl)
 
         for i in self.pg_interfaces:
             i.admin_up()
+
+            if table_id != 0:
+                tbl = VppIpTable(self, table_id)
+                tbl.add_vpp_config()
+                self.tables.append(tbl)
+                tbl = VppIpTable(self, table_id, is_ip6=1)
+                tbl.add_vpp_config()
+                self.tables.append(tbl)
+
             i.set_table_ip4(table_id)
             i.set_table_ip6(table_id)
             i.config_ip4()
@@ -41,12 +87,15 @@ class TestMPLS(VppTestCase):
             table_id += 1
 
     def tearDown(self):
-        super(TestMPLS, self).tearDown()
         for i in self.pg_interfaces:
             i.unconfig_ip4()
             i.unconfig_ip6()
             i.ip6_disable()
+            i.set_table_ip4(0)
+            i.set_table_ip6(0)
+            i.disable_mpls()
             i.admin_down()
+        super(TestMPLS, self).tearDown()
 
     # the default of 64 matches the IP packet TTL default
     def create_stream_labelled_ip4(
@@ -57,6 +106,7 @@ class TestMPLS(VppTestCase):
             ping=0,
             ip_itf=None,
             dst_ip=None,
+            chksum=None,
             n=257):
         self.reset_packet_infos()
         pkts = []
@@ -84,6 +134,8 @@ class TestMPLS(VppTestCase):
                             dst=ip_itf.local_ip4) /
                      ICMP())
 
+            if chksum:
+                p[IP].chksum = chksum
             info.data = p.copy()
             pkts.append(p)
         return pkts
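The new chksum argument works because Scapy only auto-computes IP.chksum when the field is left unset; an explicit value is emitted as-is, which lets a test build a deliberately corrupt packet for the checksum check performed at MPLS disposition. A small illustrative sketch (addresses arbitrary):

    p = IP(src="10.0.0.1", dst="232.1.1.1") / ICMP()
    p.chksum = 1       # explicit value suppresses Scapy's checksum calculation
    wire = str(p)      # the bogus checksum 0x0001 goes on the wire unchanged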
@@ -102,7 +154,10 @@ class TestMPLS(VppTestCase):
             pkts.append(p)
         return pkts
 
-    def create_stream_labelled_ip6(self, src_if, mpls_label, mpls_ttl):
+    def create_stream_labelled_ip6(self, src_if, mpls_label, mpls_ttl,
+                                   dst_ip=None, hlim=64):
+        if dst_ip is None:
+            dst_ip = src_if.remote_ip6
         self.reset_packet_infos()
         pkts = []
         for i in range(0, 257):
@@ -110,25 +165,16 @@ class TestMPLS(VppTestCase):
             payload = self.info_to_payload(info)
             p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                  MPLS(label=mpls_label, ttl=mpls_ttl) /
-                 IPv6(src=src_if.remote_ip6, dst=src_if.remote_ip6) /
+                 IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) /
                  UDP(sport=1234, dport=1234) /
                  Raw(payload))
             info.data = p.copy()
             pkts.append(p)
         return pkts
 
-    @staticmethod
-    def verify_filter(capture, sent):
-        if not len(capture) == len(sent):
-            # filter out any IPv6 RAs from the capture
-            for p in capture:
-                if p.haslayer(IPv6):
-                    capture.remove(p)
-        return capture
-
     def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0):
         try:
-            capture = self.verify_filter(capture, sent)
+            capture = verify_filter(capture, sent)
 
             self.assertEqual(len(capture), len(sent))
 
@@ -155,33 +201,10 @@ class TestMPLS(VppTestCase):
         except:
             raise
 
-    def verify_mpls_stack(self, rx, mpls_labels, ttl=255, num=0):
-        # the rx'd packet has the MPLS label popped
-        eth = rx[Ether]
-        self.assertEqual(eth.type, 0x8847)
-
-        rx_mpls = rx[MPLS]
-
-        for ii in range(len(mpls_labels)):
-            self.assertEqual(rx_mpls.label, mpls_labels[ii])
-            self.assertEqual(rx_mpls.cos, 0)
-            if ii == num:
-                self.assertEqual(rx_mpls.ttl, ttl)
-            else:
-                self.assertEqual(rx_mpls.ttl, 255)
-
-            if ii == len(mpls_labels) - 1:
-                self.assertEqual(rx_mpls.s, 1)
-            else:
-                # not end of stack
-                self.assertEqual(rx_mpls.s, 0)
-                # pop the label to expose the next
-                rx_mpls = rx_mpls[MPLS].payload
-
     def verify_capture_labelled_ip4(self, src_if, capture, sent,
                                     mpls_labels):
         try:
-            capture = self.verify_filter(capture, sent)
+            capture = verify_filter(capture, sent)
 
             self.assertEqual(len(capture), len(sent))
 
@@ -192,8 +215,8 @@ class TestMPLS(VppTestCase):
                 rx_ip = rx[IP]
 
                 # the MPLS TTL is copied from the IP
-                self.verify_mpls_stack(
-                    rx, mpls_labels, rx_ip.ttl, len(mpls_labels) - 1)
+                verify_mpls_stack(self, rx, mpls_labels, rx_ip.ttl,
+                                  len(mpls_labels) - 1)
 
                 self.assertEqual(rx_ip.src, tx_ip.src)
                 self.assertEqual(rx_ip.dst, tx_ip.dst)
@@ -203,9 +226,12 @@ class TestMPLS(VppTestCase):
         except:
             raise
 
-    def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels):
+    def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels,
+                                    ttl=255, top=None):
+        if top is None:
+            top = len(mpls_labels) - 1
         try:
-            capture = self.verify_filter(capture, sent)
+            capture = verify_filter(capture, sent)
 
             self.assertEqual(len(capture), len(sent))
 
@@ -216,8 +242,7 @@ class TestMPLS(VppTestCase):
                 rx_ip = rx[IP]
 
                 # the MPLS TTL is 255 since it enters a new tunnel
-                self.verify_mpls_stack(
-                    rx, mpls_labels, 255, len(mpls_labels) - 1)
+                verify_mpls_stack(self, rx, mpls_labels, ttl, top)
 
                 self.assertEqual(rx_ip.src, tx_ip.src)
                 self.assertEqual(rx_ip.dst, tx_ip.dst)
@@ -230,13 +255,13 @@ class TestMPLS(VppTestCase):
     def verify_capture_labelled(self, src_if, capture, sent,
                                 mpls_labels, ttl=254, num=0):
         try:
-            capture = self.verify_filter(capture, sent)
+            capture = verify_filter(capture, sent)
 
             self.assertEqual(len(capture), len(sent))
 
             for i in range(len(capture)):
                 rx = capture[i]
-                self.verify_mpls_stack(rx, mpls_labels, ttl, num)
+                verify_mpls_stack(self, rx, mpls_labels, ttl, num)
         except:
             raise
 
@@ -617,6 +642,26 @@ class TestMPLS(VppTestCase):
         rx = self.pg0.get_capture()
         self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46])
 
+        #
+        # add a labelled route through the new tunnel
+        #
+        route_10_0_0_4 = VppIpRoute(self, "10.0.0.4", 32,
+                                    [VppRoutePath("0.0.0.0",
+                                                  mpls_tun._sw_if_index,
+                                                  labels=[33])])
+        route_10_0_0_4.add_vpp_config()
+
+        self.vapi.cli("clear trace")
+        tx = self.create_stream_ip4(self.pg0, "10.0.0.4")
+        self.pg0.add_stream(tx)
+
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx = self.pg0.get_capture()
+        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46, 33],
+                                         ttl=63, top=2)
+
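The ttl=63/top=2 arguments describe how the TTLs land on the wire in this case: the IP packet enters with TTL 64, forwarding decrements it to 63 and copies it into the label imposed by the route (33, stack index 2), while the tunnel's own labels are imposed with TTL 255. A sketch of the stack verify_mpls_stack() is asked to check here:

    # outer -> inner: (label, ttl, bottom-of-stack)
    expected_stack = [(44, 255, 0),
                      (46, 255, 0),
                      (33,  63, 1)]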
     def test_v4_exp_null(self):
         """ MPLS V4 Explicit NULL test """
 
@@ -745,6 +790,26 @@ class TestMPLS(VppTestCase):
         rx = self.pg1.get_capture(packet_count)
         self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
 
+        #
+        # Double pop
+        #
+        route_36_neos = VppMplsRoute(self, 36, 0,
+                                     [VppRoutePath("0.0.0.0",
+                                                   0xffffffff)])
+        route_36_neos.add_vpp_config()
+
+        self.vapi.cli("clear trace")
+        tx = self.create_stream_labelled_ip4(self.pg0, [36, 35],
+                                             ping=1, ip_itf=self.pg1)
+        self.pg0.add_stream(tx)
+
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx = self.pg1.get_capture(len(tx))
+        self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
+
+        route_36_neos.remove_vpp_config()
         route_35_eos.remove_vpp_config()
         route_34_eos.remove_vpp_config()
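For the double-pop case the stream carries two labels and only the inner one is bottom-of-stack: label 36 is popped by the new non-EOS route and label 35 by the existing EOS route, after which the IP packet is disposed towards pg1. A sketch of the wire format create_stream_labelled_ip4() builds for this test (s-bit values shown explicitly for clarity; addresses illustrative):

    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         MPLS(label=36, ttl=64, s=0) /    # non-EOS, popped by route 36
         MPLS(label=35, ttl=64, s=1) /    # EOS, popped by route 35
         IP(src=self.pg0.remote_ip4, dst=self.pg1.local_ip4) /
         ICMP())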
 
@@ -905,8 +970,8 @@ class TestMPLS(VppTestCase):
         rx = self.pg3.get_capture(257)
         self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [43])
 
-    def test_mcast_tail(self):
-        """ MPLS Multicast Tail """
+    def test_mcast_ip4_tail(self):
+        """ MPLS IPv4 Multicast Tail """
 
         #
         # Add a multicast route that will forward the traffic
@@ -963,6 +1028,14 @@ class TestMPLS(VppTestCase):
         rx = self.pg1.get_capture(257)
         self.verify_capture_ip4(self.pg1, rx, tx)
 
+        #
+        # disposed packets have an invalid IPv4 checksum
+        #
+        tx = self.create_stream_labelled_ip4(self.pg0, [34],
+                                             dst_ip="232.1.1.1", n=65,
+                                             chksum=1)
+        self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")
+
         #
         # set the RPF-ID of the entry to not match the input packet's
         #
@@ -970,6 +1043,80 @@ class TestMPLS(VppTestCase):
         tx = self.create_stream_labelled_ip4(self.pg0, [34],
                                              dst_ip="232.1.1.1")
         self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
+        self.logger.error(self.vapi.cli("sh error"))
+
+    def test_mcast_ip6_tail(self):
+        """ MPLS IPv6 Multicast Tail """
+
+        #
+        # Add a multicast route that will forward the traffic
+        # post-disposition
+        #
+        route_ff = VppIpMRoute(
+            self,
+            "::",
+            "ff01::1", 32,
+            MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+            table_id=1,
+            paths=[VppMRoutePath(self.pg1.sw_if_index,
+                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
+            is_ip6=1)
+        route_ff.add_vpp_config()
+
+        #
+        # An interface receive label that maps traffic to RX on interface
+        # pg1.
+        # The packet is injected on pg0, which is in table 0, and an
+        # rpf-id lookup matches a route in table 1; if the packet
+        # egresses, then we must have matched the route in table 1.
+        #
+        route_34_eos = VppMplsRoute(
+            self, 34, 1,
+            [VppRoutePath("::",
+                          self.pg1.sw_if_index,
+                          nh_table_id=1,
+                          rpf_id=55,
+                          proto=DpoProto.DPO_PROTO_IP6)],
+            is_multicast=1)
+
+        route_34_eos.add_vpp_config()
+
+        #
+        # Drop due to interface lookup miss
+        #
+        tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
+                                             dst_ip="ff01::1")
+
+        #
+        # set the RPF-ID of the entry to match the input packet's
+        #
+        route_ff.update_rpf_id(55)
+
+        tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
+                                             dst_ip="ff01::1")
+        self.pg0.add_stream(tx)
+
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx = self.pg1.get_capture(257)
+        self.verify_capture_ip6(self.pg1, rx, tx)
+
+        #
+        # disposed packets have hop-limit = 1
+        #
+        tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
+                                             dst_ip="ff01::1", hlim=1)
+        self.send_and_assert_no_replies(self.pg0, tx, "Hop Limt Expired")
+
+        #
+        # set the RPF-ID of the entry to not match the input packet's
+        #
+        route_ff.update_rpf_id(56)
+        tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
+                                             dst_ip="ff01::1")
+        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
 
 
 class TestMPLSDisabled(VppTestCase):
@@ -981,6 +1128,9 @@ class TestMPLSDisabled(VppTestCase):
         # create 2 pg interfaces
         self.create_pg_interfaces(range(2))
 
+        self.tbl = VppMplsTable(self, 0)
+        self.tbl.add_vpp_config()
+
         # PG0 is MPLS enabled
         self.pg0.admin_up()
         self.pg0.config_ip4()
@@ -991,11 +1141,13 @@ class TestMPLSDisabled(VppTestCase):
         self.pg1.admin_up()
 
     def tearDown(self):
-        super(TestMPLSDisabled, self).tearDown()
         for i in self.pg_interfaces:
             i.unconfig_ip4()
             i.admin_down()
 
+        self.pg0.disable_mpls()
+        super(TestMPLSDisabled, self).tearDown()
+
     def send_and_assert_no_replies(self, intf, pkts, remark):
         intf.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
@@ -1054,5 +1206,585 @@ class TestMPLSDisabled(VppTestCase):
         self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")
 
 
+class TestMPLSPIC(VppTestCase):
+    """ MPLS PIC edge convergence """
+
+    def setUp(self):
+        super(TestMPLSPIC, self).setUp()
+
+        # create 2 pg interfaces
+        self.create_pg_interfaces(range(4))
+
+        mpls_tbl = VppMplsTable(self, 0)
+        mpls_tbl.add_vpp_config()
+        tbl4 = VppIpTable(self, 1)
+        tbl4.add_vpp_config()
+        tbl6 = VppIpTable(self, 1, is_ip6=1)
+        tbl6.add_vpp_config()
+
+        # core links
+        self.pg0.admin_up()
+        self.pg0.config_ip4()
+        self.pg0.resolve_arp()
+        self.pg0.enable_mpls()
+        self.pg1.admin_up()
+        self.pg1.config_ip4()
+        self.pg1.resolve_arp()
+        self.pg1.enable_mpls()
+
+        # VRF (customer facing) link
+        self.pg2.admin_up()
+        self.pg2.set_table_ip4(1)
+        self.pg2.config_ip4()
+        self.pg2.resolve_arp()
+        self.pg2.set_table_ip6(1)
+        self.pg2.config_ip6()
+        self.pg2.resolve_ndp()
+        self.pg3.admin_up()
+        self.pg3.set_table_ip4(1)
+        self.pg3.config_ip4()
+        self.pg3.resolve_arp()
+        self.pg3.set_table_ip6(1)
+        self.pg3.config_ip6()
+        self.pg3.resolve_ndp()
+
+    def tearDown(self):
+        self.pg0.disable_mpls()
+        self.pg1.disable_mpls()
+        for i in self.pg_interfaces:
+            i.unconfig_ip4()
+            i.unconfig_ip6()
+            i.set_table_ip4(0)
+            i.set_table_ip6(0)
+            i.admin_down()
+        super(TestMPLSPIC, self).tearDown()
+
+    def test_mpls_ibgp_pic(self):
+        """ MPLS iBGP PIC edge convergence
+
+        1) setup many iBGP VPN routes via a pair of iBGP peers.
+        2) check ECMP forwarding to these peers.
+        3) withdraw the IGP route to one of these peers.
+        4) check forwarding continues to the remaining peer.
+        """
+
+        #
+        # IGP+LDP core routes
+        #
+        core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32,
+                                    [VppRoutePath(self.pg0.remote_ip4,
+                                                  self.pg0.sw_if_index,
+                                                  labels=[45])])
+        core_10_0_0_45.add_vpp_config()
+
+        core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32,
+                                    [VppRoutePath(self.pg1.remote_ip4,
+                                                  self.pg1.sw_if_index,
+                                                  labels=[46])])
+        core_10_0_0_46.add_vpp_config()
+
+        #
+        # Lots of VPN routes. We need more than 64 so VPP will build
+        # the fast convergence indirection
+        #
+        vpn_routes = []
+        pkts = []
+        for ii in range(64):
+            dst = "192.168.1.%d" % ii
+            vpn_routes.append(VppIpRoute(self, dst, 32,
+                                         [VppRoutePath("10.0.0.45",
+                                                       0xffffffff,
+                                                       labels=[145],
+                                                       is_resolve_host=1),
+                                          VppRoutePath("10.0.0.46",
+                                                       0xffffffff,
+                                                       labels=[146],
+                                                       is_resolve_host=1)],
+                                         table_id=1))
+            vpn_routes[ii].add_vpp_config()
+
+            pkts.append(Ether(dst=self.pg2.local_mac,
+                              src=self.pg2.remote_mac) /
+                        IP(src=self.pg2.remote_ip4, dst=dst) /
+                        UDP(sport=1234, dport=1234) /
+                        Raw('\xa5' * 100))
+
+        #
+        # Send the packet stream (one pkt to each VPN route)
+        #  - expect a 50-50 split of the traffic
+        #
+        self.pg2.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
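+        # _get_capture() returns whatever arrived within the timeout without
+        # asserting an exact packet count; the per-path split of an ECMP
+        # load-balance is not deterministic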
+        rx0 = self.pg0._get_capture(1)
+        rx1 = self.pg1._get_capture(1)
+
+        # not testing the LB hashing algorithm so we're not concerned
+        # with the split ratio, just as long as neither is 0
+        self.assertNotEqual(0, len(rx0))
+        self.assertNotEqual(0, len(rx1))
+
+        #
+        # use a test CLI command to stop the FIB walk process; this
+        # will prevent the FIB converging the VPN routes and thus allow
+        # us to probe the interim (post-fail, pre-converge) state
+        #
+        self.vapi.ppcli("test fib-walk-process disable")
+
+        #
+        # Withdraw one of the IGP routes
+        #
+        core_10_0_0_46.remove_vpp_config()
+
+        #
+        # now all packets should be forwarded through the remaining peer
+        #
+        self.vapi.ppcli("clear trace")
+        self.pg2.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg0.get_capture(len(pkts))
+
+        #
+        # enable the FIB walk process to converge the FIB
+        #
+        self.vapi.ppcli("test fib-walk-process enable")
+
+        #
+        # packets should still be forwarded through the remaining peer
+        #
+        self.pg2.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg0.get_capture(64)
+
+        #
+        # Add the IGP route back and we return to load-balancing
+        #
+        core_10_0_0_46.add_vpp_config()
+
+        self.pg2.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg0._get_capture(1)
+        rx1 = self.pg1._get_capture(1)
+        self.assertNotEqual(0, len(rx0))
+        self.assertNotEqual(0, len(rx1))
+
+    def test_mpls_ebgp_pic(self):
+        """ MPLS eBGP PIC edge convergence
+
+        1) setup many eBGP VPN routes via a pair of eBGP peers
+        2) check ECMP forwarding to these peers
+        3) withdraw one eBGP path - expect LB across remaining eBGP
+        """
+
+        #
+        # Lots of VPN routes. We need more than 64 so VPP will build
+        # the fast convergence indirection
+        #
+        vpn_routes = []
+        vpn_bindings = []
+        pkts = []
+        for ii in range(64):
+            dst = "192.168.1.%d" % ii
+            local_label = 1600 + ii
+            vpn_routes.append(VppIpRoute(self, dst, 32,
+                                         [VppRoutePath(self.pg2.remote_ip4,
+                                                       0xffffffff,
+                                                       nh_table_id=1,
+                                                       is_resolve_attached=1),
+                                          VppRoutePath(self.pg3.remote_ip4,
+                                                       0xffffffff,
+                                                       nh_table_id=1,
+                                                       is_resolve_attached=1)],
+                                         table_id=1))
+            vpn_routes[ii].add_vpp_config()
+
+            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
+                                              ip_table_id=1))
+            vpn_bindings[ii].add_vpp_config()
+
+            pkts.append(Ether(dst=self.pg0.local_mac,
+                              src=self.pg0.remote_mac) /
+                        MPLS(label=local_label, ttl=64) /
+                        IP(src=self.pg0.remote_ip4, dst=dst) /
+                        UDP(sport=1234, dport=1234) /
+                        Raw('\xa5' * 100))
+
+        self.pg0.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg2._get_capture(1)
+        rx1 = self.pg3._get_capture(1)
+        self.assertNotEqual(0, len(rx0))
+        self.assertNotEqual(0, len(rx1))
+
+        #
+        # use a test CLI command to stop the FIB walk process; this
+        # will prevent the FIB converging the VPN routes and thus allow
+        # us to probe the interim (post-fail, pre-converge) state
+        #
+        self.vapi.ppcli("test fib-walk-process disable")
+
+        #
+        # withdraw the connected prefix on the interface.
+        #
+        self.pg2.unconfig_ip4()
+
+        #
+        # now all packets should be forwarded through the remaining peer
+        #
+        self.pg0.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg3.get_capture(len(pkts))
+
+        #
+        # enable the FIB walk process to converge the FIB
+        #
+        self.vapi.ppcli("test fib-walk-process enable")
+        self.pg0.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg3.get_capture(len(pkts))
+
+        #
+        # put the connected prefix back
+        #
+        self.pg2.config_ip4()
+
+        self.pg0.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg2._get_capture(1)
+        rx1 = self.pg3._get_capture(1)
+        self.assertNotEqual(0, len(rx0))
+        self.assertNotEqual(0, len(rx1))
+
+    def test_mpls_v6_ebgp_pic(self):
+        """ MPLSv6 eBGP PIC edge convergence
+
+        1) setup many eBGP VPNv6 routes via a pair of eBGP peers
+        2) check ECMP forwarding to these peers
+        3) withdraw one eBGP path - expect LB across remaining eBGP
+        """
+
+        #
+        # Lots of VPN routes. We need more than 64 so VPP will build
+        # the fast convergence indirection
+        #
+        vpn_routes = []
+        vpn_bindings = []
+        pkts = []
+        for ii in range(64):
+            dst = "3000::%d" % ii
+            local_label = 1600 + ii
+            vpn_routes.append(VppIpRoute(
+                self, dst, 128,
+                [VppRoutePath(self.pg2.remote_ip6,
+                              0xffffffff,
+                              nh_table_id=1,
+                              is_resolve_attached=1,
+                              proto=DpoProto.DPO_PROTO_IP6),
+                 VppRoutePath(self.pg3.remote_ip6,
+                              0xffffffff,
+                              nh_table_id=1,
+                              proto=DpoProto.DPO_PROTO_IP6,
+                              is_resolve_attached=1)],
+                table_id=1,
+                is_ip6=1))
+            vpn_routes[ii].add_vpp_config()
+
+            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
+                                              ip_table_id=1,
+                                              is_ip6=1))
+            vpn_bindings[ii].add_vpp_config()
+
+            pkts.append(Ether(dst=self.pg0.local_mac,
+                              src=self.pg0.remote_mac) /
+                        MPLS(label=local_label, ttl=64) /
+                        IPv6(src=self.pg0.remote_ip6, dst=dst) /
+                        UDP(sport=1234, dport=1234) /
+                        Raw('\xa5' * 100))
+
+        self.pg0.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg2._get_capture(1)
+        rx1 = self.pg3._get_capture(1)
+        self.assertNotEqual(0, len(rx0))
+        self.assertNotEqual(0, len(rx1))
+
+        #
+        # use a test CLI command to stop the FIB walk process; this
+        # will prevent the FIB converging the VPN routes and thus allow
+        # us to probe the interim (post-fail, pre-converge) state
+        #
+        self.vapi.ppcli("test fib-walk-process disable")
+
+        #
+        # withdraw the connected prefix on the interface.
+        # and shutdown the interface so the ND cache is flushed.
+        #
+        self.pg2.unconfig_ip6()
+        self.pg2.admin_down()
+
+        #
+        # now all packets should be forwarded through the remaining peer
+        #
+        self.pg0.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg3.get_capture(len(pkts))
+
+        #
+        # enable the FIB walk process to converge the FIB
+        #
+        self.vapi.ppcli("test fib-walk-process enable")
+        self.pg0.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg3.get_capture(len(pkts))
+
+        #
+        # put the connected prefix back
+        #
+        self.pg2.admin_up()
+        self.pg2.config_ip6()
+
+        self.pg0.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg2._get_capture(1)
+        rx1 = self.pg3._get_capture(1)
+        self.assertNotEqual(0, len(rx0))
+        self.assertNotEqual(0, len(rx1))
+
+
+class TestMPLSL2(VppTestCase):
+    """ MPLS-L2 """
+
+    def setUp(self):
+        super(TestMPLSL2, self).setUp()
+
+        # create 2 pg interfaces
+        self.create_pg_interfaces(range(2))
+
+        # create the default MPLS table
+        self.tables = []
+        tbl = VppMplsTable(self, 0)
+        tbl.add_vpp_config()
+        self.tables.append(tbl)
+
+        # use pg0 as the core facing interface
+        self.pg0.admin_up()
+        self.pg0.config_ip4()
+        self.pg0.resolve_arp()
+        self.pg0.enable_mpls()
+
+        # use the other 2 for customer facing L2 links
+        for i in self.pg_interfaces[1:]:
+            i.admin_up()
+
+    def tearDown(self):
+        for i in self.pg_interfaces[1:]:
+            i.admin_down()
+
+        self.pg0.disable_mpls()
+        self.pg0.unconfig_ip4()
+        self.pg0.admin_down()
+        super(TestMPLSL2, self).tearDown()
+
+    def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels,
+                                         ttl=255, top=None):
+        if top is None:
+            top = len(mpls_labels) - 1
+
+        capture = verify_filter(capture, sent)
+
+        self.assertEqual(len(capture), len(sent))
+
+        for i in range(len(capture)):
+            tx = sent[i]
+            rx = capture[i]
+
+            # the MPLS TTL is 255 by default since it enters a new tunnel
+            verify_mpls_stack(self, rx, mpls_labels, ttl, top)
+
+            tx_eth = tx[Ether]
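+            # scapy does not dissect an inner Ethernet frame beneath the MPLS
+            # bottom-of-stack label, so re-parse the raw payload as Ether to
+            # compare the tunnelled frame's MAC addresses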
+            rx_eth = Ether(str(rx[MPLS].payload))
+
+            self.assertEqual(rx_eth.src, tx_eth.src)
+            self.assertEqual(rx_eth.dst, tx_eth.dst)
+
+    def test_vpws(self):
+        """ Virtual Private Wire Service """
+
+        #
+        # Create an MPLS tunnel that pushes 1 label
+        #
+        mpls_tun_1 = VppMPLSTunnelInterface(self,
+                                            [VppRoutePath(self.pg0.remote_ip4,
+                                                          self.pg0.sw_if_index,
+                                                          labels=[42])],
+                                            is_l2=1)
+        mpls_tun_1.add_vpp_config()
+        mpls_tun_1.admin_up()
+
+        #
+        # Create a label entry for 55 that does L2 input to the tunnel
+        #
+        route_55_eos = VppMplsRoute(
+            self, 55, 1,
+            [VppRoutePath("0.0.0.0",
+                          mpls_tun_1.sw_if_index,
+                          is_interface_rx=1,
+                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+        route_55_eos.add_vpp_config()
+
+        #
+        # Cross-connect the tunnel with one of the customer's L2 interfaces
+        #
+        self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index,
+                                               mpls_tun_1.sw_if_index,
+                                               enable=1)
+        self.vapi.sw_interface_set_l2_xconnect(mpls_tun_1.sw_if_index,
+                                               self.pg1.sw_if_index,
+                                               enable=1)
+
+        #
+        # inject a packet from the core
+        #
+        pcore = (Ether(dst=self.pg0.local_mac,
+                       src=self.pg0.remote_mac) /
+                 MPLS(label=55, ttl=64) /
+                 Ether(dst="00:00:de:ad:ba:be",
+                       src="00:00:de:ad:be:ef") /
+                 IP(src="10.10.10.10", dst="11.11.11.11") /
+                 UDP(sport=1234, dport=1234) /
+                 Raw('\xa5' * 100))
+
+        self.pg0.add_stream(pcore * 65)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg1.get_capture(65)
+        tx = pcore[MPLS].payload
+
+        self.assertEqual(rx0[0][Ether].dst, tx[Ether].dst)
+        self.assertEqual(rx0[0][Ether].src, tx[Ether].src)
+
+        #
+        # Inject a packet from the customer/L2 side
+        #
+        self.pg1.add_stream(tx * 65)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg0.get_capture(65)
+
+        self.verify_capture_tunneled_ethernet(rx0, tx*65, [42])
+
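For reference, the core-bound frame accepted by verify_capture_tunneled_ethernet() in this VPWS case looks roughly like this: a single label pushed with TTL 255 wrapping the customer frame unchanged (the outer MAC addresses are those of the pg0 link and are not checked by the helper):

    expected = (Ether(dst=self.pg0.remote_mac, src=self.pg0.local_mac) /
                MPLS(label=42, ttl=255, s=1) /
                Ether(dst="00:00:de:ad:ba:be", src="00:00:de:ad:be:ef") /
                IP(src="10.10.10.10", dst="11.11.11.11") /
                UDP(sport=1234, dport=1234) /
                Raw('\xa5' * 100))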
+    def test_vpls(self):
+        """ Virtual Private LAN Service """
+        #
+        # Create an L2 MPLS tunnel
+        #
+        mpls_tun = VppMPLSTunnelInterface(self,
+                                          [VppRoutePath(self.pg0.remote_ip4,
+                                                        self.pg0.sw_if_index,
+                                                        labels=[42])],
+                                          is_l2=1)
+        mpls_tun.add_vpp_config()
+        mpls_tun.admin_up()
+
+        #
+        # Create a label entry for 55 that does L2 input to the tunnel
+        #
+        route_55_eos = VppMplsRoute(
+            self, 55, 1,
+            [VppRoutePath("0.0.0.0",
+                          mpls_tun.sw_if_index,
+                          is_interface_rx=1,
+                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+        route_55_eos.add_vpp_config()
+
+        #
+        # add the tunnel to the customer's bridge-domain
+        #
+        self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
+                                             bd_id=1)
+        self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
+                                             bd_id=1)
+
+        #
+        # Packet from the customer interface and from the core
+        #
+        p_cust = (Ether(dst="00:00:de:ad:ba:be",
+                        src="00:00:de:ad:be:ef") /
+                  IP(src="10.10.10.10", dst="11.11.11.11") /
+                  UDP(sport=1234, dport=1234) /
+                  Raw('\xa5' * 100))
+        p_core = (Ether(src="00:00:de:ad:ba:be",
+                        dst="00:00:de:ad:be:ef") /
+                  IP(dst="10.10.10.10", src="11.11.11.11") /
+                  UDP(sport=1234, dport=1234) /
+                  Raw('\xa5' * 100))
+
+        #
+        # The BD is learning, so send in one of each packet to learn
+        #
+        p_core_encap = (Ether(dst=self.pg0.local_mac,
+                              src=self.pg0.remote_mac) /
+                        MPLS(label=55, ttl=64) /
+                        p_core)
+
+        self.pg1.add_stream(p_cust)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+        self.pg0.add_stream(p_core_encap)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        # we've learnt this so expect it to be forwarded
+        rx0 = self.pg1.get_capture(1)
+
+        self.assertEqual(rx0[0][Ether].dst, p_core[Ether].dst)
+        self.assertEqual(rx0[0][Ether].src, p_core[Ether].src)
+
+        #
+        # now a stream in each direction
+        #
+        self.pg1.add_stream(p_cust * 65)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx0 = self.pg0.get_capture(65)
+
+        self.verify_capture_tunneled_ethernet(rx0, p_cust*65, [42])
+
+        #
+        # remove interfaces from the customer's bridge-domain
+        #
+        self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
+                                             bd_id=1,
+                                             enable=0)
+        self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
+                                             bd_id=1,
+                                             enable=0)
+
 if __name__ == '__main__':
     unittest.main(testRunner=VppTestRunner)