tests: fix handling failed test case
[vpp.git] / test / test_mpls.py
index 4a1c663..a568f84 100644 (file)
@@ -1,23 +1,35 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 import unittest
 import socket
 
+from framework import tag_fixme_vpp_workers
 from framework import VppTestCase, VppTestRunner
-from vpp_ip import DpoProto
+from vpp_ip import DpoProto, INVALID_INDEX
 from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
     VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
-    MRouteItfFlags, MRouteEntryFlags, VppIpTable, VppMplsTable, \
-    VppMplsLabel, MplsLspMode, find_mpls_route
+    VppIpTable, VppMplsTable, \
+    VppMplsLabel, MplsLspMode, find_mpls_route, \
+    FibPathProto, FibPathType, FibPathFlags, VppMplsLabel, MplsLspMode
 from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
+from vpp_papi import VppEnum
 
 import scapy.compat
 from scapy.packet import Raw
-from scapy.layers.l2 import Ether
-from scapy.layers.inet import IP, UDP, ICMP
-from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
+from scapy.layers.l2 import Ether, ARP
+from scapy.layers.inet import IP, UDP, ICMP, icmptypes, icmpcodes
+from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded, ICMPv6EchoRequest, \
+    ICMPv6PacketTooBig
 from scapy.contrib.mpls import MPLS
 
+NUM_PKTS = 67
+
+# scapy removed these attributes.
+# we asked that they be restored: https://github.com/secdev/scapy/pull/1878
+# semantic names have more meaning than numbers. so here they are.
+ARP.who_has = 1
+ARP.is_at = 2
+
 
 def verify_filter(capture, sent):
     if not len(capture) == len(sent):
@@ -49,6 +61,7 @@ def verify_mpls_stack(tst, rx, mpls_labels):
             rx_mpls = rx_mpls[MPLS].payload
 
 
+@tag_fixme_vpp_workers
 class TestMPLS(VppTestCase):
     """ MPLS Test Case """
 
@@ -99,7 +112,6 @@ class TestMPLS(VppTestCase):
         for i in self.pg_interfaces:
             i.unconfig_ip4()
             i.unconfig_ip6()
-            i.ip6_disable()
             i.set_table_ip4(0)
             i.set_table_ip6(0)
             i.disable_mpls()
@@ -151,7 +163,8 @@ class TestMPLS(VppTestCase):
             pkts.append(p)
         return pkts
 
-    def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
+    def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64,
+                          ip_dscp=0, payload_size=None):
         self.reset_packet_infos()
         pkts = []
         for i in range(0, 257):
@@ -163,6 +176,8 @@ class TestMPLS(VppTestCase):
                  UDP(sport=1234, dport=1234) /
                  Raw(payload))
             info.data = p.copy()
+            if payload_size:
+                self.extend_packet(p, payload_size)
             pkts.append(p)
         return pkts
 
@@ -182,7 +197,8 @@ class TestMPLS(VppTestCase):
         return pkts
 
     def create_stream_labelled_ip6(self, src_if, mpls_labels,
-                                   hlim=64, dst_ip=None):
+                                   hlim=64, dst_ip=None,
+                                   ping=0, ip_itf=None):
         if dst_ip is None:
             dst_ip = src_if.remote_ip6
         self.reset_packet_infos()
@@ -194,9 +210,14 @@ class TestMPLS(VppTestCase):
             for l in mpls_labels:
                 p = p / MPLS(label=l.value, ttl=l.ttl, cos=l.exp)
 
-            p = p / (IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) /
-                     UDP(sport=1234, dport=1234) /
-                     Raw(payload))
+            if ping:
+                p = p / (IPv6(src=ip_itf.remote_ip6,
+                              dst=ip_itf.local_ip6) /
+                         ICMPv6EchoRequest())
+            else:
+                p = p / (IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) /
+                         UDP(sport=1234, dport=1234) /
+                         Raw(payload))
             info.data = p.copy()
             pkts.append(p)
         return pkts
@@ -323,7 +344,8 @@ class TestMPLS(VppTestCase):
             raise
 
     def verify_capture_ip6(self, src_if, capture, sent,
-                           ip_hlim=None, ip_dscp=0):
+                           ip_hlim=None, ip_dscp=0,
+                           ping_resp=0):
         try:
             self.assertEqual(len(capture), len(sent))
 
@@ -338,21 +360,25 @@ class TestMPLS(VppTestCase):
                 tx_ip = tx[IPv6]
                 rx_ip = rx[IPv6]
 
-                self.assertEqual(rx_ip.src, tx_ip.src)
-                self.assertEqual(rx_ip.dst, tx_ip.dst)
-                self.assertEqual(rx_ip.tc,  ip_dscp)
-                # IP processing post pop has decremented the TTL
-                if not ip_hlim:
-                    self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+                if not ping_resp:
+                    self.assertEqual(rx_ip.src, tx_ip.src)
+                    self.assertEqual(rx_ip.dst, tx_ip.dst)
+                    self.assertEqual(rx_ip.tc,  ip_dscp)
+                    # IP processing post pop has decremented the TTL
+                    if not ip_hlim:
+                        self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+                    else:
+                        self.assertEqual(rx_ip.hlim, ip_hlim)
                 else:
-                    self.assertEqual(rx_ip.hlim, ip_hlim)
-
+                    self.assertEqual(rx_ip.src, tx_ip.dst)
+                    self.assertEqual(rx_ip.dst, tx_ip.src)
         except:
             raise
 
     def verify_capture_ip6_icmp(self, src_if, capture, sent):
         try:
-            self.assertEqual(len(capture), len(sent))
+            # rate limited ICMP
+            self.assertTrue(len(capture) <= len(sent))
 
             for i in range(len(capture)):
                 tx = sent[i]
@@ -376,6 +402,55 @@ class TestMPLS(VppTestCase):
         except:
             raise
 
+    def verify_capture_fragmented_labelled_ip4(self, src_if, capture, sent,
+                                               mpls_labels, ip_ttl=None):
+        try:
+            capture = verify_filter(capture, sent)
+
+            for i in range(len(capture)):
+                tx = sent[0]
+                rx = capture[i]
+                tx_ip = tx[IP]
+                rx_ip = rx[IP]
+
+                verify_mpls_stack(self, rx, mpls_labels)
+
+                self.assertEqual(rx_ip.src, tx_ip.src)
+                self.assertEqual(rx_ip.dst, tx_ip.dst)
+                if not ip_ttl:
+                    # IP forwarding has decremented the TTL
+                    self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+                else:
+                    self.assertEqual(rx_ip.ttl, ip_ttl)
+
+        except:
+            raise
+
+    def verify_capture_fragmented_labelled_ip6(self, src_if, capture, sent,
+                                               mpls_labels, ip_ttl=None):
+        try:
+            capture = verify_filter(capture, sent)
+
+            for i in range(len(capture)):
+                tx = sent[0]
+                rx = capture[i]
+                tx_ip = tx[IPv6]
+                rx_ip = IPv6(rx[MPLS].payload)
+
+                verify_mpls_stack(self, rx, mpls_labels)
+
+                self.assertEqual(rx_ip.src, tx_ip.src)
+                self.assertEqual(rx_ip.dst, tx_ip.dst)
+                if not ip_ttl:
+                    # IP forwarding has decremented the hop-limit
+                    self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+                else:
+                    self.assertEqual(rx_ip.hlim, ip_ttl)
+        except:
+            raise
+
     def test_swap(self):
         """ MPLS label swap tests """
 
@@ -496,8 +571,8 @@ class TestMPLS(VppTestCase):
             self, 333, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_333_eos.add_vpp_config()
 
         tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)])
@@ -511,7 +586,7 @@ class TestMPLS(VppTestCase):
                                              [VppMplsLabel(333, ttl=64)],
                                              dst_ip=self.pg1.remote_ip6,
                                              hlim=1)
-        rx = self.send_and_expect(self.pg0, tx, self.pg0)
+        rx = self.send_and_expect_some(self.pg0, tx, self.pg0)
         self.verify_capture_ip6_icmp(self.pg0, rx, tx)
 
         #
@@ -521,8 +596,8 @@ class TestMPLS(VppTestCase):
             self, 334, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[VppMplsLabel(3)],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[VppMplsLabel(3)])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_334_eos.add_vpp_config()
 
         tx = self.create_stream_labelled_ip6(self.pg0,
@@ -537,8 +612,8 @@ class TestMPLS(VppTestCase):
             self, 335, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_335_eos.add_vpp_config()
 
         tx = self.create_stream_labelled_ip6(
@@ -553,7 +628,7 @@ class TestMPLS(VppTestCase):
         tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(334)],
                                              dst_ip=self.pg1.remote_ip6,
                                              hlim=0)
-        rx = self.send_and_expect(self.pg0, tx, self.pg0)
+        rx = self.send_and_expect_some(self.pg0, tx, self.pg0)
         self.verify_capture_ip6_icmp(self.pg0, rx, tx)
 
         #
@@ -584,6 +659,7 @@ class TestMPLS(VppTestCase):
                                                   labels=[VppMplsLabel(44),
                                                           VppMplsLabel(45)])])
         route_34_eos.add_vpp_config()
+        self.logger.info(self.vapi.cli("sh mpls fib 34"))
 
         tx = self.create_stream_labelled_ip4(self.pg0,
                                              [VppMplsLabel(34, ttl=3)])
@@ -773,10 +849,8 @@ class TestMPLS(VppTestCase):
             self, "2001::3", 128,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          proto=DpoProto.DPO_PROTO_IP6,
                           labels=[VppMplsLabel(32,
-                                               mode=MplsLspMode.UNIFORM)])],
-            is_ip6=1)
+                                               mode=MplsLspMode.UNIFORM)])])
         route_2001_3.add_vpp_config()
 
         tx = self.create_stream_ip6(self.pg0, "2001::3",
@@ -849,11 +923,75 @@ class TestMPLS(VppTestCase):
         route_10_0_0_2.remove_vpp_config()
         route_10_0_0_1.remove_vpp_config()
 
+    def test_imposition_fragmentation(self):
+        """ MPLS label imposition fragmentation test """
+
+        #
+        # Add an ipv4 non-recursive route with a single out label
+        #
+        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
+                                    [VppRoutePath(self.pg0.remote_ip4,
+                                                  self.pg0.sw_if_index,
+                                                  labels=[VppMplsLabel(32)])])
+        route_10_0_0_1.add_vpp_config()
+        route_1000_1 = VppIpRoute(self, "1000::1", 128,
+                                  [VppRoutePath(self.pg0.remote_ip6,
+                                                self.pg0.sw_if_index,
+                                                labels=[VppMplsLabel(32)])])
+        route_1000_1.add_vpp_config()
+
+        #
+        # a stream that matches the route for 10.0.0.1
+        # PG0 is in the default table
+        #
+        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
+        for i in range(0, 257):
+            self.extend_packet(tx[i], 10000)
+
+        #
+        # 5 fragments per packet (257*5=1285)
+        #
+        rx = self.send_and_expect(self.pg0, tx, self.pg0, 1285)
+        self.verify_capture_fragmented_labelled_ip4(self.pg0, rx, tx,
+                                                    [VppMplsLabel(32)])
+
+        # packets with DF bit set generate ICMP
+        for t in tx:
+            t[IP].flags = 'DF'
+        rxs = self.send_and_expect_some(self.pg0, tx, self.pg0)
+
+        for rx in rxs:
+            self.assertEqual(icmptypes[rx[ICMP].type], "dest-unreach")
+            self.assertEqual(icmpcodes[rx[ICMP].type][rx[ICMP].code],
+                             "fragmentation-needed")
+            # the link MTU is 9000, the MPLS overhead is 4 bytes
+            self.assertEqual(rx[ICMP].nexthopmtu, 9000 - 4)
+
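+        # every DF packet should have hit the "can't fragment" error counter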
+        self.assertEqual(
+            self.statistics.get_err_counter(
+                "/err/mpls-frag/can't fragment this packet"),
+            len(tx))
+        #
+        # a stream that matches the route for 1000::1/128
+        # PG0 is in the default table
+        #
+        tx = self.create_stream_ip6(self.pg0, "1000::1")
+        for i in range(0, 257):
+            self.extend_packet(tx[i], 10000)
+
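+        # IPv6 is not fragmented by the router, so expect (rate limited)
+        # ICMPv6 Packet Too Big reporting the link MTU less the MPLS
+        # overhead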
+        rxs = self.send_and_expect_some(self.pg0, tx, self.pg0)
+        for rx in rxs:
+            self.assertEqual(rx[ICMPv6PacketTooBig].mtu, 9000 - 4)
+
+        #
+        # cleanup
+        #
+        route_10_0_0_1.remove_vpp_config()
+
     def test_tunnel_pipe(self):
         """ MPLS Tunnel Tests - Pipe """
 
         #
-        # Create a tunnel with a single out label
+        # Create a tunnel with two out labels
         #
         mpls_tun = VppMPLSTunnelInterface(
             self,
@@ -906,6 +1044,38 @@ class TestMPLS(VppTestCase):
                                           VppMplsLabel(46),
                                           VppMplsLabel(33, ttl=255)])
 
+        #
+        # change tunnel's MTU to a low value
+        #
+        mpls_tun.set_l3_mtu(1200)
+
+        # send IP into the tunnel to be fragmented
+        tx = self.create_stream_ip4(self.pg0, "10.0.0.3",
+                                    payload_size=1500)
+        rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx)*2)
+
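+        # each 1500 byte packet exceeds the 1200 byte tunnel MTU and is
+        # split into two fragments; duplicate each tx packet so that every
+        # received fragment has a tx packet to verify against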
+        fake_tx = []
+        for p in tx:
+            fake_tx.append(p)
+            fake_tx.append(p)
+        self.verify_capture_tunneled_ip4(self.pg0, rx, fake_tx,
+                                         [VppMplsLabel(44),
+                                          VppMplsLabel(46)])
+
+        # send MPLS into the tunnel to be fragmented
+        tx = self.create_stream_ip4(self.pg0, "10.0.0.4",
+                                    payload_size=1500)
+        rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx)*2)
+
+        fake_tx = []
+        for p in tx:
+            fake_tx.append(p)
+            fake_tx.append(p)
+        self.verify_capture_tunneled_ip4(self.pg0, rx, fake_tx,
+                                         [VppMplsLabel(44),
+                                          VppMplsLabel(46),
+                                          VppMplsLabel(33, ttl=255)])
+
     def test_tunnel_uniform(self):
         """ MPLS Tunnel Tests - Uniform """
 
@@ -966,9 +1136,9 @@ class TestMPLS(VppTestCase):
                                           VppMplsLabel(33, ttl=47)])
 
     def test_mpls_tunnel_many(self):
-        """ Multiple Tunnels """
+        """ MPLS Multiple Tunnels """
 
-        for ii in range(10):
+        for ii in range(100):
             mpls_tun = VppMPLSTunnelInterface(
                 self,
                 [VppRoutePath(self.pg0.remote_ip4,
@@ -977,6 +1147,16 @@ class TestMPLS(VppTestCase):
                                       VppMplsLabel(46, MplsLspMode.UNIFORM)])])
             mpls_tun.add_vpp_config()
             mpls_tun.admin_up()
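+        # and the same number of L2 tunnels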
+        for ii in range(100):
+            mpls_tun = VppMPLSTunnelInterface(
+                self,
+                [VppRoutePath(self.pg0.remote_ip4,
+                              self.pg0.sw_if_index,
+                              labels=[VppMplsLabel(44, ttl=32),
+                                      VppMplsLabel(46, MplsLspMode.UNIFORM)])],
+                is_l2=1)
+            mpls_tun.add_vpp_config()
+            mpls_tun.admin_up()
 
     def test_v4_exp_null(self):
         """ MPLS V4 Explicit NULL test """
@@ -1058,6 +1238,13 @@ class TestMPLS(VppTestCase):
                                                   0xffffffff,
                                                   nh_table_id=1)])
         route_35_eos.add_vpp_config()
+        route_356_eos = VppMplsRoute(
+            self, 356, 1,
+            [VppRoutePath("0::0",
+                          0xffffffff,
+                          nh_table_id=1)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
+        route_356_eos.add_vpp_config()
 
         #
         # ping an interface in the non-default table
@@ -1068,6 +1255,10 @@ class TestMPLS(VppTestCase):
             self.pg0, [VppMplsLabel(35)], ping=1, ip_itf=self.pg1)
         rx = self.send_and_expect(self.pg0, tx, self.pg1)
         self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
+        tx = self.create_stream_labelled_ip6(
+            self.pg0, [VppMplsLabel(356)], ping=1, ip_itf=self.pg1)
+        rx = self.send_and_expect(self.pg0, tx, self.pg1)
+        self.verify_capture_ip6(self.pg1, rx, tx, ping_resp=1)
 
         #
         # Double pop
@@ -1109,10 +1300,11 @@ class TestMPLS(VppTestCase):
         # if the packet egresses, then we must have swapped to pg1
         # so as to have matched the route in table 1
         #
-        route_34_eos = VppMplsRoute(self, 34, 1,
-                                    [VppRoutePath("0.0.0.0",
-                                                  self.pg1.sw_if_index,
-                                                  is_interface_rx=1)])
+        route_34_eos = VppMplsRoute(
+            self, 34, 1,
+            [VppRoutePath("0.0.0.0",
+                          self.pg1.sw_if_index,
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)])
         route_34_eos.add_vpp_config()
 
         #
@@ -1152,7 +1344,7 @@ class TestMPLS(VppTestCase):
                           labels=[VppMplsLabel(3402)]),
              VppRoutePath("0.0.0.0",
                           self.pg1.sw_if_index,
-                          is_interface_rx=1)],
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)],
             is_multicast=1)
         route_3400_eos.add_vpp_config()
 
@@ -1183,6 +1375,9 @@ class TestMPLS(VppTestCase):
     def test_mcast_head(self):
         """ MPLS Multicast Head-end """
 
+        MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+        MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
         #
         # Create a multicast tunnel with two replications
         #
@@ -1227,12 +1422,13 @@ class TestMPLS(VppTestCase):
             self,
             "0.0.0.0",
             "232.1.1.1", 32,
-            MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+            MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
             [VppMRoutePath(self.pg0.sw_if_index,
-                           MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+                           MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
              VppMRoutePath(mpls_tun._sw_if_index,
-                           MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
+                           MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
         route_232_1_1_1.add_vpp_config()
+        self.logger.info(self.vapi.cli("sh ip mfib index 0"))
 
         self.vapi.cli("clear trace")
         tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
@@ -1249,6 +1445,9 @@ class TestMPLS(VppTestCase):
     def test_mcast_ip4_tail(self):
         """ MPLS IPv4 Multicast Tail """
 
+        MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+        MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
         #
         # Add a multicast route that will forward the traffic
         # post-disposition
@@ -1257,10 +1456,10 @@ class TestMPLS(VppTestCase):
             self,
             "0.0.0.0",
             "232.1.1.1", 32,
-            MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+            MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
             table_id=1,
             paths=[VppMRoutePath(self.pg1.sw_if_index,
-                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
+                                 MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
         route_232_1_1_1.add_vpp_config()
 
         #
@@ -1271,12 +1470,14 @@ class TestMPLS(VppTestCase):
         # if the packet egresses, then we must have matched the route in
         # table 1
         #
-        route_34_eos = VppMplsRoute(self, 34, 1,
-                                    [VppRoutePath("0.0.0.0",
-                                                  self.pg1.sw_if_index,
-                                                  nh_table_id=1,
-                                                  rpf_id=55)],
-                                    is_multicast=1)
+        route_34_eos = VppMplsRoute(
+            self, 34, 1,
+            [VppRoutePath("0.0.0.0",
+                          0xffffffff,
+                          nh_table_id=1,
+                          rpf_id=55)],
+            is_multicast=1,
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)
 
         route_34_eos.add_vpp_config()
 
@@ -1292,6 +1493,7 @@ class TestMPLS(VppTestCase):
         # set the RPF-ID of the entry to match the input packet's
         #
         route_232_1_1_1.update_rpf_id(55)
+        self.logger.info(self.vapi.cli("sh ip mfib index 1 232.1.1.1"))
 
         tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                              dst_ip="232.1.1.1")
@@ -1317,6 +1519,9 @@ class TestMPLS(VppTestCase):
     def test_mcast_ip6_tail(self):
         """ MPLS IPv6 Multicast Tail """
 
+        MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+        MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
         #
         # Add a multicast route that will forward the traffic
         # post-disposition
@@ -1325,11 +1530,11 @@ class TestMPLS(VppTestCase):
             self,
             "::",
             "ff01::1", 32,
-            MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+            MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
             table_id=1,
             paths=[VppMRoutePath(self.pg1.sw_if_index,
-                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
-            is_ip6=1)
+                                 MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+                                 proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
         route_ff.add_vpp_config()
 
         #
@@ -1343,11 +1548,11 @@ class TestMPLS(VppTestCase):
         route_34_eos = VppMplsRoute(
             self, 34, 1,
             [VppRoutePath("::",
-                          self.pg1.sw_if_index,
+                          0xffffffff,
                           nh_table_id=1,
-                          rpf_id=55,
-                          proto=DpoProto.DPO_PROTO_IP6)],
-            is_multicast=1)
+                          rpf_id=55)],
+            is_multicast=1,
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
 
         route_34_eos.add_vpp_config()
 
@@ -1375,7 +1580,7 @@ class TestMPLS(VppTestCase):
                                              [VppMplsLabel(34)],
                                              dst_ip="ff01::1",
                                              hlim=1)
-        rx = self.send_and_expect(self.pg0, tx, self.pg0)
+        rx = self.send_and_expect_some(self.pg0, tx, self.pg0)
         self.verify_capture_ip6_icmp(self.pg0, rx, tx)
 
         #
@@ -1387,6 +1592,84 @@ class TestMPLS(VppTestCase):
                                              dst_ip="ff01::1")
         self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
 
+    def test_6pe(self):
+        """ MPLS 6PE """
+
+        #
+        # Add a non-recursive route with a single out label
+        #
+        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
+                                    [VppRoutePath(self.pg0.remote_ip4,
+                                                  self.pg0.sw_if_index,
+                                                  labels=[VppMplsLabel(45)])])
+        route_10_0_0_1.add_vpp_config()
+
+        # bind a local label to the route
+        binding = VppMplsIpBind(self, 44, "10.0.0.1", 32)
+        binding.add_vpp_config()
+
+        #
+        # a labelled v6 route that resolves through the v4
+        #
+        route_2001_3 = VppIpRoute(
+            self, "2001::3", 128,
+            [VppRoutePath("10.0.0.1",
+                          INVALID_INDEX,
+                          labels=[VppMplsLabel(32)])])
+        route_2001_3.add_vpp_config()
+
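+        # the v4 route's out-label (45) is imposed above the v6 route's
+        # label (32)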
+        tx = self.create_stream_ip6(self.pg0, "2001::3")
+        rx = self.send_and_expect(self.pg0, tx, self.pg0)
+
+        self.verify_capture_labelled_ip6(self.pg0, rx, tx,
+                                         [VppMplsLabel(45),
+                                          VppMplsLabel(32)])
+
+        #
+        # and a v4 recursive via the v6
+        #
+        route_20_3 = VppIpRoute(
+            self, "20.0.0.3", 32,
+            [VppRoutePath("2001::3",
+                          INVALID_INDEX,
+                          labels=[VppMplsLabel(99)])])
+        route_20_3.add_vpp_config()
+
+        tx = self.create_stream_ip4(self.pg0, "20.0.0.3")
+        rx = self.send_and_expect(self.pg0, tx, self.pg0)
+
+        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+                                         [VppMplsLabel(45),
+                                          VppMplsLabel(32),
+                                          VppMplsLabel(99)])
+
+    def test_attached(self):
+        """ Attach Routes with Local Label """
+
+        #
+        # test that if a local label is associated with an attached/connected
+        # prefix, we can reach hosts in the prefix.
+        #
+        binding = VppMplsIpBind(self, 44,
+                                self.pg0._local_ip4_subnet,
+                                self.pg0.local_ip4_prefix_len)
+        binding.add_vpp_config()
+
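+        # a labelled packet addressed to a host within the attached prefix
+        # should be unicast forwarded to that host, not punted to ARP/glean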
+        tx = (Ether(src=self.pg1.remote_mac,
+                    dst=self.pg1.local_mac) /
+              MPLS(label=44, ttl=64) /
+              IP(src=self.pg0.remote_ip4, dst=self.pg0.remote_ip4) /
+              UDP(sport=1234, dport=1234) /
+              Raw(b'\xa5' * 100))
+        rxs = self.send_and_expect(self.pg0, [tx], self.pg0)
+        for rx in rxs:
+            # if there's an ARP then the label is linked to the glean
+            # which is wrong.
+            self.assertFalse(rx.haslayer(ARP))
+            # it should be unicasted to the host
+            self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+            self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+
 
 class TestMPLSDisabled(VppTestCase):
     """ MPLS disabled """
@@ -1428,12 +1711,16 @@ class TestMPLSDisabled(VppTestCase):
     def test_mpls_disabled(self):
         """ MPLS Disabled """
 
+        self.logger.info(self.vapi.cli("show mpls interface"))
+        self.logger.info(self.vapi.cli("show mpls interface pg1"))
+        self.logger.info(self.vapi.cli("show mpls interface pg0"))
+
         tx = (Ether(src=self.pg1.remote_mac,
                     dst=self.pg1.local_mac) /
               MPLS(label=32, ttl=64) /
               IPv6(src="2001::1", dst=self.pg0.remote_ip6) /
               UDP(sport=1234, dport=1234) /
-              Raw('\xa5' * 100))
+              Raw(b'\xa5' * 100))
 
         #
         # A simple MPLS xconnect - eos label in label out
@@ -1454,6 +1741,9 @@ class TestMPLSDisabled(VppTestCase):
         #
         self.pg1.enable_mpls()
 
+        self.logger.info(self.vapi.cli("show mpls interface"))
+        self.logger.info(self.vapi.cli("show mpls interface pg1"))
+
         #
         # Now we get packets through
         #
@@ -1476,7 +1766,7 @@ class TestMPLSDisabled(VppTestCase):
 
 
 class TestMPLSPIC(VppTestCase):
-    """ MPLS PIC edge convergence """
+    """ MPLS Prefix-Independent Convergence (PIC) edge convergence """
 
     @classmethod
     def setUpClass(cls):
@@ -1504,6 +1794,7 @@ class TestMPLSPIC(VppTestCase):
         self.pg0.config_ip4()
         self.pg0.resolve_arp()
         self.pg0.enable_mpls()
+
         self.pg1.admin_up()
         self.pg1.config_ip4()
         self.pg1.resolve_arp()
@@ -1517,6 +1808,7 @@ class TestMPLSPIC(VppTestCase):
         self.pg2.set_table_ip6(1)
         self.pg2.config_ip6()
         self.pg2.resolve_ndp()
+
         self.pg3.admin_up()
         self.pg3.set_table_ip4(1)
         self.pg3.config_ip4()
@@ -1537,7 +1829,7 @@ class TestMPLSPIC(VppTestCase):
         super(TestMPLSPIC, self).tearDown()
 
     def test_mpls_ibgp_pic(self):
-        """ MPLS iBGP PIC edge convergence
+        """ MPLS iBGP Prefix-Independent Convergence (PIC) edge convergence
 
         1) setup many iBGP VPN routes via a pair of iBGP peers.
         2) Check ECMP forwarding to these peers
@@ -1566,25 +1858,28 @@ class TestMPLSPIC(VppTestCase):
         #
         vpn_routes = []
         pkts = []
-        for ii in range(64):
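+        # the routes are configured with FIB_PATH_FLAG_RESOLVE_VIA_HOST
+        # (which replaces the old is_resolve_host argument): the recursive
+        # paths must resolve via host (/32) routes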
+        for ii in range(NUM_PKTS):
             dst = "192.168.1.%d" % ii
-            vpn_routes.append(VppIpRoute(self, dst, 32,
-                                         [VppRoutePath("10.0.0.45",
-                                                       0xffffffff,
-                                                       labels=[145],
-                                                       is_resolve_host=1),
-                                          VppRoutePath("10.0.0.46",
-                                                       0xffffffff,
-                                                       labels=[146],
-                                                       is_resolve_host=1)],
-                                         table_id=1))
+            vpn_routes.append(VppIpRoute(
+                self, dst, 32,
+                [VppRoutePath(
+                    "10.0.0.45",
+                    0xffffffff,
+                    labels=[145],
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST),
+                 VppRoutePath(
+                     "10.0.0.46",
+                     0xffffffff,
+                     labels=[146],
+                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()
 
             pkts.append(Ether(dst=self.pg2.local_mac,
                               src=self.pg2.remote_mac) /
                         IP(src=self.pg2.remote_ip4, dst=dst) /
                         UDP(sport=1234, dport=1234) /
-                        Raw('\xa5' * 100))
+                        Raw(b'\xa5' * 100))
 
         #
         # Send the packet stream (one pkt to each VPN route)
@@ -1594,13 +1889,16 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg0._get_capture(1)
-        rx1 = self.pg1._get_capture(1)
+        rx0 = self.pg0._get_capture(NUM_PKTS)
+        rx1 = self.pg1._get_capture(NUM_PKTS)
 
         # not testing the LB hashing algorithm so we're not concerned
         # with the split ratio, just as long as neither is 0
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
         #
         # use a test CLI command to stop the FIB walk process, this
@@ -1622,7 +1920,10 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg0.get_capture(len(pkts))
+        rx0 = self.pg0.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
         # enable the FIB walk process to converge the FIB
@@ -1636,7 +1937,10 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg0.get_capture(64)
+        rx0 = self.pg0.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
         # Add the IGP route back and we return to load-balancing
@@ -1647,15 +1951,18 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg0._get_capture(1)
-        rx1 = self.pg1._get_capture(1)
+        rx0 = self.pg0._get_capture(NUM_PKTS)
+        rx1 = self.pg1._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
     def test_mpls_ebgp_pic(self):
-        """ MPLS eBGP PIC edge convergence
+        """ MPLS eBGP Prefix-Independent Convergence (PIC) edge convergence
 
-        1) setup many eBGP VPN routes via a pair of eBGP peers
+        1) setup many eBGP VPN routes via a pair of eBGP peers.
         2) Check ECMP forwarding to these peers
         3) withdraw one eBGP path - expect LB across remaining eBGP
         """
@@ -1667,19 +1974,22 @@ class TestMPLSPIC(VppTestCase):
         vpn_routes = []
         vpn_bindings = []
         pkts = []
-        for ii in range(64):
+        for ii in range(NUM_PKTS):
             dst = "192.168.1.%d" % ii
             local_label = 1600 + ii
-            vpn_routes.append(VppIpRoute(self, dst, 32,
-                                         [VppRoutePath(self.pg2.remote_ip4,
-                                                       0xffffffff,
-                                                       nh_table_id=1,
-                                                       is_resolve_attached=1),
-                                          VppRoutePath(self.pg3.remote_ip4,
-                                                       0xffffffff,
-                                                       nh_table_id=1,
-                                                       is_resolve_attached=1)],
-                                         table_id=1))
+            vpn_routes.append(VppIpRoute(
+                self, dst, 32,
+                [VppRoutePath(
+                    self.pg2.remote_ip4,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+                 VppRoutePath(
+                     self.pg3.remote_ip4,
+                     0xffffffff,
+                     nh_table_id=1,
+                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()
 
             vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
@@ -1691,16 +2001,26 @@ class TestMPLSPIC(VppTestCase):
                         MPLS(label=local_label, ttl=64) /
                         IP(src=self.pg0.remote_ip4, dst=dst) /
                         UDP(sport=1234, dport=1234) /
-                        Raw('\xa5' * 100))
+                        Raw(b'\xa5' * 100))
 
+        #
+        # Send the packet stream (one pkt to each VPN route)
+        #  - expect a 50-50 split of the traffic
+        #
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
+
+        # not testing the LB hashing algorithm so we're not concerned
+        # with the split ratio, just as long as neither is 0
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
         #
         # use a test CLI command to stop the FIB walk process, this
@@ -1721,34 +2041,48 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
         # enable the FIB walk process to converge the FIB
         #
         self.vapi.ppcli("test fib-walk-process enable")
+
+        #
+        # packets should still be forwarded through the remaining peer
+        #
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
-        # put the connecteds back
+        # put the connected routes back
         #
         self.pg2.config_ip4()
+        self.pg2.resolve_arp()
 
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
     def test_mpls_v6_ebgp_pic(self):
-        """ MPLSv6 eBGP PIC edge convergence
+        """ MPLSv6 eBGP Prefix-Independent Convergence (PIC) edge convergence
 
         1) setup many eBGP VPNv6 routes via a pair of eBGP peers
         2) Check ECMP forwarding to these peers
@@ -1762,28 +2096,26 @@ class TestMPLSPIC(VppTestCase):
         vpn_routes = []
         vpn_bindings = []
         pkts = []
-        for ii in range(64):
+        for ii in range(NUM_PKTS):
             dst = "3000::%d" % ii
             local_label = 1600 + ii
             vpn_routes.append(VppIpRoute(
                 self, dst, 128,
-                [VppRoutePath(self.pg2.remote_ip6,
-                              0xffffffff,
-                              nh_table_id=1,
-                              is_resolve_attached=1,
-                              proto=DpoProto.DPO_PROTO_IP6),
-                 VppRoutePath(self.pg3.remote_ip6,
-                              0xffffffff,
-                              nh_table_id=1,
-                              proto=DpoProto.DPO_PROTO_IP6,
-                              is_resolve_attached=1)],
-                table_id=1,
-                is_ip6=1))
+                [VppRoutePath(
+                    self.pg2.remote_ip6,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+                 VppRoutePath(
+                     self.pg3.remote_ip6,
+                     0xffffffff,
+                     nh_table_id=1,
+                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()
 
             vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
-                                              ip_table_id=1,
-                                              is_ip6=1))
+                                              ip_table_id=1))
             vpn_bindings[ii].add_vpp_config()
 
             pkts.append(Ether(dst=self.pg0.local_mac,
@@ -1791,16 +2123,20 @@ class TestMPLSPIC(VppTestCase):
                         MPLS(label=local_label, ttl=64) /
                         IPv6(src=self.pg0.remote_ip6, dst=dst) /
                         UDP(sport=1234, dport=1234) /
-                        Raw('\xa5' * 100))
+                        Raw(b'\xa5' * 100))
+            self.logger.info(self.vapi.cli("sh ip6 fib %s" % dst))
 
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
         #
         # use a test CLI command to stop the FIB walk process, this
@@ -1823,7 +2159,10 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
         # enable the FIB walk process to converge the FIB
@@ -1833,22 +2172,30 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
-        # put the connecteds back
+        # put the connected routes back
         #
+        self.logger.info(self.vapi.cli("sh log"))
         self.pg2.admin_up()
         self.pg2.config_ip6()
+        self.pg2.resolve_ndp()
 
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
 
 class TestMPLSL2(VppTestCase):
@@ -1874,10 +2221,9 @@ class TestMPLSL2(VppTestCase):
         tbl.add_vpp_config()
         self.tables.append(tbl)
 
-        # use pg0 as the core facing interface
+        # use pg0 as the core facing interface, don't resolve ARP
         self.pg0.admin_up()
         self.pg0.config_ip4()
-        self.pg0.resolve_arp()
         self.pg0.enable_mpls()
 
         # use the other 2 for customer facing L2 links
@@ -1911,6 +2257,22 @@ class TestMPLSL2(VppTestCase):
             self.assertEqual(rx_eth.src, tx_eth.src)
             self.assertEqual(rx_eth.dst, tx_eth.dst)
 
+    def verify_arp_req(self, rx, smac, sip, dip):
+        ether = rx[Ether]
+        self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
+        self.assertEqual(ether.src, smac)
+
+        arp = rx[ARP]
+        self.assertEqual(arp.hwtype, 1)
+        self.assertEqual(arp.ptype, 0x800)
+        self.assertEqual(arp.hwlen, 6)
+        self.assertEqual(arp.plen, 4)
+        self.assertEqual(arp.op, ARP.who_has)
+        self.assertEqual(arp.hwsrc, smac)
+        self.assertEqual(arp.hwdst, "00:00:00:00:00:00")
+        self.assertEqual(arp.psrc, sip)
+        self.assertEqual(arp.pdst, dip)
+
     def test_vpws(self):
         """ Virtual Private Wire Service """
 
@@ -1935,8 +2297,9 @@ class TestMPLSL2(VppTestCase):
             self, 55, 1,
             [VppRoutePath("0.0.0.0",
                           mpls_tun_1.sw_if_index,
-                          is_interface_rx=1,
-                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
         route_55_eos.add_vpp_config()
 
         #
@@ -1959,9 +2322,9 @@ class TestMPLSL2(VppTestCase):
                        src="00:00:de:ad:be:ef") /
                  IP(src="10.10.10.10", dst="11.11.11.11") /
                  UDP(sport=1234, dport=1234) /
-                 Raw('\xa5' * 100))
+                 Raw(b'\xa5' * 100))
 
-        tx0 = pcore * 65
+        tx0 = pcore * NUM_PKTS
         rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
         payload = pcore[MPLS].payload
 
@@ -1970,99 +2333,163 @@ class TestMPLSL2(VppTestCase):
 
         #
         # Inject a packet from the customer/L2 side
+        # there's no resolved ARP entry so the first packet we see should be
+        # an ARP request
         #
-        tx1 = pcore[MPLS].payload * 65
+        tx1 = pcore[MPLS].payload
+        rx1 = self.send_and_expect(self.pg1, [tx1], self.pg0)
+
+        self.verify_arp_req(rx1[0],
+                            self.pg0.local_mac,
+                            self.pg0.local_ip4,
+                            self.pg0.remote_ip4)
+
+        #
+        # resolve the ARP entries and send again
+        #
+        self.pg0.resolve_arp()
+        tx1 = pcore[MPLS].payload * NUM_PKTS
         rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)
 
         self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])
 
     def test_vpls(self):
         """ Virtual Private LAN Service """
+
+        # we skipped this in the setup
+        self.pg0.resolve_arp()
+
         #
-        # Create an L2 MPLS tunnel
+        # Create two L2 MPLS tunnels
         #
-        mpls_tun = VppMPLSTunnelInterface(
+        mpls_tun1 = VppMPLSTunnelInterface(
             self,
             [VppRoutePath(self.pg0.remote_ip4,
                           self.pg0.sw_if_index,
                           labels=[VppMplsLabel(42)])],
             is_l2=1)
-        mpls_tun.add_vpp_config()
-        mpls_tun.admin_up()
+        mpls_tun1.add_vpp_config()
+        mpls_tun1.admin_up()
+
+        mpls_tun2 = VppMPLSTunnelInterface(
+            self,
+            [VppRoutePath(self.pg0.remote_ip4,
+                          self.pg0.sw_if_index,
+                          labels=[VppMplsLabel(43)])],
+            is_l2=1)
+        mpls_tun2.add_vpp_config()
+        mpls_tun2.admin_up()
 
         #
-        # Create a label entry to for 55 that does L2 input to the tunnel
+        # Create label entries, 55 and 56, that do L2 input to the tunnels;
+        # the latter includes a Pseudo Wire Control Word
         #
         route_55_eos = VppMplsRoute(
             self, 55, 1,
             [VppRoutePath("0.0.0.0",
-                          mpls_tun.sw_if_index,
-                          is_interface_rx=1,
-                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+                          mpls_tun1.sw_if_index,
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
+
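+        # entry 56 is configured with FIB_PATH_FLAG_POP_PW_CW so the
+        # disposition strips the 4-byte pseudowire control word before
+        # the frame is L2 forwarded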
+        route_56_eos = VppMplsRoute(
+            self, 56, 1,
+            [VppRoutePath("0.0.0.0",
+                          mpls_tun2.sw_if_index,
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          flags=FibPathFlags.FIB_PATH_FLAG_POP_PW_CW,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
+
+        route_56_eos.add_vpp_config()
         route_55_eos.add_vpp_config()
 
+        self.logger.info(self.vapi.cli("sh mpls fib 56"))
+
         #
         # add to tunnel to the customers bridge-domain
         #
         self.vapi.sw_interface_set_l2_bridge(
-            rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1)
+            rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1)
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1)
         self.vapi.sw_interface_set_l2_bridge(
             rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)
 
         #
-        # Packet from the customer interface and from the core
-        #
-        p_cust = (Ether(dst="00:00:de:ad:ba:be",
-                        src="00:00:de:ad:be:ef") /
-                  IP(src="10.10.10.10", dst="11.11.11.11") /
-                  UDP(sport=1234, dport=1234) /
-                  Raw('\xa5' * 100))
-        p_core = (Ether(src="00:00:de:ad:ba:be",
-                        dst="00:00:de:ad:be:ef") /
-                  IP(dst="10.10.10.10", src="11.11.11.11") /
-                  UDP(sport=1234, dport=1234) /
-                  Raw('\xa5' * 100))
+        # Packets from a host on the customer interface to each host
+        # reachable over the core, and vice-versa
+        #
+        p_cust1 = (Ether(dst="00:00:de:ad:ba:b1",
+                         src="00:00:de:ad:be:ef") /
+                   IP(src="10.10.10.10", dst="11.11.11.11") /
+                   UDP(sport=1234, dport=1234) /
+                   Raw(b'\xa5' * 100))
+        p_cust2 = (Ether(dst="00:00:de:ad:ba:b2",
+                         src="00:00:de:ad:be:ef") /
+                   IP(src="10.10.10.10", dst="11.11.11.12") /
+                   UDP(sport=1234, dport=1234) /
+                   Raw(b'\xa5' * 100))
+        p_core1 = (Ether(dst=self.pg0.local_mac,
+                         src=self.pg0.remote_mac) /
+                   MPLS(label=55, ttl=64) /
+                   Ether(src="00:00:de:ad:ba:b1",
+                         dst="00:00:de:ad:be:ef") /
+                   IP(dst="10.10.10.10", src="11.11.11.11") /
+                   UDP(sport=1234, dport=1234) /
+                   Raw(b'\xa5' * 100))
+        p_core2 = (Ether(dst=self.pg0.local_mac,
+                         src=self.pg0.remote_mac) /
+                   MPLS(label=56, ttl=64) /
+                   Raw(b'\x01' * 4) /  # PW CW
+                   Ether(src="00:00:de:ad:ba:b2",
+                         dst="00:00:de:ad:be:ef") /
+                   IP(dst="10.10.10.10", src="11.11.11.12") /
+                   UDP(sport=1234, dport=1234) /
+                   Raw(b'\xa5' * 100))
 
         #
         # The BD is learning, so send in one of each packet to learn
         #
-        p_core_encap = (Ether(dst=self.pg0.local_mac,
-                              src=self.pg0.remote_mac) /
-                        MPLS(label=55, ttl=64) /
-                        p_core)
 
-        self.pg1.add_stream(p_cust)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
-        self.pg0.add_stream(p_core_encap)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
+        # 2 packets due to BD flooding
+        rx = self.send_and_expect(self.pg1, p_cust1, self.pg0, n_rx=2)
+        rx = self.send_and_expect(self.pg1, p_cust2, self.pg0, n_rx=2)
 
-        # we've learnt this so expect it be be forwarded
-        rx0 = self.pg1.get_capture(1)
+        # we've learnt this so expect it to be forwarded, not flooded
+        rx = self.send_and_expect(self.pg0, [p_core1], self.pg1)
+        self.assertEqual(rx[0][Ether].dst, p_cust1[Ether].src)
+        self.assertEqual(rx[0][Ether].src, p_cust1[Ether].dst)
 
-        self.assertEqual(rx0[0][Ether].dst, p_core[Ether].dst)
-        self.assertEqual(rx0[0][Ether].src, p_core[Ether].src)
+        rx = self.send_and_expect(self.pg0, [p_core2], self.pg1)
+        self.assertEqual(rx[0][Ether].dst, p_cust2[Ether].src)
+        self.assertEqual(rx[0][Ether].src, p_cust2[Ether].dst)
 
         #
-        # now a stream in each direction
+        # now a stream in each direction from each host
         #
-        self.pg1.add_stream(p_cust * 65)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
+        rx = self.send_and_expect(self.pg1, p_cust1 * NUM_PKTS, self.pg0)
+        self.verify_capture_tunneled_ethernet(rx, p_cust1 * NUM_PKTS,
+                                              [VppMplsLabel(42)])
 
-        rx0 = self.pg0.get_capture(65)
+        rx = self.send_and_expect(self.pg1, p_cust2 * NUM_PKTS, self.pg0)
+        self.verify_capture_tunneled_ethernet(rx, p_cust2 * NUM_PKTS,
+                                              [VppMplsLabel(43)])
 
-        self.verify_capture_tunneled_ethernet(rx0, p_cust*65,
-                                              [VppMplsLabel(42)])
+        rx = self.send_and_expect(self.pg0, p_core1 * NUM_PKTS, self.pg1)
+        rx = self.send_and_expect(self.pg0, p_core2 * NUM_PKTS, self.pg1)
 
         #
         # remove interfaces from customers bridge-domain
         #
         self.vapi.sw_interface_set_l2_bridge(
-            rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1, enable=0)
+            rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1, enable=0)
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1, enable=0)
         self.vapi.sw_interface_set_l2_bridge(
             rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)
 
+
 if __name__ == '__main__':
     unittest.main(testRunner=VppTestRunner)