fib: fib api updates
[vpp.git] / test / test_mpls.py
index 33fed68..d068bc3 100644
@@ -4,18 +4,23 @@ import unittest
 import socket
 
 from framework import VppTestCase, VppTestRunner
+from vpp_ip import DpoProto
 from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
     VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
-    MRouteItfFlags, MRouteEntryFlags, DpoProto, VppIpTable, VppMplsTable, \
-    VppMplsLabel, MplsLspMode
+    MRouteItfFlags, MRouteEntryFlags, VppIpTable, VppMplsTable, \
+    VppMplsLabel, MplsLspMode, find_mpls_route, \
+    FibPathProto, FibPathType, FibPathFlags
 from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
 
+import scapy.compat
 from scapy.packet import Raw
 from scapy.layers.l2 import Ether
 from scapy.layers.inet import IP, UDP, ICMP
 from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
 from scapy.contrib.mpls import MPLS
 
+NUM_PKTS = 67
+
 
 def verify_filter(capture, sent):
     if not len(capture) == len(sent):
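
The hunks that follow migrate this test from the old per-path booleans (is_interface_rx, is_resolve_host, is_resolve_attached, is_ip6) and DpoProto to the typed FIB path enums imported above. A minimal before/after sketch, built only from constructs that appear later in this patch:

    # old style: behaviour and address family as booleans/DpoProto on the path
    VppMplsRoute(self, 334, 1,
                 [VppRoutePath(self.pg0.remote_ip6,
                               self.pg0.sw_if_index,
                               labels=[VppMplsLabel(3)],
                               proto=DpoProto.DPO_PROTO_IP6)])

    # new style: FibPathType/FibPathFlags qualify the path, and the payload
    # protocol of the EOS label is carried by the route's eos_proto
    VppMplsRoute(self, 334, 1,
                 [VppRoutePath(self.pg0.remote_ip6,
                               self.pg0.sw_if_index,
                               labels=[VppMplsLabel(3)])],
                 eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
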
@@ -50,6 +55,14 @@ def verify_mpls_stack(tst, rx, mpls_labels):
 class TestMPLS(VppTestCase):
     """ MPLS Test Case """
 
+    @classmethod
+    def setUpClass(cls):
+        super(TestMPLS, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(TestMPLS, cls).tearDownClass()
+
     def setUp(self):
         super(TestMPLS, self).setUp()
 
@@ -359,7 +372,7 @@ class TestMPLS(VppTestCase):
                 # ICMP sourced from the interface's address
                 self.assertEqual(rx_ip.src, src_if.local_ip6)
                 # hop-limit reset to 255 for ICMP packet
-                self.assertEqual(rx_ip.hlim, 254)
+                self.assertEqual(rx_ip.hlim, 255)
 
                 icmp = rx[ICMPv6TimeExceeded]
 
@@ -378,6 +391,12 @@ class TestMPLS(VppTestCase):
                                                   labels=[VppMplsLabel(33)])])
         route_32_eos.add_vpp_config()
 
+        self.assertTrue(
+            find_mpls_route(self, 0, 32, 1,
+                            [VppRoutePath(self.pg0.remote_ip4,
+                                          self.pg0.sw_if_index,
+                                          labels=[VppMplsLabel(33)])]))
+
         #
         # a stream that matches the route for 10.0.0.1
         # PG0 is in the default table
@@ -388,6 +407,8 @@ class TestMPLS(VppTestCase):
         self.verify_capture_labelled(self.pg0, rx, tx,
                                      [VppMplsLabel(33, ttl=31, exp=1)])
 
+        self.assertEqual(route_32_eos.get_stats_to()['packets'], 257)
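
The new get_stats_to() / get_stats_via() assertions read the per-route counters from the stats segment; broadly, "to" counts packets destined to the entry itself while "via" counts packets that recurse through it (used further down for the recursive x-connect). A sketch of the pattern; 257 is simply the number of packets the default stream in this test generates:

    counters = route_32_eos.get_stats_to()
    self.assertEqual(counters['packets'], 257)
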
+
         #
         # A simple MPLS xconnect - non-eos label in label out
         #
@@ -408,6 +429,7 @@ class TestMPLS(VppTestCase):
         self.verify_capture_labelled(self.pg0, rx, tx,
                                      [VppMplsLabel(33, ttl=20, exp=7),
                                       VppMplsLabel(99)])
+        self.assertEqual(route_32_neos.get_stats_to()['packets'], 257)
 
         #
         # A simple MPLS xconnect - non-eos label in label out, uniform mode
@@ -441,7 +463,7 @@ class TestMPLS(VppTestCase):
         self.verify_capture_ip4(self.pg0, rx, tx)
 
         #
-        # disposed packets have an invalid IPv4 checkusm
+        # disposed packets have an invalid IPv4 checksum
         #
         tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)],
                                              dst_ip=self.pg0.remote_ip4,
@@ -477,8 +499,8 @@ class TestMPLS(VppTestCase):
             self, 333, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_333_eos.add_vpp_config()
 
         tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)])
@@ -502,8 +524,8 @@ class TestMPLS(VppTestCase):
             self, 334, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[VppMplsLabel(3)],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[VppMplsLabel(3)])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_334_eos.add_vpp_config()
 
         tx = self.create_stream_labelled_ip6(self.pg0,
@@ -518,8 +540,8 @@ class TestMPLS(VppTestCase):
             self, 335, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_335_eos.add_vpp_config()
 
         tx = self.create_stream_labelled_ip6(
@@ -565,6 +587,7 @@ class TestMPLS(VppTestCase):
                                                   labels=[VppMplsLabel(44),
                                                           VppMplsLabel(45)])])
         route_34_eos.add_vpp_config()
+        self.logger.info(self.vapi.cli("sh mpls fib 34"))
 
         tx = self.create_stream_labelled_ip4(self.pg0,
                                              [VppMplsLabel(34, ttl=3)])
@@ -574,6 +597,9 @@ class TestMPLS(VppTestCase):
                                       VppMplsLabel(44),
                                       VppMplsLabel(45, ttl=2)])
 
+        self.assertEqual(route_34_eos.get_stats_to()['packets'], 257)
+        self.assertEqual(route_32_neos.get_stats_via()['packets'], 257)
+
         #
         # A recursive EOS x-connect, which resolves through another x-connect
         # in uniform mode
@@ -634,6 +660,7 @@ class TestMPLS(VppTestCase):
                                           VppMplsLabel(44),
                                           VppMplsLabel(46),
                                           VppMplsLabel(55)])
+        self.assertEqual(ip_10_0_0_1.get_stats_to()['packets'], 257)
 
         ip_10_0_0_1.remove_vpp_config()
         route_34_neos.remove_vpp_config()
@@ -750,10 +777,8 @@ class TestMPLS(VppTestCase):
             self, "2001::3", 128,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          proto=DpoProto.DPO_PROTO_IP6,
                           labels=[VppMplsLabel(32,
-                                               mode=MplsLspMode.UNIFORM)])],
-            is_ip6=1)
+                                               mode=MplsLspMode.UNIFORM)])])
         route_2001_3.add_vpp_config()
 
         tx = self.create_stream_ip6(self.pg0, "2001::3",
@@ -781,6 +806,8 @@ class TestMPLS(VppTestCase):
                                          [VppMplsLabel(32),
                                           VppMplsLabel(44)])
 
+        self.assertEqual(route_11_0_0_1.get_stats_to()['packets'], 257)
+
         #
         # add a recursive path, with 2 labels, via the 3 label route
         #
@@ -804,6 +831,18 @@ class TestMPLS(VppTestCase):
                                           VppMplsLabel(44),
                                           VppMplsLabel(45)])
 
+        self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 257)
+
+        rx = self.send_and_expect(self.pg0, tx, self.pg0)
+        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+                                         [VppMplsLabel(32),
+                                          VppMplsLabel(33),
+                                          VppMplsLabel(34),
+                                          VppMplsLabel(44),
+                                          VppMplsLabel(45)])
+
+        self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 514)
+
         #
         # cleanup
         #
@@ -928,6 +967,19 @@ class TestMPLS(VppTestCase):
                                           VppMplsLabel(46, ttl=47),
                                           VppMplsLabel(33, ttl=47)])
 
+    def test_mpls_tunnel_many(self):
+        """ MPLS Multiple Tunnels """
+
+        for ii in range(10):
+            mpls_tun = VppMPLSTunnelInterface(
+                self,
+                [VppRoutePath(self.pg0.remote_ip4,
+                              self.pg0.sw_if_index,
+                              labels=[VppMplsLabel(44, ttl=32),
+                                      VppMplsLabel(46, MplsLspMode.UNIFORM)])])
+            mpls_tun.add_vpp_config()
+            mpls_tun.admin_up()
+
     def test_v4_exp_null(self):
         """ MPLS V4 Explicit NULL test """
 
@@ -1059,10 +1111,11 @@ class TestMPLS(VppTestCase):
         # if the packet egresses, then we must have swapped to pg1
         # so as to have matched the route in table 1
         #
-        route_34_eos = VppMplsRoute(self, 34, 1,
-                                    [VppRoutePath("0.0.0.0",
-                                                  self.pg1.sw_if_index,
-                                                  is_interface_rx=1)])
+        route_34_eos = VppMplsRoute(
+            self, 34, 1,
+            [VppRoutePath("0.0.0.0",
+                          self.pg1.sw_if_index,
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)])
         route_34_eos.add_vpp_config()
 
         #
@@ -1102,7 +1155,7 @@ class TestMPLS(VppTestCase):
                           labels=[VppMplsLabel(3402)]),
              VppRoutePath("0.0.0.0",
                           self.pg1.sw_if_index,
-                          is_interface_rx=1)],
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)],
             is_multicast=1)
         route_3400_eos.add_vpp_config()
 
@@ -1183,6 +1236,7 @@ class TestMPLS(VppTestCase):
              VppMRoutePath(mpls_tun._sw_if_index,
                            MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
         route_232_1_1_1.add_vpp_config()
+        self.logger.info(self.vapi.cli("sh ip mfib index 0"))
 
         self.vapi.cli("clear trace")
         tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
@@ -1221,12 +1275,14 @@ class TestMPLS(VppTestCase):
         # if the packet egresses, then we must have matched the route in
         # table 1
         #
-        route_34_eos = VppMplsRoute(self, 34, 1,
-                                    [VppRoutePath("0.0.0.0",
-                                                  self.pg1.sw_if_index,
-                                                  nh_table_id=1,
-                                                  rpf_id=55)],
-                                    is_multicast=1)
+        route_34_eos = VppMplsRoute(
+            self, 34, 1,
+            [VppRoutePath("0.0.0.0",
+                          0xffffffff,
+                          nh_table_id=1,
+                          rpf_id=55)],
+            is_multicast=1,
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)
 
         route_34_eos.add_vpp_config()
 
@@ -1239,9 +1295,10 @@ class TestMPLS(VppTestCase):
         self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")
 
         #
-        # set the RPF-ID of the enrtry to match the input packet's
+        # set the RPF-ID of the entry to match the input packet's
         #
         route_232_1_1_1.update_rpf_id(55)
+        self.logger.info(self.vapi.cli("sh ip mfib index 1 232.1.1.1"))
 
         tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                              dst_ip="232.1.1.1")
@@ -1249,7 +1306,7 @@ class TestMPLS(VppTestCase):
         self.verify_capture_ip4(self.pg1, rx, tx)
 
         #
-        # disposed packets have an invalid IPv4 checkusm
+        # disposed packets have an invalid IPv4 checksum
         #
         tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                              dst_ip="232.1.1.1", n=65,
@@ -1278,8 +1335,8 @@ class TestMPLS(VppTestCase):
             MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
             table_id=1,
             paths=[VppMRoutePath(self.pg1.sw_if_index,
-                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
-            is_ip6=1)
+                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+                                 proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
         route_ff.add_vpp_config()
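
As with unicast paths, the address family of a multicast route is now expressed per path instead of with an is_ip6 flag on the route; a minimal sketch of the new VppMRoutePath construction used in this hunk:

    path_v6 = VppMRoutePath(self.pg1.sw_if_index,
                            MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
                            proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
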
 
         #
@@ -1293,11 +1350,11 @@ class TestMPLS(VppTestCase):
         route_34_eos = VppMplsRoute(
             self, 34, 1,
             [VppRoutePath("::",
-                          self.pg1.sw_if_index,
+                          0xffffffff,
                           nh_table_id=1,
-                          rpf_id=55,
-                          proto=DpoProto.DPO_PROTO_IP6)],
-            is_multicast=1)
+                          rpf_id=55)],
+            is_multicast=1,
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
 
         route_34_eos.add_vpp_config()
 
@@ -1309,7 +1366,7 @@ class TestMPLS(VppTestCase):
         self.send_and_assert_no_replies(self.pg0, tx, "RPF Miss")
 
         #
-        # set the RPF-ID of the enrtry to match the input packet's
+        # set the RPF-ID of the entry to match the input packet's
         #
         route_ff.update_rpf_id(55)
 
@@ -1329,7 +1386,7 @@ class TestMPLS(VppTestCase):
         self.verify_capture_ip6_icmp(self.pg0, rx, tx)
 
         #
-        # set the RPF-ID of the enrtry to not match the input packet's
+        # set the RPF-ID of the entry to not match the input packet's
         #
         route_ff.update_rpf_id(56)
         tx = self.create_stream_labelled_ip6(self.pg0,
@@ -1341,6 +1398,14 @@ class TestMPLS(VppTestCase):
 class TestMPLSDisabled(VppTestCase):
     """ MPLS disabled """
 
+    @classmethod
+    def setUpClass(cls):
+        super(TestMPLSDisabled, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(TestMPLSDisabled, cls).tearDownClass()
+
     def setUp(self):
         super(TestMPLSDisabled, self).setUp()
 
@@ -1350,7 +1415,7 @@ class TestMPLSDisabled(VppTestCase):
         self.tbl = VppMplsTable(self, 0)
         self.tbl.add_vpp_config()
 
-        # PG0 is MPLS enalbed
+        # PG0 is MPLS enabled
         self.pg0.admin_up()
         self.pg0.config_ip4()
         self.pg0.resolve_arp()
@@ -1418,7 +1483,15 @@ class TestMPLSDisabled(VppTestCase):
 
 
 class TestMPLSPIC(VppTestCase):
-    """ MPLS PIC edge convergence """
+    """ MPLS Prefix-Independent Convergence (PIC) edge convergence """
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestMPLSPIC, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(TestMPLSPIC, cls).tearDownClass()
 
     def setUp(self):
         super(TestMPLSPIC, self).setUp()
@@ -1438,6 +1511,7 @@ class TestMPLSPIC(VppTestCase):
         self.pg0.config_ip4()
         self.pg0.resolve_arp()
         self.pg0.enable_mpls()
+
         self.pg1.admin_up()
         self.pg1.config_ip4()
         self.pg1.resolve_arp()
@@ -1451,6 +1525,7 @@ class TestMPLSPIC(VppTestCase):
         self.pg2.set_table_ip6(1)
         self.pg2.config_ip6()
         self.pg2.resolve_ndp()
+
         self.pg3.admin_up()
         self.pg3.set_table_ip4(1)
         self.pg3.config_ip4()
@@ -1471,7 +1546,7 @@ class TestMPLSPIC(VppTestCase):
         super(TestMPLSPIC, self).tearDown()
 
     def test_mpls_ibgp_pic(self):
-        """ MPLS iBGP PIC edge convergence
+        """ MPLS iBGP Prefix-Independent Convergence (PIC) edge convergence
 
         1) setup many iBGP VPN routes via a pair of iBGP peers.
         2) Check ECMP forwarding to these peers
@@ -1500,18 +1575,21 @@ class TestMPLSPIC(VppTestCase):
         #
         vpn_routes = []
         pkts = []
-        for ii in range(64):
+        for ii in range(NUM_PKTS):
             dst = "192.168.1.%d" % ii
-            vpn_routes.append(VppIpRoute(self, dst, 32,
-                                         [VppRoutePath("10.0.0.45",
-                                                       0xffffffff,
-                                                       labels=[145],
-                                                       is_resolve_host=1),
-                                          VppRoutePath("10.0.0.46",
-                                                       0xffffffff,
-                                                       labels=[146],
-                                                       is_resolve_host=1)],
-                                         table_id=1))
+            vpn_routes.append(VppIpRoute(
+                self, dst, 32,
+                [VppRoutePath(
+                    "10.0.0.45",
+                    0xffffffff,
+                    labels=[145],
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST),
+                 VppRoutePath(
+                     "10.0.0.46",
+                     0xffffffff,
+                     labels=[146],
+                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()
 
             pkts.append(Ether(dst=self.pg2.local_mac,
@@ -1528,18 +1606,21 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg0._get_capture(1)
-        rx1 = self.pg1._get_capture(1)
+        rx0 = self.pg0._get_capture(NUM_PKTS)
+        rx1 = self.pg1._get_capture(NUM_PKTS)
 
-        # not testig the LB hashing algorithm so we're not concerned
+        # not testing the LB hashing algorithm so we're not concerned
         # with the split ratio, just as long as neither is 0
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
         #
         # use a test CLI command to stop the FIB walk process, this
         # will prevent the FIB converging the VPN routes and thus allow
-        # us to probe the interim (psot-fail, pre-converge) state
+        # us to probe the interim (post-fail, pre-converge) state
         #
         self.vapi.ppcli("test fib-walk-process disable")
 
@@ -1556,7 +1637,10 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg0.get_capture(len(pkts))
+        rx0 = self.pg0.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
         # enable the FIB walk process to converge the FIB
@@ -1570,7 +1654,10 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg0.get_capture(64)
+        rx0 = self.pg0.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
         # Add the IGP route back and we return to load-balancing
@@ -1581,15 +1668,18 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg0._get_capture(1)
-        rx1 = self.pg1._get_capture(1)
+        rx0 = self.pg0._get_capture(NUM_PKTS)
+        rx1 = self.pg1._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
     def test_mpls_ebgp_pic(self):
-        """ MPLS eBGP PIC edge convergence
+        """ MPLS eBGP Prefix-Independent Convergence (PIC) edge convergence
 
-        1) setup many eBGP VPN routes via a pair of eBGP peers
+        1) setup many eBGP VPN routes via a pair of eBGP peers.
         2) Check ECMP forwarding to these peers
         3) withdraw one eBGP path - expect LB across remaining eBGP
         """
@@ -1601,19 +1691,22 @@ class TestMPLSPIC(VppTestCase):
         vpn_routes = []
         vpn_bindings = []
         pkts = []
-        for ii in range(64):
+        for ii in range(NUM_PKTS):
             dst = "192.168.1.%d" % ii
             local_label = 1600 + ii
-            vpn_routes.append(VppIpRoute(self, dst, 32,
-                                         [VppRoutePath(self.pg2.remote_ip4,
-                                                       0xffffffff,
-                                                       nh_table_id=1,
-                                                       is_resolve_attached=1),
-                                          VppRoutePath(self.pg3.remote_ip4,
-                                                       0xffffffff,
-                                                       nh_table_id=1,
-                                                       is_resolve_attached=1)],
-                                         table_id=1))
+            vpn_routes.append(VppIpRoute(
+                self, dst, 32,
+                [VppRoutePath(
+                    self.pg2.remote_ip4,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+                 VppRoutePath(
+                     self.pg3.remote_ip4,
+                     0xffffffff,
+                     nh_table_id=1,
+                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()
 
             vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
@@ -1627,19 +1720,29 @@ class TestMPLSPIC(VppTestCase):
                         UDP(sport=1234, dport=1234) /
                         Raw('\xa5' * 100))
 
+        #
+        # Send the packet stream (one pkt to each VPN route)
+        #  - expect a 50-50 split of the traffic
+        #
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
+
+        # not testing the LB hashing algorithm so we're not concerned
+        # with the split ratio, just as long as neither is 0
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
         #
         # use a test CLI command to stop the FIB walk process, this
         # will prevent the FIB converging the VPN routes and thus allow
-        # us to probe the interim (psot-fail, pre-converge) state
+        # us to probe the interim (post-fail, pre-converge) state
         #
         self.vapi.ppcli("test fib-walk-process disable")
 
@@ -1655,34 +1758,48 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
         # enable the FIB walk process to converge the FIB
         #
         self.vapi.ppcli("test fib-walk-process enable")
+
+        #
+        # packets should still be forwarded through the remaining peer
+        #
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
-        # put the connecteds back
+        # put the connected routes back
         #
         self.pg2.config_ip4()
+        self.pg2.resolve_arp()
 
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
     def test_mpls_v6_ebgp_pic(self):
-        """ MPLSv6 eBGP PIC edge convergence
+        """ MPLSv6 eBGP Prefix-Independent Convergence (PIC) edge convergence
 
         1) setup many eBGP VPNv6 routes via a pair of eBGP peers.
         2) Check ECMP forwarding to these peers
@@ -1696,28 +1813,26 @@ class TestMPLSPIC(VppTestCase):
         vpn_routes = []
         vpn_bindings = []
         pkts = []
-        for ii in range(64):
+        for ii in range(NUM_PKTS):
             dst = "3000::%d" % ii
             local_label = 1600 + ii
             vpn_routes.append(VppIpRoute(
                 self, dst, 128,
-                [VppRoutePath(self.pg2.remote_ip6,
-                              0xffffffff,
-                              nh_table_id=1,
-                              is_resolve_attached=1,
-                              proto=DpoProto.DPO_PROTO_IP6),
-                 VppRoutePath(self.pg3.remote_ip6,
-                              0xffffffff,
-                              nh_table_id=1,
-                              proto=DpoProto.DPO_PROTO_IP6,
-                              is_resolve_attached=1)],
-                table_id=1,
-                is_ip6=1))
+                [VppRoutePath(
+                    self.pg2.remote_ip6,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+                 VppRoutePath(
+                     self.pg3.remote_ip6,
+                     0xffffffff,
+                     nh_table_id=1,
+                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()
 
             vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
-                                              ip_table_id=1,
-                                              is_ip6=1))
+                                              ip_table_id=1))
             vpn_bindings[ii].add_vpp_config()
 
             pkts.append(Ether(dst=self.pg0.local_mac,
@@ -1726,20 +1841,24 @@ class TestMPLSPIC(VppTestCase):
                         IPv6(src=self.pg0.remote_ip6, dst=dst) /
                         UDP(sport=1234, dport=1234) /
                         Raw('\xa5' * 100))
+            self.logger.info(self.vapi.cli("sh ip6 fib %s" % dst))
 
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
         #
         # use a test CLI command to stop the FIB walk process, this
         # will prevent the FIB converging the VPN routes and thus allow
-        # us to probe the interim (psot-fail, pre-converge) state
+        # us to probe the interim (post-fail, pre-converge) state
         #
         self.vapi.ppcli("test fib-walk-process disable")
 
@@ -1757,7 +1876,10 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
         # enable the FIB walk process to converge the FIB
@@ -1767,27 +1889,42 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))
 
         #
-        # put the connecteds back
+        # put the connected routes back
         #
         self.pg2.admin_up()
         self.pg2.config_ip6()
+        self.pg2.resolve_ndp()
 
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
 
 
 class TestMPLSL2(VppTestCase):
     """ MPLS-L2 """
 
+    @classmethod
+    def setUpClass(cls):
+        super(TestMPLSL2, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(TestMPLSL2, cls).tearDownClass()
+
     def setUp(self):
         super(TestMPLSL2, self).setUp()
 
@@ -1832,7 +1969,7 @@ class TestMPLSL2(VppTestCase):
             verify_mpls_stack(self, rx, mpls_labels)
 
             tx_eth = tx[Ether]
-            rx_eth = Ether(str(rx[MPLS].payload))
+            rx_eth = Ether(scapy.compat.raw(rx[MPLS].payload))
 
             self.assertEqual(rx_eth.src, tx_eth.src)
             self.assertEqual(rx_eth.dst, tx_eth.dst)
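
scapy.compat.raw() replaces str() so the inner Ethernet frame is re-parsed from bytes rather than from a Python 3 text string; a minimal sketch of the distinction (pkt stands in for any received scapy packet):

    import scapy.compat
    from scapy.layers.l2 import Ether

    # str(pkt) yields text under Python 3 and cannot round-trip raw octets;
    # scapy.compat.raw(pkt) returns the serialized bytes on Python 2 and 3
    rx_eth = Ether(scapy.compat.raw(pkt))
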
@@ -1861,8 +1998,9 @@ class TestMPLSL2(VppTestCase):
             self, 55, 1,
             [VppRoutePath("0.0.0.0",
                           mpls_tun_1.sw_if_index,
-                          is_interface_rx=1,
-                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
         route_55_eos.add_vpp_config()
 
         #
@@ -1887,7 +2025,7 @@ class TestMPLSL2(VppTestCase):
                  UDP(sport=1234, dport=1234) /
                  Raw('\xa5' * 100))
 
-        tx0 = pcore * 65
+        tx0 = pcore * NUM_PKTS
         rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
         payload = pcore[MPLS].payload
 
@@ -1895,9 +2033,9 @@ class TestMPLSL2(VppTestCase):
         self.assertEqual(rx0[0][Ether].src, payload[Ether].src)
 
         #
-        # Inject a packet from the custoer/L2 side
+        # Inject a packet from the customer/L2 side
         #
-        tx1 = pcore[MPLS].payload * 65
+        tx1 = pcore[MPLS].payload * NUM_PKTS
         rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)
 
         self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])
@@ -1923,17 +2061,18 @@ class TestMPLSL2(VppTestCase):
             self, 55, 1,
             [VppRoutePath("0.0.0.0",
                           mpls_tun.sw_if_index,
-                          is_interface_rx=1,
-                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
         route_55_eos.add_vpp_config()
 
         #
         # add the tunnel to the customer's bridge-domain
         #
-        self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
-                                             bd_id=1)
-        self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
-                                             bd_id=1)
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1)
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)
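
The bridge-domain binding is now made with named fields; a short sketch of the add/remove pair as used in this test (enable=0 detaches the interface again):

    # attach the tunnel to bridge-domain 1
    self.vapi.sw_interface_set_l2_bridge(
        rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1)
    # detach it once the test traffic has been verified
    self.vapi.sw_interface_set_l2_bridge(
        rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1, enable=0)
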
 
         #
         # Packet from the customer interface and from the core
@@ -1973,24 +2112,23 @@ class TestMPLSL2(VppTestCase):
         #
         # now a stream in each direction
         #
-        self.pg1.add_stream(p_cust * 65)
+        self.pg1.add_stream(p_cust * NUM_PKTS)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
 
-        rx0 = self.pg0.get_capture(65)
+        rx0 = self.pg0.get_capture(NUM_PKTS)
 
-        self.verify_capture_tunneled_ethernet(rx0, p_cust*65,
+        self.verify_capture_tunneled_ethernet(rx0, p_cust*NUM_PKTS,
                                               [VppMplsLabel(42)])
 
         #
         # remove interfaces from the customer's bridge-domain
         #
-        self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
-                                             bd_id=1,
-                                             enable=0)
-        self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
-                                             bd_id=1,
-                                             enable=0)
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1, enable=0)
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)
+
 
 if __name__ == '__main__':
     unittest.main(testRunner=VppTestRunner)