MPLS disposition actions at the tail of unicast LSPs 77/9377/3
author    Neale Ranns <nranns@cisco.com>
Tue, 31 Oct 2017 19:28:22 +0000 (12:28 -0700)
committer Damjan Marion <dmarion.lists@gmail.com>
Sat, 11 Nov 2017 14:23:45 +0000 (14:23 +0000)
Change-Id: I8c42e26152f2ed1246f91b789887bfc923418bdf
Signed-off-by: Neale Ranns <nranns@cisco.com>
src/vnet/fib/fib_entry_src.c
src/vnet/fib/fib_entry_src_mpls.c
src/vnet/fib/fib_path.c
src/vnet/fib/fib_path_ext.c
src/vnet/fib/fib_test.c
src/vnet/fib/fib_test.h
test/test_dvr.py
test/test_ip4.py
test/test_ip6.py
test/test_mpls.py

diff --git a/src/vnet/fib/fib_entry_src.c b/src/vnet/fib/fib_entry_src.c
index 667aa48..214dafe 100644
@@ -259,6 +259,23 @@ fib_entry_chain_type_fixup (const fib_entry_t *entry,
     return (dfct);
 }
 
+static dpo_proto_t
+fib_prefix_get_payload_proto (const fib_prefix_t *pfx)
+{
+    switch (pfx->fp_proto)
+    {
+    case FIB_PROTOCOL_IP4:
+        return (DPO_PROTO_IP4);
+    case FIB_PROTOCOL_IP6:
+        return (DPO_PROTO_IP6);
+    case FIB_PROTOCOL_MPLS:
+        return (pfx->fp_payload_proto);
+    }
+
+    ASSERT(0);
+    return (DPO_PROTO_IP4);
+}
+
 static void
 fib_entry_src_get_path_forwarding (fib_node_index_t path_index,
                                    fib_entry_src_collect_forwarding_ctx_t *ctx)
@@ -313,7 +330,7 @@ fib_entry_src_get_path_forwarding (fib_node_index_t path_index,
                                                                       ctx->fct),
                                            &nh->path_dpo);
             fib_path_stack_mpls_disp(path_index,
-                                     ctx->fib_entry->fe_prefix.fp_payload_proto,
+                                     fib_prefix_get_payload_proto(&ctx->fib_entry->fe_prefix),
                                      &nh->path_dpo);
 
             break;
diff --git a/src/vnet/fib/fib_entry_src_mpls.c b/src/vnet/fib/fib_entry_src_mpls.c
index 6fdd5c0..f80d42a 100644
@@ -170,7 +170,7 @@ static u8*
 fib_entry_src_mpls_format (fib_entry_src_t *src,
                           u8* s)
 {
-    return (format(s, "MPLS local-label:%d", src->mpls.fesm_label));
+    return (format(s, " local-label:%d", src->mpls.fesm_label));
 }
 
 const static fib_entry_src_vft_t mpls_src_vft = {
diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c
index 3e03192..8dabfdf 100644
@@ -2259,6 +2259,18 @@ fib_path_stack_mpls_disp (fib_node_index_t path_index,
 
     switch (path->fp_type)
     {
+    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+    {
+        dpo_id_t tmp = DPO_INVALID;
+
+        dpo_copy(&tmp, dpo);
+        dpo_set(dpo,
+                DPO_MPLS_DISPOSITION,
+                payload_proto,
+                mpls_disp_dpo_create(payload_proto, ~0, &tmp));
+        dpo_reset(&tmp);
+        break;
+    }                
     case FIB_PATH_TYPE_DEAG:
     {
         dpo_id_t tmp = DPO_INVALID;
@@ -2275,7 +2287,6 @@ fib_path_stack_mpls_disp (fib_node_index_t path_index,
     }
     case FIB_PATH_TYPE_RECEIVE:
     case FIB_PATH_TYPE_ATTACHED:
-    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
     case FIB_PATH_TYPE_RECURSIVE:
     case FIB_PATH_TYPE_INTF_RX:
     case FIB_PATH_TYPE_UDP_ENCAP:
diff --git a/src/vnet/fib/fib_path_ext.c b/src/vnet/fib/fib_path_ext.c
index 4438671..a285ba0 100644
@@ -255,6 +255,15 @@ fib_path_ext_stack (fib_path_ext_t *path_ext,
                     chain_proto,
                     mldi);
        }
+        else if (child_fct == FIB_FORW_CHAIN_TYPE_MPLS_EOS)
+        {
+            /*
+             * MPLS EOS packets using an imp-null. Insert the disposition.
+             */
+            fib_path_stack_mpls_disp(nh->path_index,
+                                     fib_forw_chain_type_to_dpo_proto(parent_fct),
+                                     &nh->path_dpo);
+        }
     }
     dpo_reset(&via_dpo);
 
diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c
index 2658eb2..b74ec33 100644
@@ -29,6 +29,7 @@
 #include <vnet/dpo/interface_rx_dpo.h>
 #include <vnet/dpo/replicate_dpo.h>
 #include <vnet/dpo/l2_bridge_dpo.h>
+#include <vnet/dpo/mpls_disposition.h>
 
 #include <vnet/mpls/mpls.h>
 
@@ -514,6 +515,30 @@ fib_test_validate_lb_v (const load_balance_t *lb,
                        bucket,
                        exp->adj.adj);
            break;
+       case FT_LB_MPLS_DISP_O_ADJ:
+        {
+            const mpls_disp_dpo_t *mdd;
+
+            FIB_TEST_I((DPO_MPLS_DISPOSITION == dpo->dpoi_type),
+                      "bucket %d stacks on %U",
+                      bucket,
+                      format_dpo_type, dpo->dpoi_type);
+           
+            mdd = mpls_disp_dpo_get(dpo->dpoi_index);
+
+            dpo = &mdd->mdd_dpo;
+
+           FIB_TEST_I(((DPO_ADJACENCY == dpo->dpoi_type) ||
+                       (DPO_ADJACENCY_INCOMPLETE == dpo->dpoi_type)),
+                      "bucket %d stacks on %U",
+                      bucket,
+                      format_dpo_type, dpo->dpoi_type);
+           FIB_TEST_LB((exp->adj.adj == dpo->dpoi_index),
+                       "bucket %d stacks on adj %d",
+                       bucket,
+                       exp->adj.adj);
+           break;
+        }
        case FT_LB_INTF:
            FIB_TEST_I((DPO_INTERFACE_RX == dpo->dpoi_type),
                       "bucket %d stacks on %U",
@@ -6380,6 +6405,12 @@ fib_test_label (void)
        .fp_label = 24001,
        .fp_eos = MPLS_NON_EOS,
     };
+    fib_test_lb_bucket_t disp_o_10_10_11_1 = {
+       .type = FT_LB_MPLS_DISP_O_ADJ,
+       .adj = {
+           .adj = ai_v4_10_10_11_1,
+       },
+    };
 
     /*
      * The EOS entry should link to both the paths,
@@ -6393,10 +6424,10 @@ fib_test_label (void)
                                     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
                                     2,
                                     &l99_eos_o_10_10_10_1,
-                                    &a_o_10_10_11_1),
+                                    &disp_o_10_10_11_1),
             "24001/eos LB 2 buckets via: "
             "label 99 over 10.10.10.1, "
             "24001/eos LB 2 buckets via: "
             "label 99 over 10.10.10.1, "
-            "adj over 10.10.11.1");
+            "mpls disp adj over 10.10.11.1");
 
 
     fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
@@ -6419,6 +6450,13 @@ fib_test_label (void)
            .adj = ai_v4_10_10_11_2,
        },
     };
+    fib_test_lb_bucket_t disp_o_10_10_11_2 = {
+       .type = FT_LB_MPLS_DISP_O_ADJ,
+       .adj = {
+           .adj = ai_v4_10_10_11_2,
+       },
+    };
+
 
     fei = fib_table_entry_path_add(fib_index,
                                   &pfx_1_1_1_1_s_32,
@@ -6567,11 +6605,11 @@ fib_test_label (void)
     FIB_TEST(fib_test_validate_entry(fei, 
                                     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
                                     2,
-                                    &a_o_10_10_11_1,
-                                    &adj_o_10_10_11_2),
+                                    &disp_o_10_10_11_1,
+                                    &disp_o_10_10_11_2),
             "24001/eos LB 2 buckets via: "
             "24001/eos LB 2 buckets via: "
-            "adj over 10.10.11.1, ",
-            "adj-v4 over 10.10.11.2");
+            "mpls-disp adj over 10.10.11.1, ",
+            "mpls-disp adj-v4 over 10.10.11.2");
 
     fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
                           &pfx_24001_neos);
@@ -6644,20 +6682,20 @@ fib_test_label (void)
                                     &l99_eos_o_10_10_10_1,
                                     &l99_eos_o_10_10_10_1,
                                     &l99_eos_o_10_10_10_1,
-                                    &a_o_10_10_11_1,
-                                    &a_o_10_10_11_1,
-                                    &a_o_10_10_11_1,
-                                    &a_o_10_10_11_1,
-                                    &a_o_10_10_11_1,
-                                    &adj_o_10_10_11_2,
-                                    &adj_o_10_10_11_2,
-                                    &adj_o_10_10_11_2,
-                                    &adj_o_10_10_11_2,
-                                    &adj_o_10_10_11_2),
+                                    &disp_o_10_10_11_1,
+                                    &disp_o_10_10_11_1,
+                                    &disp_o_10_10_11_1,
+                                    &disp_o_10_10_11_1,
+                                    &disp_o_10_10_11_1,
+                                    &disp_o_10_10_11_2,
+                                    &disp_o_10_10_11_2,
+                                    &disp_o_10_10_11_2,
+                                    &disp_o_10_10_11_2,
+                                    &disp_o_10_10_11_2),
             "24001/eos LB 16 buckets via: "
             "label 99 over 10.10.10.1, "
             "24001/eos LB 16 buckets via: "
             "label 99 over 10.10.10.1, "
-            "adj over 10.10.11.1",
-            "adj-v4 over 10.10.11.2");
+            "MPLS disp adj over 10.10.11.1",
+            "MPLS disp adj-v4 over 10.10.11.2");
 
     fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
                           &pfx_24001_neos);
@@ -6698,11 +6736,11 @@ fib_test_label (void)
     FIB_TEST(fib_test_validate_entry(fei, 
                                     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
                                     2,
-                                    &a_o_10_10_11_1,
-                                    &adj_o_10_10_11_2),
+                                    &disp_o_10_10_11_1,
+                                    &disp_o_10_10_11_2),
             "24001/eos LB 2 buckets via: "
             "24001/eos LB 2 buckets via: "
-            "adj over 10.10.11.1, "
-            "adj-v4 over 10.10.11.2");
+            "MPLS disp adj over 10.10.11.1, "
+            "MPLS disp adj-v4 over 10.10.11.2");
 
     fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
                           &pfx_24001_neos);
@@ -6750,9 +6788,9 @@ fib_test_label (void)
     FIB_TEST(fib_test_validate_entry(fei, 
                                     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
                                     1,
-                                    &adj_o_10_10_11_2),
+                                    &disp_o_10_10_11_2),
             "24001/eos LB 1 buckets via: "
             "24001/eos LB 1 buckets via: "
-            "adj over 10.10.11.2");
+            "MPLS disp adj over 10.10.11.2");
 
     fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
                           &pfx_24001_neos);
@@ -6796,10 +6834,10 @@ fib_test_label (void)
                                     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
                                     2,
                                     &l99_eos_o_10_10_10_1,
-                                    &adj_o_10_10_11_2),
+                                    &disp_o_10_10_11_2),
             "24001/eos LB 2 buckets via: "
             "label 99 over 10.10.10.1, "
             "24001/eos LB 2 buckets via: "
             "label 99 over 10.10.10.1, "
-            "adj over 10.10.11.2");
+            "MPLS disp adj over 10.10.11.2");
 
     fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
                           &pfx_24001_neos);
@@ -6841,10 +6879,10 @@ fib_test_label (void)
                                     FIB_FORW_CHAIN_TYPE_MPLS_EOS,
                                     2,
                                     &l99_eos_o_10_10_10_1,
-                                    &adj_o_10_10_11_2),
+                                    &disp_o_10_10_11_2),
             "25005/eos LB 2 buckets via: "
             "label 99 over 10.10.10.1, "
             "25005/eos LB 2 buckets via: "
             "label 99 over 10.10.10.1, "
-            "adj over 10.10.11.2");
+            "MPLS disp adj over 10.10.11.2");
 
     fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
                           &pfx_25005_neos);
diff --git a/src/vnet/fib/fib_test.h b/src/vnet/fib/fib_test.h
index f3d8346..53697cb 100644
@@ -29,6 +29,7 @@ typedef enum fib_test_lb_bucket_type_t_ {
     FT_LB_LABEL_STACK_O_ADJ,
     FT_LB_LABEL_O_LB,
     FT_LB_O_LB,
+    FT_LB_MPLS_DISP_O_ADJ,
     FT_LB_INTF,
     FT_LB_L2,
     FT_LB_BIER_TABLE,
diff --git a/test/test_dvr.py b/test/test_dvr.py
index 27522a5..f5d5e54 100644
@@ -15,7 +15,7 @@ from util import ppp
 
 
 class TestDVR(VppTestCase):
-    """ IPv4 Load-Balancing """
+    """ Distributed Virtual Router """
 
     def setUp(self):
         super(TestDVR, self).setUp()
@@ -83,8 +83,6 @@ class TestDVR(VppTestCase):
                                                   L2_VTR_OP.L2_POP_1,
                                                   93)
 
-        self.logger.error(self.vapi.ppcli("show bridge-domain 1 detail"))
-
         #
         # Add routes to bridge the traffic via a tagged and a non-tagged interface
         #
diff --git a/test/test_ip4.py b/test/test_ip4.py
index b05635f..12fbced 100644
@@ -1360,7 +1360,6 @@ class TestIPInput(VppTestCase):
         self.assertEqual(icmp.src, self.pg0.remote_ip4)
         self.assertEqual(icmp.dst, self.pg1.remote_ip4)
 
-        self.logger.error(self.vapi.cli("sh error"))
 
 if __name__ == '__main__':
     unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_ip6.py b/test/test_ip6.py
index 0a0d56c..684eff5 100644
@@ -1278,6 +1278,7 @@ class TestIP6LoadBalance(VppTestCase):
         super(TestIP6LoadBalance, self).tearDown()
 
     def send_and_expect_load_balancing(self, input, pkts, outputs):
+        self.vapi.cli("clear trace")
         input.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
@@ -1286,6 +1287,7 @@ class TestIP6LoadBalance(VppTestCase):
             self.assertNotEqual(0, len(rx))
 
     def send_and_expect_one_itf(self, input, pkts, itf):
+        self.vapi.cli("clear trace")
         input.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()
@@ -1691,8 +1693,6 @@ class TestIP6Input(VppTestCase):
         # 0: "hop limit exceeded in transit",
         self.assertEqual(icmp.code, 0)
 
         # 0: "hop limit exceeded in transit",
         self.assertEqual(icmp.code, 0)
 
-        self.logger.error(self.vapi.cli("sh error"))
-
 
 if __name__ == '__main__':
     unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_mpls.py b/test/test_mpls.py
index d265e85..9590519 100644
@@ -12,7 +12,7 @@ from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
 from scapy.packet import Raw
 from scapy.layers.l2 import Ether
 from scapy.layers.inet import IP, UDP, ICMP
-from scapy.layers.inet6 import IPv6
+from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
 from scapy.contrib.mpls import MPLS
 
 
@@ -288,6 +288,32 @@ class TestMPLS(VppTestCase):
         except:
             raise
 
+    def verify_capture_ip6_icmp(self, src_if, capture, sent):
+        try:
+            self.assertEqual(len(capture), len(sent))
+
+            for i in range(len(capture)):
+                tx = sent[i]
+                rx = capture[i]
+
+                # the rx'd packet has the MPLS label popped
+                eth = rx[Ether]
+                self.assertEqual(eth.type, 0x86DD)
+
+                tx_ip = tx[IPv6]
+                rx_ip = rx[IPv6]
+
+                self.assertEqual(rx_ip.dst, tx_ip.src)
+                # ICMP sourced from the interface's address
+                self.assertEqual(rx_ip.src, src_if.local_ip6)
+                # hop-limit set to 255 on the ICMP packet, decremented once when forwarded
+                self.assertEqual(rx_ip.hlim, 254)
+
+                icmp = rx[ICMPv6TimeExceeded]
+
+        except:
+            raise
+
     def send_and_assert_no_replies(self, intf, pkts, remark):
         intf.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
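
A note on the bare "icmp = rx[ICMPv6TimeExceeded]" line in verify_capture_ip6_icmp() above: indexing a scapy packet by a layer it does not contain raises IndexError, so that assignment doubles as the check that the reply really is a Time Exceeded message. A minimal, self-contained sketch of the behaviour (addresses and layers here are illustrative, not taken from the test):

    from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded, ICMPv6EchoRequest

    reply = IPv6(src="2001:db8::1", dst="2001:db8::2") / ICMPv6TimeExceeded()
    icmp = reply[ICMPv6TimeExceeded]           # layer present: returned
    assert icmp.type == 3 and icmp.code == 0   # hop limit exceeded in transit

    other = IPv6(src="2001:db8::1", dst="2001:db8::2") / ICMPv6EchoRequest()
    try:
        other[ICMPv6TimeExceeded]              # layer absent
    except IndexError:
        pass                                   # this is what fails the test
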
@@ -363,6 +389,91 @@ class TestMPLS(VppTestCase):
         rx = self.pg0.get_capture()
         self.verify_capture_ip4(self.pg0, rx, tx)
 
+        #
+        # disposed packets have an invalid IPv4 checksum
+        #
+        tx = self.create_stream_labelled_ip4(self.pg0, [33],
+                                             dst_ip=self.pg0.remote_ip4,
+                                             n=65,
+                                             chksum=1)
+        self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")
+
+        #
+        # An MPLS xconnect - EOS label in IPv6 out
+        #
+        route_333_eos = VppMplsRoute(
+            self, 333, 1,
+            [VppRoutePath(self.pg0.remote_ip6,
+                          self.pg0.sw_if_index,
+                          labels=[],
+                          proto=DpoProto.DPO_PROTO_IP6)])
+        route_333_eos.add_vpp_config()
+
+        self.vapi.cli("clear trace")
+        tx = self.create_stream_labelled_ip6(self.pg0, [333], 64)
+        self.pg0.add_stream(tx)
+
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx = self.pg0.get_capture()
+        self.verify_capture_ip6(self.pg0, rx, tx)
+
+        #
+        # disposed packets have an expired TTL
+        #
+        tx = self.create_stream_labelled_ip6(self.pg0, [333], 64,
+                                             dst_ip=self.pg1.remote_ip6,
+                                             hlim=1)
+
+        self.vapi.cli("clear trace")
+        tx = self.create_stream_labelled_ip6(self.pg0, [333], 64,
+                                             dst_ip=self.pg1.remote_ip6,
+                                             hlim=0)
+        self.pg0.add_stream(tx)
+
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx = self.pg0.get_capture()
+        self.verify_capture_ip6_icmp(self.pg0, rx, tx)
+
+        #
+        # An MPLS xconnect - EOS label in IPv6 out w imp-null
+        #
+        route_334_eos = VppMplsRoute(
+            self, 334, 1,
+            [VppRoutePath(self.pg0.remote_ip6,
+                          self.pg0.sw_if_index,
+                          labels=[3],
+                          proto=DpoProto.DPO_PROTO_IP6)])
+        route_334_eos.add_vpp_config()
+
+        self.vapi.cli("clear trace")
+        tx = self.create_stream_labelled_ip6(self.pg0, [334], 64)
+        self.pg0.add_stream(tx)
+
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx = self.pg0.get_capture()
+        self.verify_capture_ip6(self.pg0, rx, tx)
+
+        #
+        # disposed packets have an expired TTL
+        #
+        self.vapi.cli("clear trace")
+        tx = self.create_stream_labelled_ip6(self.pg0, [334], 64,
+                                             dst_ip=self.pg1.remote_ip6,
+                                             hlim=0)
+        self.pg0.add_stream(tx)
+
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx = self.pg0.get_capture()
+        self.verify_capture_ip6_icmp(self.pg0, rx, tx)
+
         #
         # An MPLS xconnect - non-EOS label in IP out - an invalid configuration
         # so this traffic should be dropped.
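
create_stream_labelled_ip6() is defined earlier in this test module and is not shown in this diff; roughly, the hlim=0 cases above put frames of the following shape on the wire (a sketch only; the MAC addresses, IPv6 addresses and ports are illustrative):

    from scapy.packet import Raw
    from scapy.layers.l2 import Ether
    from scapy.layers.inet import UDP
    from scapy.layers.inet6 import IPv6
    from scapy.contrib.mpls import MPLS

    # EOS label 334 at the LSP tail, IPv6 payload with an already-expired hop limit
    pkt = (Ether(src="02:fe:00:00:00:01", dst="02:fe:00:00:00:02") /
           MPLS(label=334, ttl=64, s=1) /
           IPv6(src="2001:db8::2", dst="2001:db8::3", hlim=0) /
           UDP(sport=1234, dport=1234) /
           Raw(b'\xa5' * 100))

At the tail VPP pops the label and the new MPLS disposition DPO treats the exposed payload as IPv6, so the expired hop limit yields the ICMPv6 Time Exceeded reply that verify_capture_ip6_icmp() looks for.
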
@@ -1043,7 +1154,6 @@ class TestMPLS(VppTestCase):
         tx = self.create_stream_labelled_ip4(self.pg0, [34],
                                              dst_ip="232.1.1.1")
         self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
-        self.logger.error(self.vapi.cli("sh error"))
 
     def test_mcast_ip6_tail(self):
         """ MPLS IPv6 Multicast Tail """