ipsec: Support MPLS over IPSec[46] interface
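The encrypt path is now keyed by the payload link type (VNET_LINK_IP4, VNET_LINK_IP6 or VNET_LINK_MPLS) rather than an is_ip6 flag, each encrypt node gains per-link drop and handoff next nodes, a dedicated esp-mpls-encrypt-tun node is added, and the esp-encrypt-pending node is dropped in favour of counting async drops (n_async_drop) and enqueueing them at the end of the frame. A condensed, illustrative sketch of the per-link dispatch this introduces (the helper name below is not part of the patch; esp_encrypt_inline computes drop_next and handoff_next this way at entry):

    /* illustrative only: how the per-link drop next index is derived
     * from the vnet_link_t argument that replaces the old is_ip6 flag */
    static_always_inline u16
    esp_encrypt_drop_next_for_link (vnet_link_t lt)
    {
      if (lt == VNET_LINK_IP6)
        return ESP_ENCRYPT_NEXT_DROP6;
      if (lt == VNET_LINK_IP4)
        return ESP_ENCRYPT_NEXT_DROP4;
      return ESP_ENCRYPT_NEXT_DROP_MPLS; /* VNET_LINK_MPLS */
    }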
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 15f590a..bd6e764 100644
 #include <vnet/vnet.h>
 #include <vnet/api_errno.h>
 #include <vnet/ip/ip.h>
-#include <vnet/udp/udp.h>
 
 #include <vnet/crypto/crypto.h>
 
 #include <vnet/ipsec/ipsec.h>
 #include <vnet/ipsec/ipsec_tun.h>
 #include <vnet/ipsec/esp.h>
+#include <vnet/tunnel/tunnel_dp.h>
 
-#define foreach_esp_encrypt_next                   \
-_(DROP, "error-drop")                              \
-_(PENDING, "pending")                              \
-_(HANDOFF, "handoff")                              \
-_(INTERFACE_OUTPUT, "interface-output")
+#define foreach_esp_encrypt_next                                              \
+  _ (DROP4, "ip4-drop")                                                       \
+  _ (DROP6, "ip6-drop")                                                       \
+  _ (DROP_MPLS, "mpls-drop")                                                  \
+  _ (HANDOFF4, "handoff4")                                                    \
+  _ (HANDOFF6, "handoff6")                                                    \
+  _ (HANDOFF_MPLS, "handoff-mpls")                                            \
+  _ (INTERFACE_OUTPUT, "interface-output")
 
 #define _(v, s) ESP_ENCRYPT_NEXT_##v,
 typedef enum
@@ -110,17 +113,17 @@ format_esp_post_encrypt_trace (u8 * s, va_list * args)
 /* pad packet in input buffer */
 static_always_inline u8 *
 esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
-                       u8 block_size, u8 icv_sz,
+                       u8 esp_align, u8 icv_sz,
                        u16 * next, vlib_node_runtime_t * node,
                        u16 buffer_data_size, uword total_len)
 {
   static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
     0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
-    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00,
+    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
   };
 
   u16 min_length = total_len + sizeof (esp_footer_t);
-  u16 new_length = round_pow2 (min_length, block_size);
+  u16 new_length = round_pow2 (min_length, esp_align);
   u8 pad_bytes = new_length - min_length;
   esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
                                      last[0]->current_length + pad_bytes);
@@ -235,7 +238,8 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
 static_always_inline void
 esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_crypto_op_t * ops, vlib_buffer_t * b[],
-                        u16 * nexts, vnet_crypto_op_chunk_t * chunks)
+                        u16 * nexts, vnet_crypto_op_chunk_t * chunks,
+                        u16 drop_next)
 {
   u32 n_fail, n_ops = vec_len (ops);
   vnet_crypto_op_t *op = ops;
@@ -253,7 +257,7 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
-         nexts[bi] = ESP_ENCRYPT_NEXT_DROP;
+         nexts[bi] = drop_next;
          n_fail--;
        }
       op++;
@@ -262,7 +266,8 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
 
 static_always_inline void
 esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
-                vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
+                vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
+                u16 drop_next)
 {
   u32 n_fail, n_ops = vec_len (ops);
   vnet_crypto_op_t *op = ops;
@@ -280,7 +285,7 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
-         nexts[bi] = ESP_ENCRYPT_NEXT_DROP;
+         nexts[bi] = drop_next;
          n_fail--;
        }
       op++;
@@ -471,7 +476,7 @@ esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                         vnet_crypto_async_frame_t ** async_frame,
                         ipsec_sa_t * sa, vlib_buffer_t * b,
                         esp_header_t * esp, u8 * payload, u32 payload_len,
-                        u8 iv_sz, u8 icv_sz, u32 bi, u16 * next, u32 hdr_len,
+                        u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
                         u16 async_next, vlib_buffer_t * lb)
 {
   esp_post_data_t *post = esp_post_data (b);
@@ -481,8 +486,7 @@ esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
   i16 crypto_start_offset, integ_start_offset = 0;
   u16 crypto_total_len, integ_total_len;
 
-  post->next_index = next[0];
-  next[0] = ESP_ENCRYPT_NEXT_PENDING;
+  post->next_index = next;
 
   /* crypto */
   crypto_start_offset = payload - b->data;
@@ -552,23 +556,9 @@ out:
                                         iv, tag, aad, flag);
 }
 
-/* when submitting a frame is failed, drop all buffers in the frame */
-static_always_inline void
-esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
-                                vlib_buffer_t ** b, u16 * next)
-{
-  u32 n_drop = f->n_elts;
-  while (--n_drop)
-    {
-      (b - n_drop)[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-      (next - n_drop)[0] = ESP_ENCRYPT_NEXT_DROP;
-    }
-  vnet_crypto_async_reset_frame (f);
-}
-
 always_inline uword
-esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-                   vlib_frame_t * frame, int is_ip6, int is_tun,
+esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+                   vlib_frame_t *frame, vnet_link_t lt, int is_tun,
                    u16 async_next)
 {
   ipsec_main_t *im = &ipsec_main;
@@ -582,7 +572,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
   u32 current_sa_index = ~0, current_sa_packets = 0;
   u32 current_sa_bytes = 0, spi = 0;
-  u8 block_sz = 0, iv_sz = 0, icv_sz = 0;
+  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
   ipsec_sa_t *sa0 = 0;
   vlib_buffer_t *lb;
   vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
@@ -590,6 +580,15 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   vnet_crypto_async_frame_t *async_frame = 0;
   int is_async = im->async_mode;
   vnet_crypto_async_op_id_t last_async_op = ~0;
+  u16 drop_next =
+    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
+                          (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
+                                                 ESP_ENCRYPT_NEXT_DROP_MPLS));
+  u16 handoff_next = (lt == VNET_LINK_IP6 ?
+                       ESP_ENCRYPT_NEXT_HANDOFF6 :
+                       (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
+                                              ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
+  u16 n_async_drop = 0;
 
   vlib_get_buffers (vm, from, b, n_left);
   if (!is_async)
@@ -618,6 +617,9 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
          p -= CLIB_CACHE_LINE_BYTES;
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+         /* speculate that the trailer goes in the first buffer */
+         CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
+                        CLIB_CACHE_LINE_BYTES, LOAD);
        }
 
       if (is_tun)
@@ -640,9 +642,13 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          current_sa_packets = current_sa_bytes = 0;
 
          sa0 = pool_elt_at_index (im->sad, sa_index0);
+
+         /* fetch the second cacheline ASAP */
+         CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
+
          current_sa_index = sa_index0;
          spi = clib_net_to_host_u32 (sa0->spi);
-         block_sz = sa0->crypto_block_size;
+         esp_align = sa0->esp_block_align;
          icv_sz = sa0->integ_icv_size;
          iv_sz = sa0->crypto_iv_size;
 
@@ -651,9 +657,11 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
            {
              if (async_frame && async_frame->n_elts)
                {
-                 if (vnet_crypto_async_submit_open_frame (vm, async_frame)
-                     < 0)
-                   esp_async_recycle_failed_submit (async_frame, b, next);
+                 if (vnet_crypto_async_submit_open_frame (vm, async_frame))
+                   esp_async_recycle_failed_submit (async_frame, b, from,
+                                                    nexts, &n_async_drop,
+                                                    drop_next,
+                                                    ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
                }
              async_frame =
                vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
@@ -670,9 +678,10 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                                    ipsec_sa_assign_thread (thread_index));
        }
 
-      if (PREDICT_TRUE (thread_index != sa0->encrypt_thread_index))
+      if (PREDICT_FALSE (thread_index != sa0->encrypt_thread_index))
        {
-         next[0] = ESP_ENCRYPT_NEXT_HANDOFF;
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop, handoff_next, next);
          goto trace;
        }
 
@@ -681,29 +690,23 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (n_bufs == 0)
        {
          b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
-         next[0] = ESP_ENCRYPT_NEXT_DROP;
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop, drop_next, next);
          goto trace;
        }
 
       if (n_bufs > 1)
        {
-         crypto_ops = &ptd->chained_crypto_ops;
-         integ_ops = &ptd->chained_integ_ops;
-
          /* find last buffer in the chain */
          while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
            lb = vlib_get_buffer (vm, lb->next_buffer);
        }
-      else
-       {
-         crypto_ops = &ptd->crypto_ops;
-         integ_ops = &ptd->integ_ops;
-       }
 
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
          b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
-         next[0] = ESP_ENCRYPT_NEXT_DROP;
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop, drop_next, next);
          goto trace;
        }
 
@@ -713,7 +716,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (ipsec_sa_is_set_IS_TUNNEL (sa0))
        {
          payload = vlib_buffer_get_current (b[0]);
-         next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, block_sz, icv_sz,
+         next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
                                                 next, node,
                                                 buffer_data_size,
                                                 vlib_buffer_length_in_chain
@@ -721,7 +724,8 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          if (!next_hdr_ptr)
            {
              b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
-             next[0] = ESP_ENCRYPT_NEXT_DROP;
+             esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                                 &n_async_drop, drop_next, next);
              goto trace;
            }
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -747,9 +751,32 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
              u16 len = sizeof (ip6_header_t);
              hdr_len += len;
              ip6 = (ip6_header_t *) (payload - hdr_len);
-             clib_memcpy_fast (ip6, &sa0->ip6_hdr, len);
-             *next_hdr_ptr = (is_ip6 ?
-                              IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
+             clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));
+
+             if (VNET_LINK_IP6 == lt)
+               {
+                 *next_hdr_ptr = IP_PROTOCOL_IPV6;
+                 tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
+                                         (const ip6_header_t *) payload,
+                                         ip6);
+               }
+             else if (VNET_LINK_IP4 == lt)
+               {
+                 *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
+                 tunnel_encap_fixup_4o6 (sa0->tunnel_flags,
+                                         (const ip4_header_t *) payload,
+                                         ip6);
+               }
+             else if (VNET_LINK_MPLS == lt)
+               {
+                 *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
+                 tunnel_encap_fixup_mplso6 (
+                   sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
+                   ip6);
+               }
+             else
+               ASSERT (0);
+
              len = payload_len_total + hdr_len - len;
              ip6->payload_length = clib_net_to_host_u16 (len);
            }
@@ -759,9 +786,32 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
              u16 len = sizeof (ip4_header_t);
              hdr_len += len;
              ip4 = (ip4_header_t *) (payload - hdr_len);
-             clib_memcpy_fast (ip4, &sa0->ip4_hdr, len);
-             *next_hdr_ptr = (is_ip6 ?
-                              IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
+             clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));
+
+             if (VNET_LINK_IP6 == lt)
+               {
+                 *next_hdr_ptr = IP_PROTOCOL_IPV6;
+                 tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
+                                                  (const ip6_header_t *)
+                                                  payload, ip4);
+               }
+             else if (VNET_LINK_IP4 == lt)
+               {
+                 *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
+                 tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
+                                                  (const ip4_header_t *)
+                                                  payload, ip4);
+               }
+             else if (VNET_LINK_MPLS == lt)
+               {
+                 *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
+                 tunnel_encap_fixup_mplso4_w_chksum (
+                   sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
+                   ip4);
+               }
+             else
+               ASSERT (0);
+
              len = payload_len_total + hdr_len;
              esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
            }
@@ -783,19 +833,24 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          u16 udp_len = 0;
          u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
 
-         ip_len = is_ip6 ?
-           esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
-           ip4_header_bytes ((ip4_header_t *) old_ip_hdr);
+         ip_len =
+           (VNET_LINK_IP6 == lt ?
+              esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
+              ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
 
          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
-         next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, block_sz, icv_sz,
+         next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
                                                 next, node,
                                                 buffer_data_size,
                                                 vlib_buffer_length_in_chain
                                                 (vm, b[0]));
          if (!next_hdr_ptr)
-           goto trace;
+           {
+             esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                                 &n_async_drop, drop_next, next);
+             goto trace;
+           }
 
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
@@ -829,7 +884,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          else
            l2_len = 0;
 
-         if (is_ip6)
+         if (VNET_LINK_IP6 == lt)
            {
              ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
              if (PREDICT_TRUE (NULL == ext_hdr))
@@ -846,7 +901,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
                                      sizeof (ip6_header_t));
            }
-         else
+         else if (VNET_LINK_IP4 == lt)
            {
              u16 len;
              ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
@@ -871,20 +926,42 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
        }
 
+      if (lb != b[0])
+       {
+         crypto_ops = &ptd->chained_crypto_ops;
+         integ_ops = &ptd->chained_integ_ops;
+       }
+      else
+       {
+         crypto_ops = &ptd->crypto_ops;
+         integ_ops = &ptd->integ_ops;
+       }
+
       esp->spi = spi;
       esp->seq = clib_net_to_host_u32 (sa0->seq);
 
       if (is_async)
        {
          if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
-           goto trace;
+           {
+             esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                                 &n_async_drop, drop_next, next);
+             goto trace;
+           }
 
          if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
                                       payload, payload_len, iv_sz,
-                                      icv_sz, from[b - bufs], next, hdr_len,
-                                      async_next, lb))
+                                      icv_sz, from[b - bufs], next[0],
+                                      hdr_len, async_next, lb))
            {
-             esp_async_recycle_failed_submit (async_frame, b, next);
+             /* A failure here can only be caused by submission; free the whole frame. */
+             if (async_frame->n_elts)
+               esp_async_recycle_failed_submit (async_frame, b, from, nexts,
+                                                &n_async_drop, drop_next,
+                                                ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+             b[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+             esp_set_next_index (1, from, nexts, from[b - bufs],
+                                 &n_async_drop, drop_next, next);
              goto trace;
            }
        }
@@ -924,18 +1001,30 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                                   current_sa_bytes);
   if (!is_async)
     {
-      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts);
+      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, drop_next);
       esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
-                              ptd->chunks);
+                              ptd->chunks, drop_next);
 
-      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts);
+      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
       esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
-                              ptd->chunks);
+                              ptd->chunks, drop_next);
     }
-  else if (async_frame && async_frame->n_elts)
+  else
     {
-      if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
-       esp_async_recycle_failed_submit (async_frame, b, next);
+      if (async_frame && async_frame->n_elts)
+       {
+         if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
+           esp_async_recycle_failed_submit (async_frame, b, from, nexts,
+                                            &n_async_drop, drop_next,
+                                            ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+       }
+      vlib_node_increment_counter (vm, node->node_index,
+                                  ESP_ENCRYPT_ERROR_RX_PKTS,
+                                  frame->n_vectors);
+      if (n_async_drop)
+       vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
+
+      return frame->n_vectors;
     }
 
   vlib_node_increment_counter (vm, node->node_index,
@@ -1035,7 +1124,7 @@ VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
 {
-  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0,
+  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
                             esp_encrypt_async_next.esp4_post_next);
 }
 
@@ -1046,16 +1135,17 @@ VLIB_REGISTER_NODE (esp4_encrypt_node) = {
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
+  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
   .error_strings = esp_encrypt_error_strings,
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
-  .next_nodes = {
-    [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop",
-    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-handoff",
-    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
-    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
-  },
+  .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+                 [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+                 [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+                 [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
+                 [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
+                 [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
+                 [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
 };
 /* *INDENT-ON* */
 
@@ -1083,7 +1173,7 @@ VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
 {
-  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0,
+  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
                             esp_encrypt_async_next.esp6_post_next);
 }
 
@@ -1093,17 +1183,10 @@ VLIB_REGISTER_NODE (esp6_encrypt_node) = {
   .vector_size = sizeof (u32),
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
+  .sibling_of = "esp4-encrypt",
 
   .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
   .error_strings = esp_encrypt_error_strings,
-
-  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
-  .next_nodes = {
-    [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop",
-    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-handoff",
-    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
-    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
-  },
 };
 /* *INDENT-ON* */
 
@@ -1120,7 +1203,7 @@ VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
   .vector_size = sizeof (u32),
   .format_trace = format_esp_post_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
-  .sibling_of = "esp6-encrypt",
+  .sibling_of = "esp4-encrypt",
 
   .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
   .error_strings = esp_encrypt_error_strings,
@@ -1131,7 +1214,7 @@ VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
 {
-  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1,
+  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
                             esp_encrypt_async_next.esp4_tun_post_next);
 }
 
@@ -1147,10 +1230,13 @@ VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = {
-    [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop",
-    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-tun-handoff",
+    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
+    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
+    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
     [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
-    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
   },
 };
 
@@ -1178,7 +1264,7 @@ VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
 {
-  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1,
+  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
                             esp_encrypt_async_next.esp6_tun_post_next);
 }
 
@@ -1194,9 +1280,12 @@ VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = {
-    [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop",
-    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-tun-handoff",
-    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
+    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
+    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
+    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
     [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
   },
 };
@@ -1216,13 +1305,58 @@ VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
   .vector_size = sizeof (u32),
   .format_trace = format_esp_post_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
-  .sibling_of = "esp6-encrypt-tun",
+  .sibling_of = "esp-mpls-encrypt-tun",
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
+  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
   .error_strings = esp_encrypt_error_strings,
 };
 /* *INDENT-ON* */
 
+VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
+                            esp_encrypt_async_next.esp_mpls_tun_post_next);
+}
+
+VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
+  .name = "esp-mpls-encrypt-tun",
+  .vector_size = sizeof (u32),
+  .format_trace = format_esp_encrypt_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
+  .error_strings = esp_encrypt_error_strings,
+
+  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
+  .next_nodes = {
+    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
+    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
+    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
+    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
+  },
+};
+
+VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+  return esp_encrypt_post_inline (vm, node, from_frame);
+}
+
+VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
+  .name = "esp-mpls-encrypt-tun-post",
+  .vector_size = sizeof (u32),
+  .format_trace = format_esp_post_encrypt_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .sibling_of = "esp-mpls-encrypt-tun",
+
+  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
+  .error_strings = esp_encrypt_error_strings,
+};
+
 typedef struct
 {
   u32 sa_index;
@@ -1337,23 +1471,6 @@ VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FN (esp_encrypt_pending_node) (vlib_main_t * vm,
-                                        vlib_node_runtime_t * node,
-                                        vlib_frame_t * from_frame)
-{
-  return from_frame->n_vectors;
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp_encrypt_pending_node) = {
-  .name = "esp-encrypt-pending",
-  .vector_size = sizeof (u32),
-  .type = VLIB_NODE_TYPE_INTERNAL,
-
-  .n_next_nodes = 0
-};
-/* *INDENT-ON* */
-
 /*
  * fd.io coding-style-patch-verification: ON
  *