ip: add support for buffer offload metadata in ip midchain
[vpp.git] src/vnet/ipsec/esp_encrypt.c
index 4f6976b..dd47053 100644
 #include <vnet/vnet.h>
 #include <vnet/api_errno.h>
 #include <vnet/ip/ip.h>
+#include <vnet/interface_output.h>
 
 #include <vnet/crypto/crypto.h>
 
 #include <vnet/ipsec/ipsec.h>
 #include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
 #include <vnet/ipsec/esp.h>
 #include <vnet/tunnel/tunnel_dp.h>
 
@@ -43,28 +45,6 @@ typedef enum
     ESP_ENCRYPT_N_NEXT,
 } esp_encrypt_next_t;
 
-#define foreach_esp_encrypt_error                               \
- _(RX_PKTS, "ESP pkts received")                                \
- _(POST_RX_PKTS, "ESP-post pkts received")                      \
- _(SEQ_CYCLED, "sequence number cycled (packet dropped)")       \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")     \
- _(NO_BUFFERS, "no buffers (packet dropped)")                   \
-
-typedef enum
-{
-#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
-  foreach_esp_encrypt_error
-#undef _
-    ESP_ENCRYPT_N_ERROR,
-} esp_encrypt_error_t;
-
-static char *esp_encrypt_error_strings[] = {
-#define _(sym,string) string,
-  foreach_esp_encrypt_error
-#undef _
-};
-
 typedef struct
 {
   u32 sa_index;
@@ -81,6 +61,8 @@ typedef struct
   u32 next_index;
 } esp_encrypt_post_trace_t;
 
+typedef vl_counter_esp_encrypt_enum_t esp_encrypt_error_t;
+
 /* packet trace format function */
 static u8 *
 format_esp_encrypt_trace (u8 * s, va_list * args)
@@ -112,10 +94,8 @@ format_esp_post_encrypt_trace (u8 * s, va_list * args)
 
 /* pad packet in input buffer */
 static_always_inline u8 *
-esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
-                       u8 esp_align, u8 icv_sz,
-                       u16 * next, vlib_node_runtime_t * node,
-                       u16 buffer_data_size, uword total_len)
+esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
+                       u8 icv_sz, u16 buffer_data_size, uword total_len)
 {
   static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
     0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
@@ -129,7 +109,8 @@ esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
                                      last[0]->current_length + pad_bytes);
   u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;
 
-  if (last[0]->current_length + tail_sz > buffer_data_size)
+  if (last[0]->current_data + last[0]->current_length + tail_sz >
+      buffer_data_size)
     {
       u32 tmp_bi = 0;
       if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
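
Note on the reworked bounds check above: the old test compared only current_length against the buffer size, missing the case where the payload does not start at data[0]. A minimal sketch of the invariant, with a hypothetical helper name not in the patch:

    /* hypothetical helper: true when tail_sz more bytes still fit in this
     * buffer's data area */
    static inline int
    esp_tail_fits (vlib_buffer_t *last, u16 tail_sz, u16 buffer_data_size)
    {
      /* payload occupies [current_data, current_data + current_length);
       * the writable area ends at data[buffer_data_size] */
      return last->current_data + last->current_length + tail_sz <=
	     buffer_data_size;
    }
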
@@ -168,11 +149,9 @@ esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
   if (is_transport)
     {
       u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
-
-      sum = ip_csum_update (ip4->checksum, ip4->protocol,
-                           prot, ip4_header_t, protocol);
+      sum = ip_csum_update (ip4->checksum, ip4->protocol, prot, ip4_header_t,
+                           protocol);
       ip4->protocol = prot;
-
       sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
     }
   else
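
ip_csum_update() applies the incremental checksum update idea of RFC 1624: when a 16-bit field changes from m to m', the header checksum HC is adjusted without re-summing the whole header. A self-contained sketch of that arithmetic (illustrative, hypothetical function name):

    static u16
    incr_csum_fix (u16 hc, u16 m_old, u16 m_new)  /* hypothetical */
    {
      u32 sum = (u16) ~hc + (u16) ~m_old + (u32) m_new;
      sum = (sum & 0xffff) + (sum >> 16);  /* fold carries back in */
      sum = (sum & 0xffff) + (sum >> 16);
      return (u16) ~sum;                   /* HC' = ~(~HC + ~m + m') */
    }
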
@@ -201,9 +180,9 @@ ext_hdr_is_pre_esp (u8 nexthdr)
 
   return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
 #else
-  return ((nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
-         (nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
-         (nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0);
+  return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
+         !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) ||
+         !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
 #endif
 }
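
The scalar fallback being replaced had a precedence bug: != binds tighter than |, so the old expression ORed two raw XOR results with a boolean and was non-zero for nearly every nexthdr. The corrected form matches the vector path: true only for the three extension headers that may precede ESP. An equivalent, more explicit spelling:

    static_always_inline int
    ext_hdr_is_pre_esp_explicit (u8 nexthdr)  /* illustrative variant */
    {
      return (nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS ||
	      nexthdr == IP_PROTOCOL_IPV6_ROUTE ||
	      nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION);
    }
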
 
@@ -222,9 +201,8 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
       return len;
     }
 
-  p = (void *) (ip6 + 1);
+  p = ip6_next_header (ip6);
   len += ip6_ext_header_len (p);
-
   while (ext_hdr_is_pre_esp (p->next_hdr))
     {
       len += ip6_ext_header_len (p);
@@ -235,6 +213,25 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
   return len;
 }
 
+/* IPsec IV generation: IV requirements differ depending on the
+ * encryption mode: IVs must be unpredictable for AES-CBC, whereas for CTR
+ * and GCM they may be predictable but must never be reused with the same
+ * key material.
+ * To avoid reusing the same IVs between multiple VPP instances and between
+ * restarts, we use a properly chosen PRNG to generate IVs. To ensure the IV
+ * is unpredictable for CBC, it is then encrypted using the same key as the
+ * message. See NIST SP800-38A and NIST SP800-38D for details. */
+static_always_inline void *
+esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
+{
+  ASSERT (iv_sz >= sizeof (u64));
+  u64 *iv = (u64 *) (payload - iv_sz);
+  clib_memset_u8 (iv, 0, iv_sz);
+  *iv = clib_pcg64i_random_r (&sa->iv_prng);
+  return iv;
+}
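
Usage sketch (sizes assumed): with a 16-byte AES-CBC IV, esp_generate_iv() zero-fills all 16 bytes and overwrites the first 8 with PCG64 output. Because the CBC path below extends the cipher range backwards over the IV (crypto_start -= iv_sz) while using a zero op->iv, the value that reaches the wire is the block-encryption of this field, which gives CBC the unpredictable IV SP800-38A asks for:

    u8 scratch[80];
    u8 *payload = scratch + 16;   /* the IV is placed in front of the payload */
    void *iv = esp_generate_iv (sa0, payload, 16); /* sa0: some ipsec_sa_t * */
    /* iv == scratch; bytes 0..7 = PRNG output, bytes 8..15 = 0 */
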
+
 static_always_inline void
 esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_crypto_op_t * ops, vlib_buffer_t * b[],
@@ -256,8 +253,10 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
-         b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
-         nexts[bi] = drop_next;
+         esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+                                     ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+                                     bi, nexts, drop_next,
+                                     vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
@@ -284,22 +283,16 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
-         b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
-         nexts[bi] = drop_next;
+         esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+                                     ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+                                     bi, nexts, drop_next,
+                                     vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
     }
 }
 
-typedef struct
-{
-  u32 salt;
-  u64 iv;
-} __clib_packed esp_gcm_nonce_t;
-
-STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12);
-
 static_always_inline u32
 esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                          ipsec_sa_t * sa0, vlib_buffer_t * b,
@@ -384,56 +377,83 @@ esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
 }
 
 always_inline void
-esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
-                    vnet_crypto_op_t ** crypto_ops,
-                    vnet_crypto_op_t ** integ_ops, ipsec_sa_t * sa0,
-                    u8 * payload, u16 payload_len, u8 iv_sz, u8 icv_sz,
-                    vlib_buffer_t ** bufs, vlib_buffer_t ** b,
-                    vlib_buffer_t * lb, u32 hdr_len, esp_header_t * esp,
-                    esp_gcm_nonce_t * nonce)
+esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+                    vnet_crypto_op_t **crypto_ops,
+                    vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
+                    u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
+                    vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
+                    esp_header_t *esp)
 {
   if (sa0->crypto_enc_op_id)
     {
       vnet_crypto_op_t *op;
       vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
       vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+      u8 *crypto_start = payload;
+      /* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we
+       * always have enough space for the ESP header and the footer, which
+       * includes the ICV */
+      ASSERT (payload_len > icv_sz);
+      u16 crypto_len = payload_len - icv_sz;
+
+      /* generate the IV in front of the payload */
+      void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
 
-      op->src = op->dst = payload;
       op->key_index = sa0->crypto_key_index;
-      op->len = payload_len - icv_sz;
-      op->user_data = b - bufs;
+      op->user_data = bi;
 
-      if (ipsec_sa_is_set_IS_AEAD (sa0))
+      if (ipsec_sa_is_set_IS_CTR (sa0))
        {
-         /*
-          * construct the AAD in a scratch space in front
-          * of the IP header.
-          */
-         op->aad = payload - hdr_len - sizeof (esp_aead_t);
-         op->aad_len = esp_aad_fill (op->aad, esp, sa0);
-
-         op->tag = payload + op->len;
-         op->tag_len = 16;
+         /* construct nonce in a scratch space in front of the IP header */
+         esp_ctr_nonce_t *nonce =
+           (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
+         if (ipsec_sa_is_set_IS_AEAD (sa0))
+           {
+             /* construct aad in a scratch space in front of the nonce */
+             op->aad = (u8 *) nonce - sizeof (esp_aead_t);
+             op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
+             op->tag = payload + crypto_len;
+             op->tag_len = 16;
+             if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+               {
+                 /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+                 crypto_start -= iv_sz;
+                 crypto_len += iv_sz;
+               }
+           }
+         else
+           {
+             nonce->ctr = clib_host_to_net_u32 (1);
+           }
 
-         u64 *iv = (u64 *) (payload - iv_sz);
          nonce->salt = sa0->salt;
-         nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
+         nonce->iv = *(u64 *) pkt_iv;
          op->iv = (u8 *) nonce;
        }
       else
        {
-         op->iv = payload - iv_sz;
-         op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+         /* construct zero iv in front of the IP header */
+         op->iv = pkt_iv - hdr_len - iv_sz;
+         clib_memset_u8 (op->iv, 0, iv_sz);
+         /* include iv field in crypto */
+         crypto_start -= iv_sz;
+         crypto_len += iv_sz;
        }
 
-      if (lb != b[0])
+      if (PREDICT_FALSE (lb != b[0]))
        {
          /* is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          op->tag = vlib_buffer_get_tail (lb) - icv_sz;
-         esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
-                                   payload_len, &op->n_chunks);
+         esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
+                                   crypto_start, crypto_len + icv_sz,
+                                   &op->n_chunks);
+       }
+      else
+       {
+         /* not chained */
+         op->src = op->dst = crypto_start;
+         op->len = crypto_len;
        }
     }
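
The nonce and AAD are carved out of otherwise unused buffer space in front of the packet, so no per-packet allocation is needed. Assuming esp_ctr_nonce_t is the packed { u32 salt; u64 iv; u32 ctr; } from esp.h and esp_aead_t the fixed AAD scratch, the layout is roughly:

    /*  op->aad           op->iv           IP/ESP headers        pkt_iv
     *  |<- esp_aead_t ->|<- nonce, 16B ->|<---- hdr_len ---->|<- iv_sz ->|<- payload
     *  the AAD and nonce scratch are read only by the crypto engine and are
     *  never transmitted; the iv_sz bytes at pkt_iv are the on-wire IV */
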
 
@@ -447,7 +467,7 @@ esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
       op->key_index = sa0->integ_key_index;
       op->digest_len = icv_sz;
       op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
-      op->user_data = b - bufs;
+      op->user_data = bi;
 
       if (lb != b[0])
        {
@@ -464,122 +484,134 @@ esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
        }
       else if (ipsec_sa_is_set_USE_ESN (sa0))
        {
-         u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
-         clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
+         u32 tmp = clib_net_to_host_u32 (seq_hi);
+         clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
          op->len += sizeof (seq_hi);
        }
     }
 }
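
Context for the ESN branch: RFC 4303 section 2.2.1 has the integrity check cover payload || seq_hi while only payload || ICV is transmitted. Writing seq_hi into the digest slot works because the MAC output overwrites that slot only after hashing completes. In miniature, with a hypothetical hmac() helper and assumed names:

    u32 seq_hi_be = clib_host_to_net_u32 (sa->seq_hi);
    clib_memcpy_fast (icv_slot, &seq_hi_be, 4);  /* borrow the ICV slot */
    hmac (key, msg, msg_len + 4, icv_slot);      /* hypothetical helper */
    /* the MAC output lands in icv_slot, replacing the borrowed seq_hi bytes */
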
 
-static_always_inline int
-esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
-                        vnet_crypto_async_frame_t *async_frame,
-                        ipsec_sa_t * sa, vlib_buffer_t * b,
-                        esp_header_t * esp, u8 * payload, u32 payload_len,
-                        u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
-                        u16 async_next, vlib_buffer_t * lb)
+static_always_inline void
+esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+                        vnet_crypto_async_frame_t *async_frame,
+                        ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
+                        u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
+                        u32 bi, u16 next, u32 hdr_len, u16 async_next,
+                        vlib_buffer_t *lb)
 {
   esp_post_data_t *post = esp_post_data (b);
   u8 *tag, *iv, *aad = 0;
   u8 flag = 0;
-  u32 key_index;
-  i16 crypto_start_offset, integ_start_offset = 0;
+  const u32 key_index = sa->crypto_key_index;
+  i16 crypto_start_offset, integ_start_offset;
   u16 crypto_total_len, integ_total_len;
 
   post->next_index = next;
 
   /* crypto */
-  crypto_start_offset = payload - b->data;
+  crypto_start_offset = integ_start_offset = payload - b->data;
   crypto_total_len = integ_total_len = payload_len - icv_sz;
   tag = payload + crypto_total_len;
 
-  /* aead */
-  if (ipsec_sa_is_set_IS_AEAD (sa))
+  /* generate the IV in front of the payload */
+  void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
+
+  if (ipsec_sa_is_set_IS_CTR (sa))
     {
-      esp_gcm_nonce_t *nonce;
-      u64 *pkt_iv = (u64 *) (payload - iv_sz);
+      /* construct nonce in a scratch space in front of the IP header */
+      esp_ctr_nonce_t *nonce =
+       (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
+      if (ipsec_sa_is_set_IS_AEAD (sa))
+       {
+         /* construct aad in a scratch space in front of the nonce */
+         aad = (u8 *) nonce - sizeof (esp_aead_t);
+         esp_aad_fill (aad, esp, sa, sa->seq_hi);
+         if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa)))
+           {
+             /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+             crypto_start_offset -= iv_sz;
+             crypto_total_len += iv_sz;
+           }
+       }
+      else
+       {
+         nonce->ctr = clib_host_to_net_u32 (1);
+       }
 
-      aad = payload - hdr_len - sizeof (esp_aead_t);
-      esp_aad_fill (aad, esp, sa);
-      nonce = (esp_gcm_nonce_t *) (aad - sizeof (*nonce));
       nonce->salt = sa->salt;
-      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->gcm_iv_counter++);
+      nonce->iv = *(u64 *) pkt_iv;
       iv = (u8 *) nonce;
-      key_index = sa->crypto_key_index;
-
-      if (lb != b)
-       {
-         /* chain */
-         flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
-         tag = vlib_buffer_get_tail (lb) - icv_sz;
-         crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
-                                                      icv_sz, payload,
-                                                      payload_len, 0);
-       }
-      goto out;
+    }
+  else
+    {
+      /* construct zero iv in front of the IP header */
+      iv = pkt_iv - hdr_len - iv_sz;
+      clib_memset_u8 (iv, 0, iv_sz);
+      /* include iv field in crypto */
+      crypto_start_offset -= iv_sz;
+      crypto_total_len += iv_sz;
     }
 
-  /* cipher then hash */
-  iv = payload - iv_sz;
-  integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
-  integ_total_len += iv_sz + sizeof (esp_header_t);
-  flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
-  key_index = sa->linked_key_index;
-
-  if (b != lb)
+  if (lb != b)
     {
+      /* chain */
       flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
-      crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
-                                                  icv_sz, payload,
-                                                  payload_len, 0);
       tag = vlib_buffer_get_tail (lb) - icv_sz;
-      integ_total_len = esp_encrypt_chain_integ (vm, ptd, sa, b, lb, icv_sz,
-                                                payload - iv_sz -
-                                                sizeof (esp_header_t),
-                                                payload_len + iv_sz +
-                                                sizeof (esp_header_t),
-                                                tag, 0);
+      crypto_total_len = esp_encrypt_chain_crypto (
+       vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+       crypto_total_len + icv_sz, 0);
     }
-  else if (ipsec_sa_is_set_USE_ESN (sa) && !ipsec_sa_is_set_IS_AEAD (sa))
+
+  if (sa->integ_op_id)
     {
-      u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
-      clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
-      integ_total_len += sizeof (seq_hi);
+      integ_start_offset -= iv_sz + sizeof (esp_header_t);
+      integ_total_len += iv_sz + sizeof (esp_header_t);
+
+      if (b != lb)
+       {
+         integ_total_len = esp_encrypt_chain_integ (
+           vm, ptd, sa, b, lb, icv_sz,
+           payload - iv_sz - sizeof (esp_header_t),
+           payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
+       }
+      else if (ipsec_sa_is_set_USE_ESN (sa))
+       {
+         u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
+         clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
+         integ_total_len += sizeof (seq_hi);
+       }
     }
 
-out:
-  return vnet_crypto_async_add_to_frame (vm, async_frame, key_index,
-                                        crypto_total_len,
-                                        integ_total_len - crypto_total_len,
-                                        crypto_start_offset,
-                                        integ_start_offset, bi, async_next,
-                                        iv, tag, aad, flag);
+  /* this always succeeds because we know the frame is not full */
+  vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
+                                 integ_total_len - crypto_total_len,
+                                 crypto_start_offset, integ_start_offset, bi,
+                                 async_next, iv, tag, aad, flag);
 }
 
 always_inline uword
 esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *frame, vnet_link_t lt, int is_tun,
-                   u16 async_next)
+                   u16 async_next_node)
 {
   ipsec_main_t *im = &ipsec_main;
   ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
   u32 *from = vlib_frame_vector_args (frame);
   u32 n_left = frame->n_vectors;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
-  esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces;
   u32 thread_index = vm->thread_index;
   u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
   u32 current_sa_index = ~0, current_sa_packets = 0;
   u32 current_sa_bytes = 0, spi = 0;
   u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
   ipsec_sa_t *sa0 = 0;
+  u8 sa_drop_no_crypto = 0;
   vlib_buffer_t *lb;
   vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
   vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
-  vnet_crypto_async_frame_t *async_frame = 0;
+  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
   int is_async = im->async_mode;
-  vnet_crypto_async_op_id_t last_async_op = ~0;
+  vnet_crypto_async_op_id_t async_op = ~0;
   u16 drop_next =
     (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
                           (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
@@ -588,17 +620,23 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                        ESP_ENCRYPT_NEXT_HANDOFF6 :
                        (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
                                               ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
-  u16 n_async_drop = 0;
+  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+  u16 n_async = 0;
+  u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
+  u32 sync_bi[VLIB_FRAME_SIZE];
+  u32 noop_bi[VLIB_FRAME_SIZE];
+  esp_encrypt_error_t err;
 
   vlib_get_buffers (vm, from, b, n_left);
-  if (!is_async)
-    {
-      vec_reset_length (ptd->crypto_ops);
-      vec_reset_length (ptd->integ_ops);
-      vec_reset_length (ptd->chained_crypto_ops);
-      vec_reset_length (ptd->chained_integ_ops);
-    }
+
+  vec_reset_length (ptd->crypto_ops);
+  vec_reset_length (ptd->integ_ops);
+  vec_reset_length (ptd->chained_crypto_ops);
+  vec_reset_length (ptd->chained_integ_ops);
+  vec_reset_length (ptd->async_frames);
   vec_reset_length (ptd->chunks);
+  clib_memset (async_frames, 0, sizeof (async_frames));
 
   while (n_left > 0)
     {
@@ -609,25 +647,39 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       u16 payload_len, payload_len_total, n_bufs;
       u32 hdr_len;
 
+      err = ESP_ENCRYPT_ERROR_RX_PKTS;
+
       if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
-         CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (p);
          p -= CLIB_CACHE_LINE_BYTES;
-         CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (p);
          /* speculate that the trailer goes in the first buffer */
          CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
                         CLIB_CACHE_LINE_BYTES, LOAD);
        }
 
+      vnet_calc_checksums_inline (vm, b[0], b[0]->flags & VNET_BUFFER_F_IS_IP4,
+                                 b[0]->flags & VNET_BUFFER_F_IS_IP6);
+      vnet_calc_outer_checksums_inline (vm, b[0]);
+
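These two added calls resolve any checksum offload still recorded in the buffer metadata (the interface_output.h include above brings them in). They must run before encryption: once ESP wraps the packet, neither the NIC nor any later node can see the inner headers. Expanded for readability (illustrative):

    int is_ip4 = !!(b[0]->flags & VNET_BUFFER_F_IS_IP4);
    int is_ip6 = !!(b[0]->flags & VNET_BUFFER_F_IS_IP6);
    vnet_calc_checksums_inline (vm, b[0], is_ip4, is_ip6); /* inner L3/L4 */
    vnet_calc_outer_checksums_inline (vm, b[0]);           /* outer headers */
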
       if (is_tun)
        {
          /* we are on a ipsec tunnel's feature arc */
          vnet_buffer (b[0])->ipsec.sad_index =
            sa_index0 = ipsec_tun_protect_get_sa_out
            (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+
+         if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
+           {
+             err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
+             noop_nexts[n_noop] = drop_next;
+             b[0]->error = node->errors[err];
+             goto trace;
+           }
        }
       else
        sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
@@ -635,53 +687,55 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (sa_index0 != current_sa_index)
        {
          if (current_sa_packets)
-           vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
-                                            current_sa_index,
-                                            current_sa_packets,
-                                            current_sa_bytes);
+           vlib_increment_combined_counter (
+             &ipsec_sa_counters, thread_index, current_sa_index,
+             current_sa_packets, current_sa_bytes);
          current_sa_packets = current_sa_bytes = 0;
 
-         sa0 = pool_elt_at_index (im->sad, sa_index0);
+         sa0 = ipsec_sa_get (sa_index0);
+         current_sa_index = sa_index0;
+
+         sa_drop_no_crypto = ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+                               sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
+                              !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0));
+
+         vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+                                         current_sa_index);
 
          /* fetch the second cacheline ASAP */
-         CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (sa0->cacheline1);
 
-         current_sa_index = sa_index0;
          spi = clib_net_to_host_u32 (sa0->spi);
          esp_align = sa0->esp_block_align;
          icv_sz = sa0->integ_icv_size;
          iv_sz = sa0->crypto_iv_size;
+         is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+       }
 
-         /* submit frame when op_id is different then the old one */
-         if (is_async && sa0->crypto_async_enc_op_id != last_async_op)
-           {
-             if (async_frame && async_frame->n_elts)
-               {
-                 if (vnet_crypto_async_submit_open_frame (vm, async_frame))
-                   esp_async_recycle_failed_submit (async_frame, b, from,
-                                                    nexts, &n_async_drop,
-                                                    drop_next,
-                                                    ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
-               }
-             async_frame =
-               vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
-             last_async_op = sa0->crypto_async_enc_op_id;
-           }
+      if (PREDICT_FALSE (sa_drop_no_crypto != 0))
+       {
+         err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
+         esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, drop_next, sa_index0);
+         goto trace;
        }
 
-      if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index))
+      if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
        {
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
-         clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0,
+         clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
        }
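
clib_atomic_cmp_and_swap (ptr, old, new) stores new only if *ptr still equals old, so when two workers race on an unowned SA exactly one claim succeeds; the loser falls through to the handoff check below. The pattern in miniature (illustrative names, handoff_to is hypothetical):

    if ((u16) ~0 == sa->thread_index)   /* SA not yet owned by any thread */
      clib_atomic_cmp_and_swap (&sa->thread_index, ~0,
				ipsec_sa_assign_thread (my_thread));
    if (my_thread != sa->thread_index)  /* lost the race, or never owned it */
      handoff_to (sa->thread_index);    /* hypothetical helper */
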
 
-      if (PREDICT_FALSE (thread_index != sa0->encrypt_thread_index))
+      if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, handoff_next, next);
+         vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+         err = ESP_ENCRYPT_ERROR_HANDOFF;
+         esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, handoff_next,
+                                     current_sa_index);
          goto trace;
        }
 
@@ -689,9 +743,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
       if (n_bufs == 0)
        {
-         b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, drop_next, next);
+         err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+         esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, drop_next, current_sa_index);
          goto trace;
        }
 
@@ -704,9 +758,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
 
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
-         b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, drop_next, next);
+         err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
+         esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, drop_next, current_sa_index);
          goto trace;
        }
 
@@ -716,16 +770,15 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (ipsec_sa_is_set_IS_TUNNEL (sa0))
        {
          payload = vlib_buffer_get_current (b[0]);
-         next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
-                                                next, node,
-                                                buffer_data_size,
-                                                vlib_buffer_length_in_chain
-                                                (vm, b[0]));
+         next_hdr_ptr = esp_add_footer_and_icv (
+           vm, &lb, esp_align, icv_sz, buffer_data_size,
+           vlib_buffer_length_in_chain (vm, b[0]));
          if (!next_hdr_ptr)
            {
-             b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
-             esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                                 &n_async_drop, drop_next, next);
+             err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+             esp_encrypt_set_next_index (b[0], node, thread_index, err,
+                                         n_noop, noop_nexts, drop_next,
+                                         current_sa_index);
              goto trace;
            }
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -763,16 +816,15 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
              else if (VNET_LINK_IP4 == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
-                 tunnel_encap_fixup_4o6 (sa0->tunnel_flags,
-                                         (const ip4_header_t *) payload,
-                                         ip6);
+                 tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
+                                         (const ip4_header_t *) payload, ip6);
                }
              else if (VNET_LINK_MPLS == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
                  tunnel_encap_fixup_mplso6 (
-                   sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
-                   ip6);
+                   sa0->tunnel_flags, b[0],
+                   (const mpls_unicast_header_t *) payload, ip6);
                }
              else
                ASSERT (0);
@@ -820,36 +872,50 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          dpo = &sa0->dpo;
          if (!is_tun)
            {
-             next[0] = dpo->dpoi_next_node;
+             sync_next[0] = dpo->dpoi_next_node;
              vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
            }
          else
-           next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+           sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+         b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
        }
       else                     /* transport mode */
        {
-         u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
+         u8 *l2_hdr, l2_len, *ip_hdr;
+         u16 ip_len;
          ip6_ext_header_t *ext_hdr;
          udp_header_t *udp = 0;
          u16 udp_len = 0;
          u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
 
+         /*
+          * Get extension header chain length. It might be longer than the
+          * buffer's pre_data area.
+          */
          ip_len =
            (VNET_LINK_IP6 == lt ?
               esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
               ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
+         if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
+           {
+             err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+             esp_encrypt_set_next_index (b[0], node, thread_index, err,
+                                         n_noop, noop_nexts, drop_next,
+                                         current_sa_index);
+             goto trace;
+           }
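
In transport mode the IP header (plus any v6 extension chain) is copied forward, in front of the ESP header, i.e. into the buffer's headroom (b->pre_data). The new guard drops packets whose header chain would underflow that headroom instead of silently corrupting adjacent memory. The invariant it establishes, as a sketch:

    /* holds for every packet that passes the guard above */
    ASSERT ((u8 *) old_ip_hdr - ip_len >= &b[0]->pre_data[0]);
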
 
          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
-         next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
-                                                next, node,
-                                                buffer_data_size,
-                                                vlib_buffer_length_in_chain
-                                                (vm, b[0]));
+         next_hdr_ptr = esp_add_footer_and_icv (
+           vm, &lb, esp_align, icv_sz, buffer_data_size,
+           vlib_buffer_length_in_chain (vm, b[0]));
          if (!next_hdr_ptr)
            {
-             esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                                 &n_async_drop, drop_next, next);
+             err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+             esp_encrypt_set_next_index (b[0], node, thread_index, err,
+                                         n_noop, noop_nexts, drop_next,
+                                         current_sa_index);
              goto trace;
            }
 
@@ -885,46 +951,44 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          else
            l2_len = 0;
 
+         u16 len;
+         len = payload_len_total + hdr_len - l2_len;
+
          if (VNET_LINK_IP6 == lt)
            {
              ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
              if (PREDICT_TRUE (NULL == ext_hdr))
                {
                  *next_hdr_ptr = ip6->protocol;
-                 ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
+                 ip6->protocol =
+                   (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
                }
              else
                {
                  *next_hdr_ptr = ext_hdr->next_hdr;
-                 ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
+                 ext_hdr->next_hdr =
+                   (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
                }
              ip6->payload_length =
-               clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
-                                     sizeof (ip6_header_t));
+               clib_host_to_net_u16 (len - sizeof (ip6_header_t));
            }
          else if (VNET_LINK_IP4 == lt)
            {
-             u16 len;
              ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
              *next_hdr_ptr = ip4->protocol;
-             len = payload_len_total + hdr_len - l2_len;
-             if (udp)
-               {
-                 esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
-                 udp_len = len - ip_len;
-               }
-             else
-               esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
+             esp_update_ip4_hdr (ip4, len, /* is_transport */ 1,
+                                 (udp != NULL));
            }
 
          clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
 
          if (udp)
            {
+             udp_len = len - ip_len;
              esp_fill_udp_hdr (sa0, udp, udp_len);
            }
 
-         next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+         sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
        }
 
       if (lb != b[0])
@@ -943,35 +1007,38 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
 
       if (is_async)
        {
-         if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
-           {
-             esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                                 &n_async_drop, drop_next, next);
-             goto trace;
-           }
+         async_op = sa0->crypto_async_enc_op_id;
 
-         if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
-                                      payload, payload_len, iv_sz,
-                                      icv_sz, from[b - bufs], next[0],
-                                      hdr_len, async_next, lb))
+         /* get a frame for this op if we don't yet have one or it's full
+          */
+         if (NULL == async_frames[async_op] ||
+             vnet_crypto_async_frame_is_full (async_frames[async_op]))
            {
-             /* The fail only caused by submission, free the whole frame. */
-             if (async_frame->n_elts)
-               esp_async_recycle_failed_submit (async_frame, b, from, nexts,
-                                                &n_async_drop, drop_next,
-                                                ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
-             b[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-             esp_set_next_index (1, from, nexts, from[b - bufs],
-                                 &n_async_drop, drop_next, next);
-             goto trace;
+             async_frames[async_op] =
+               vnet_crypto_async_get_frame (vm, async_op);
+
+             if (PREDICT_FALSE (!async_frames[async_op]))
+               {
+                 err = ESP_ENCRYPT_ERROR_NO_AVAIL_FRAME;
+                 esp_encrypt_set_next_index (b[0], node, thread_index, err,
+                                             n_noop, noop_nexts, drop_next,
+                                             current_sa_index);
+                 goto trace;
+               }
+
+             /* Save the frame to the list we'll submit at the end */
+             vec_add1 (ptd->async_frames, async_frames[async_op]);
            }
+
+         esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
+                                  esp, payload, payload_len, iv_sz, icv_sz,
+                                  from[b - bufs], sync_next[0], hdr_len,
+                                  async_next_node, lb);
        }
       else
-       {
-         esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
-                              payload_len, iv_sz, icv_sz, bufs, b, lb,
-                              hdr_len, esp, nonce++);
-       }
+       esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
+                            payload, payload_len, iv_sz, icv_sz, n_sync, b,
+                            lb, hdr_len, esp);
 
       vlib_buffer_advance (b[0], 0LL - hdr_len);
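
The async path now batches by crypto op id instead of submitting whenever the SA changes: a frame is fetched lazily the first time an op id appears (or when its frame fills), each open frame is recorded in ptd->async_frames, and all frames are submitted once after the loop. This is also why esp_prepare_async_frame() may assume its frame has room. The core of the pattern, restated:

    /* sketch of the per-op lazy-frame logic used above */
    vnet_crypto_async_frame_t *f = async_frames[async_op];
    if (NULL == f || vnet_crypto_async_frame_is_full (f))
      {
	f = vnet_crypto_async_get_frame (vm, async_op);
	async_frames[async_op] = f;
	vec_add1 (ptd->async_frames, f); /* submitted in one pass at the end */
      }
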
 
@@ -983,55 +1050,83 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        {
          esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                    sizeof (*tr));
-         tr->sa_index = sa_index0;
-         tr->spi = sa0->spi;
-         tr->seq = sa0->seq;
-         tr->sa_seq_hi = sa0->seq_hi;
-         tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
-         tr->crypto_alg = sa0->crypto_alg;
-         tr->integ_alg = sa0->integ_alg;
+         if (INDEX_INVALID == sa_index0)
+           clib_memset_u8 (tr, 0xff, sizeof (*tr));
+         else
+           {
+             tr->sa_index = sa_index0;
+             tr->spi = sa0->spi;
+             tr->seq = sa0->seq;
+             tr->sa_seq_hi = sa0->seq_hi;
+             tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
+             tr->crypto_alg = sa0->crypto_alg;
+             tr->integ_alg = sa0->integ_alg;
+           }
        }
+
       /* next */
+      if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
+       {
+         noop_bi[n_noop] = from[b - bufs];
+         n_noop++;
+       }
+      else if (!is_async)
+       {
+         sync_bi[n_sync] = from[b - bufs];
+         sync_bufs[n_sync] = b[0];
+         n_sync++;
+         sync_next++;
+       }
+      else
+       {
+         n_async++;
+       }
       n_left -= 1;
-      next += 1;
       b += 1;
     }
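
At the bottom of the loop every packet lands in exactly one bucket, keyed off err (which stays ESP_ENCRYPT_ERROR_RX_PKTS on success). Summarised:

    /* bucket          arrays                   drained by
     * drop/handoff    noop_bi, noop_nexts      vlib_buffer_enqueue_to_next
     * sync crypto     sync_bi/bufs/nexts       enqueue after esp_process_ops
     * async crypto    n_async (count only)     crypto engine -> *-post nodes */
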
 
-  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
-                                  current_sa_index, current_sa_packets,
-                                  current_sa_bytes);
-  if (!is_async)
+  if (INDEX_INVALID != current_sa_index)
+    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+                                    current_sa_index, current_sa_packets,
+                                    current_sa_bytes);
+  if (n_sync)
     {
-      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, drop_next);
-      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
-                              ptd->chunks, drop_next);
+      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
+                      drop_next);
+      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
+                              sync_nexts, ptd->chunks, drop_next);
 
-      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
-      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
-                              ptd->chunks, drop_next);
+      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
+                      drop_next);
+      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+                              sync_nexts, ptd->chunks, drop_next);
+
+      vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
     }
-  else
+  if (n_async)
     {
-      if (async_frame && async_frame->n_elts)
+      /* submit all of the open frames */
+      vnet_crypto_async_frame_t **async_frame;
+
+      vec_foreach (async_frame, ptd->async_frames)
        {
-         if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
-           esp_async_recycle_failed_submit (async_frame, b, from, nexts,
-                                            &n_async_drop, drop_next,
-                                            ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+         if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+           {
+             n_noop += esp_async_recycle_failed_submit (
+               vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+               IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi,
+               noop_nexts, drop_next, true);
+             vnet_crypto_async_reset_frame (*async_frame);
+             vnet_crypto_async_free_frame (vm, *async_frame);
+           }
        }
-      vlib_node_increment_counter (vm, node->node_index,
-                                  ESP_ENCRYPT_ERROR_RX_PKTS,
-                                  frame->n_vectors);
-      if (n_async_drop)
-       vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
-
-      return frame->n_vectors;
     }
+  if (n_noop)
+    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
 
-  vlib_node_increment_counter (vm, node->node_index,
-                              ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
+  vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
+                              frame->n_vectors);
 
-  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
   return frame->n_vectors;
 }
 
@@ -1129,15 +1224,14 @@ VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
                             esp_encrypt_async_next.esp4_post_next);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp4_encrypt_node) = {
   .name = "esp4-encrypt",
   .vector_size = sizeof (u32),
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
@@ -1148,7 +1242,6 @@ VLIB_REGISTER_NODE (esp4_encrypt_node) = {
                  [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
                  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
 };
-/* *INDENT-ON* */
 
 VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
@@ -1157,7 +1250,6 @@ VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
   return esp_encrypt_post_inline (vm, node, from_frame);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
   .name = "esp4-encrypt-post",
   .vector_size = sizeof (u32),
@@ -1165,10 +1257,9 @@ VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp4-encrypt",
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
-/* *INDENT-ON* */
 
 VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
@@ -1178,7 +1269,6 @@ VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
                             esp_encrypt_async_next.esp6_post_next);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp6_encrypt_node) = {
   .name = "esp6-encrypt",
   .vector_size = sizeof (u32),
@@ -1186,10 +1276,9 @@ VLIB_REGISTER_NODE (esp6_encrypt_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp4-encrypt",
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
-/* *INDENT-ON* */
 
 VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
@@ -1198,7 +1287,6 @@ VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
   return esp_encrypt_post_inline (vm, node, from_frame);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
   .name = "esp6-encrypt-post",
   .vector_size = sizeof (u32),
@@ -1206,10 +1294,9 @@ VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp4-encrypt",
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
-/* *INDENT-ON* */
 
 VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
@@ -1219,15 +1306,14 @@ VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
                             esp_encrypt_async_next.esp4_tun_post_next);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
   .name = "esp4-encrypt-tun",
   .vector_size = sizeof (u32),
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = {
@@ -1248,7 +1334,6 @@ VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
   return esp_encrypt_post_inline (vm, node, from_frame);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
   .name = "esp4-encrypt-tun-post",
   .vector_size = sizeof (u32),
@@ -1256,10 +1341,9 @@ VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp4-encrypt-tun",
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
-/* *INDENT-ON* */
 
 VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
@@ -1269,15 +1353,14 @@ VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
                             esp_encrypt_async_next.esp6_tun_post_next);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
   .name = "esp6-encrypt-tun",
   .vector_size = sizeof (u32),
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = {
@@ -1291,7 +1374,6 @@ VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
   },
 };
 
-/* *INDENT-ON* */
 
 VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
@@ -1300,7 +1382,6 @@ VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
   return esp_encrypt_post_inline (vm, node, from_frame);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
   .name = "esp6-encrypt-tun-post",
   .vector_size = sizeof (u32),
@@ -1308,10 +1389,9 @@ VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp-mpls-encrypt-tun",
 
-  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
-/* *INDENT-ON* */
 
 VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
@@ -1326,8 +1406,8 @@ VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = {
@@ -1354,123 +1434,34 @@ VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp-mpls-encrypt-tun",
 
-  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
 
-typedef struct
-{
-  u32 sa_index;
-} esp_no_crypto_trace_t;
+#ifndef CLIB_MARCH_VARIANT
 
-static u8 *
-format_esp_no_crypto_trace (u8 * s, va_list * args)
+static clib_error_t *
+esp_encrypt_init (vlib_main_t *vm)
 {
-  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);
-
-  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);
-
-  return s;
-}
-
-enum
-{
-  ESP_NO_CRYPTO_NEXT_DROP,
-  ESP_NO_CRYPTO_N_NEXT,
-};
-
-enum
-{
-  ESP_NO_CRYPTO_ERROR_RX_PKTS,
-};
-
-static char *esp_no_crypto_error_strings[] = {
-  "Outbound ESP packets received",
-};
-
-always_inline uword
-esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-                     vlib_frame_t * frame)
-{
-  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-  u32 *from = vlib_frame_vector_args (frame);
-  u32 n_left = frame->n_vectors;
-
-  vlib_get_buffers (vm, from, b, n_left);
-
-  while (n_left > 0)
-    {
-      u32 sa_index0;
-
-      /* packets are always going to be dropped, but get the sa_index */
-      sa_index0 = ipsec_tun_protect_get_sa_out
-       (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
-
-      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
-       {
-         esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
-                                                     sizeof (*tr));
-         tr->sa_index = sa_index0;
-       }
-
-      n_left -= 1;
-      b += 1;
-    }
-
-  vlib_node_increment_counter (vm, node->node_index,
-                              ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);
-
-  vlib_buffer_enqueue_to_single_next (vm, node, from,
-                                     ESP_NO_CRYPTO_NEXT_DROP,
-                                     frame->n_vectors);
-
-  return frame->n_vectors;
-}
+  ipsec_main_t *im = &ipsec_main;
 
-VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
-                                       vlib_node_runtime_t * node,
-                                       vlib_frame_t * from_frame)
-{
-  return esp_no_crypto_inline (vm, node, from_frame);
+  im->esp4_enc_fq_index =
+    vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
+  im->esp6_enc_fq_index =
+    vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
+  im->esp4_enc_tun_fq_index =
+    vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
+  im->esp6_enc_tun_fq_index =
+    vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
+  im->esp_mpls_enc_tun_fq_index =
+    vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);
+
+  return 0;
 }
 
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
-{
-  .name = "esp4-no-crypto",
-  .vector_size = sizeof (u32),
-  .format_trace = format_esp_no_crypto_trace,
-  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
-  .error_strings = esp_no_crypto_error_strings,
-  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
-  .next_nodes = {
-    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
-  },
-};
-
-VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
-                                       vlib_node_runtime_t * node,
-                                       vlib_frame_t * from_frame)
-{
-  return esp_no_crypto_inline (vm, node, from_frame);
-}
+VLIB_INIT_FUNCTION (esp_encrypt_init);
 
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
-{
-  .name = "esp6-no-crypto",
-  .vector_size = sizeof (u32),
-  .format_trace = format_esp_no_crypto_trace,
-  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
-  .error_strings = esp_no_crypto_error_strings,
-  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
-  .next_nodes = {
-    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
-  },
-};
-/* *INDENT-ON* */
+#endif
 
 /*
  * fd.io coding-style-patch-verification: ON