ipsec: fix async buffer leak
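
When submission of an async crypto frame fails, the frame's buffers are
recycled onto the no-op (drop) path. esp_async_recycle_failed_submit was
given n_sync as the base index into the noop_bi/noop_nexts arrays rather
than n_noop, so the recycled entries could land at the wrong offset and
the frame's buffers were leaked.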
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 30c2bf9..7fa5ec9 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -50,7 +50,9 @@ typedef enum
   _ (SEQ_CYCLED, "sequence number cycled (packet dropped)")                   \
   _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
   _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")                 \
-  _ (NO_BUFFERS, "no buffers (packet dropped)")
+  _ (NO_BUFFERS, "no buffers (packet dropped)")                               \
+  _ (NO_PROTECTION, "no protecting SA (packet dropped)")                      \
+  _ (NO_ENCRYPTION, "no encrypting SA (packet dropped)")
 
 typedef enum
 {
@@ -129,7 +131,9 @@ esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
                                      last[0]->current_length + pad_bytes);
   u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;
 
-  if (last[0]->current_length + tail_sz > buffer_data_size)
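+  /* does the tail (footer, pad and ICV) still fit this buffer's data space? */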
+  if (last[0]->current_data + last[0]->current_length + tail_sz >
+      buffer_data_size)
     {
       u32 tmp_bi = 0;
       if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
@@ -203,7 +206,7 @@ ext_hdr_is_pre_esp (u8 nexthdr)
 #else
   return ((nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
          (nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
-         (nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0);
+         ((nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0));
 #endif
 }
 
@@ -222,9 +225,8 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
       return len;
     }
 
-  p = (void *) (ip6 + 1);
+  p = ip6_next_header (ip6);
   len += ip6_ext_header_len (p);
-
   while (ext_hdr_is_pre_esp (p->next_hdr))
     {
       len += ip6_ext_header_len (p);
@@ -378,7 +380,7 @@ esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
 always_inline void
 esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                     vnet_crypto_op_t **crypto_ops,
-                    vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0,
+                    vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
                     u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
                     vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
                     esp_header_t *esp)
@@ -407,7 +409,7 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
            {
	      /* construct aad in a scratch space in front of the nonce */
              op->aad = (u8 *) nonce - sizeof (esp_aead_t);
-             op->aad_len = esp_aad_fill (op->aad, esp, sa0);
+             op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
              op->tag = payload + op->len;
              op->tag_len = 16;
            }
@@ -464,8 +466,8 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
        }
       else if (ipsec_sa_is_set_USE_ESN (sa0))
        {
-         u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
-         clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
+         u32 tmp = clib_net_to_host_u32 (seq_hi);
+         clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
          op->len += sizeof (seq_hi);
        }
     }
@@ -507,7 +509,7 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
        {
	  /* construct aad in a scratch space in front of the nonce */
          aad = (u8 *) nonce - sizeof (esp_aead_t);
-         esp_aad_fill (aad, esp, sa);
+         esp_aad_fill (aad, esp, sa, sa->seq_hi);
          key_index = sa->crypto_key_index;
        }
       else
@@ -625,9 +627,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
-         CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (p);
          p -= CLIB_CACHE_LINE_BYTES;
-         CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (p);
          /* speculate that the trailer goes in the first buffer */
          CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
                         CLIB_CACHE_LINE_BYTES, LOAD);
@@ -639,6 +641,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vnet_buffer (b[0])->ipsec.sad_index =
            sa_index0 = ipsec_tun_protect_get_sa_out
            (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+
+         if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
+           {
+             err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
+             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                                 drop_next);
+             goto trace;
+           }
        }
       else
        sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
@@ -654,8 +664,18 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
 
          sa0 = ipsec_sa_get (sa_index0);
 
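+	  /* drop on an SA with no crypto and no integrity, unless explicitly allowed */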
+         if (PREDICT_FALSE ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+                             sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
+                            !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
+           {
+             err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
+             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                                 drop_next);
+             goto trace;
+           }
          /* fetch the second cacheline ASAP */
-         CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (sa0->cacheline1);
 
          current_sa_index = sa_index0;
          spi = clib_net_to_host_u32 (sa0->spi);
@@ -822,16 +841,29 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        }
       else                     /* transport mode */
        {
-         u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
+         u8 *l2_hdr, l2_len, *ip_hdr;
+         u16 ip_len;
          ip6_ext_header_t *ext_hdr;
          udp_header_t *udp = 0;
          u16 udp_len = 0;
          u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
 
+         /*
+          * Get extension header chain length. It might be longer than the
+          * buffer's pre_data area.
+          */
          ip_len =
            (VNET_LINK_IP6 == lt ?
               esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
               ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
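+	  /* drop if pre_data lacks headroom for the header chain */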
+         if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
+           {
+             err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                                 drop_next);
+             goto trace;
+           }
 
          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
@@ -955,9 +986,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                                   async_next_node, lb);
        }
       else
-       esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
-                            payload_len, iv_sz, icv_sz, n_sync, b, lb,
-                            hdr_len, esp);
+       esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
+                            payload, payload_len, iv_sz, icv_sz, n_sync, b,
+                            lb, hdr_len, esp);
 
       vlib_buffer_advance (b[0], 0LL - hdr_len);
 
@@ -969,13 +1000,18 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        {
          esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                    sizeof (*tr));
-         tr->sa_index = sa_index0;
-         tr->spi = sa0->spi;
-         tr->seq = sa0->seq;
-         tr->sa_seq_hi = sa0->seq_hi;
-         tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
-         tr->crypto_alg = sa0->crypto_alg;
-         tr->integ_alg = sa0->integ_alg;
+         if (INDEX_INVALID == sa_index0)
+           clib_memset_u8 (tr, 0xff, sizeof (*tr));
+         else
+           {
+             tr->sa_index = sa_index0;
+	      tr->spi = sa0->spi;
+             tr->seq = sa0->seq;
+             tr->sa_seq_hi = sa0->seq_hi;
+             tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
+             tr->crypto_alg = sa0->crypto_alg;
+             tr->integ_alg = sa0->integ_alg;
+           }
        }
 
       /* next */
@@ -1001,9 +1038,10 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       b += 1;
     }
 
-  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
-                                  current_sa_index, current_sa_packets,
-                                  current_sa_bytes);
+  if (INDEX_INVALID != current_sa_index)
+    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+                                    current_sa_index, current_sa_packets,
+                                    current_sa_bytes);
   if (n_sync)
     {
       esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
@@ -1029,7 +1067,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
            {
              n_noop += esp_async_recycle_failed_submit (
                vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
-               n_sync, noop_bi, noop_nexts, drop_next);
+               n_noop, noop_bi, noop_nexts, drop_next);
              vnet_crypto_async_reset_frame (*async_frame);
              vnet_crypto_async_free_frame (vm, *async_frame);
            }
@@ -1367,120 +1405,6 @@ VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
   .error_strings = esp_encrypt_error_strings,
 };
 
-typedef struct
-{
-  u32 sa_index;
-} esp_no_crypto_trace_t;
-
-static u8 *
-format_esp_no_crypto_trace (u8 * s, va_list * args)
-{
-  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);
-
-  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);
-
-  return s;
-}
-
-enum
-{
-  ESP_NO_CRYPTO_NEXT_DROP,
-  ESP_NO_CRYPTO_N_NEXT,
-};
-
-enum
-{
-  ESP_NO_CRYPTO_ERROR_RX_PKTS,
-};
-
-static char *esp_no_crypto_error_strings[] = {
-  "Outbound ESP packets received",
-};
-
-always_inline uword
-esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-                     vlib_frame_t * frame)
-{
-  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-  u32 *from = vlib_frame_vector_args (frame);
-  u32 n_left = frame->n_vectors;
-
-  vlib_get_buffers (vm, from, b, n_left);
-
-  while (n_left > 0)
-    {
-      u32 sa_index0;
-
-      /* packets are always going to be dropped, but get the sa_index */
-      sa_index0 = ipsec_tun_protect_get_sa_out
-       (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
-
-      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
-       {
-         esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
-                                                     sizeof (*tr));
-         tr->sa_index = sa_index0;
-       }
-
-      n_left -= 1;
-      b += 1;
-    }
-
-  vlib_node_increment_counter (vm, node->node_index,
-                              ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);
-
-  vlib_buffer_enqueue_to_single_next (vm, node, from,
-                                     ESP_NO_CRYPTO_NEXT_DROP,
-                                     frame->n_vectors);
-
-  return frame->n_vectors;
-}
-
-VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
-                                       vlib_node_runtime_t * node,
-                                       vlib_frame_t * from_frame)
-{
-  return esp_no_crypto_inline (vm, node, from_frame);
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
-{
-  .name = "esp4-no-crypto",
-  .vector_size = sizeof (u32),
-  .format_trace = format_esp_no_crypto_trace,
-  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
-  .error_strings = esp_no_crypto_error_strings,
-  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
-  .next_nodes = {
-    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
-  },
-};
-
-VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
-                                       vlib_node_runtime_t * node,
-                                       vlib_frame_t * from_frame)
-{
-  return esp_no_crypto_inline (vm, node, from_frame);
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
-{
-  .name = "esp6-no-crypto",
-  .vector_size = sizeof (u32),
-  .format_trace = format_esp_no_crypto_trace,
-  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
-  .error_strings = esp_no_crypto_error_strings,
-  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
-  .next_nodes = {
-    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
-  },
-};
-/* *INDENT-ON* */
-
 #ifndef CLIB_MARCH_VARIANT
 
 static clib_error_t *