ipsec: Use .api declared error counters
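Replace the hand-maintained foreach_esp_encrypt_error string table with the
error counters generated from the .api definition, so the error names,
descriptions and severities live in one place and the stats segment picks
them up directly. As a minimal sketch of the pattern (illustrative only:
the counter bodies below and the exact shape of the generated header are
assumptions, not copied from the companion ipsec.api change):

    /* ipsec.api -- counters declared per node base-name */
    counters esp_encrypt {
      rx_pkts {
        severity info;
        type counter64;
        units "packets";
        description "ESP pkts received";
      };
      no_protection {
        severity error;
        type counter64;
        units "packets";
        description "no protecting SA (packet dropped)";
      };
    };

    /* vppapigen emits roughly this into ipsec.api_enum.h */
    typedef enum
    {
      ESP_ENCRYPT_ERROR_RX_PKTS,
      ESP_ENCRYPT_ERROR_NO_PROTECTION,
      ESP_ENCRYPT_N_ERROR,
    } vl_counter_esp_encrypt_enum_t;

    extern vlib_error_desc_t esp_encrypt_error_counters[];

Node registrations then reference the generated table via
.n_errors = ESP_ENCRYPT_N_ERROR / .error_counters = esp_encrypt_error_counters
instead of an ARRAY_LEN'd local string array, which is exactly the
substitution the hunks below perform.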
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 33484e9..d28f4f5 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -23,6 +23,7 @@
 
 #include <vnet/ipsec/ipsec.h>
 #include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
 #include <vnet/ipsec/esp.h>
 #include <vnet/tunnel/tunnel_dp.h>
 
@@ -43,29 +44,6 @@ typedef enum
     ESP_ENCRYPT_N_NEXT,
 } esp_encrypt_next_t;
 
-#define foreach_esp_encrypt_error                                             \
-  _ (RX_PKTS, "ESP pkts received")                                            \
-  _ (POST_RX_PKTS, "ESP-post pkts received")                                  \
-  _ (HANDOFF, "Hand-off")                                                     \
-  _ (SEQ_CYCLED, "sequence number cycled (packet dropped)")                   \
-  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
-  _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")                 \
-  _ (NO_BUFFERS, "no buffers (packet dropped)")
-
-typedef enum
-{
-#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
-  foreach_esp_encrypt_error
-#undef _
-    ESP_ENCRYPT_N_ERROR,
-} esp_encrypt_error_t;
-
-static char *esp_encrypt_error_strings[] = {
-#define _(sym,string) string,
-  foreach_esp_encrypt_error
-#undef _
-};
-
 typedef struct
 {
   u32 sa_index;
@@ -82,6 +60,8 @@ typedef struct
   u32 next_index;
 } esp_encrypt_post_trace_t;
 
+typedef vl_counter_esp_encrypt_enum_t esp_encrypt_error_t;
+
 /* packet trace format function */
 static u8 *
 format_esp_encrypt_trace (u8 * s, va_list * args)
@@ -223,9 +203,8 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
       return len;
     }
 
-  p = (void *) (ip6 + 1);
+  p = ip6_next_header (ip6);
   len += ip6_ext_header_len (p);
-
   while (ext_hdr_is_pre_esp (p->next_hdr))
     {
       len += ip6_ext_header_len (p);
@@ -640,6 +619,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vnet_buffer (b[0])->ipsec.sad_index =
            sa_index0 = ipsec_tun_protect_get_sa_out
            (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+
+         if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
+           {
+             err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
+             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                                 drop_next);
+             goto trace;
+           }
        }
       else
        sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
@@ -655,6 +642,15 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
 
          sa0 = ipsec_sa_get (sa_index0);
 
+         if (PREDICT_FALSE ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+                             sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
+                            !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
+           {
+             err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
+             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                                 drop_next);
+             goto trace;
+           }
          /* fetch the second cacheline ASAP */
          clib_prefetch_load (sa0->cacheline1);
 
@@ -823,16 +819,28 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        }
       else                     /* transport mode */
        {
-         u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
+         u8 *l2_hdr, l2_len, *ip_hdr;
+         u16 ip_len;
          ip6_ext_header_t *ext_hdr;
          udp_header_t *udp = 0;
          u16 udp_len = 0;
          u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
 
+         /*
+          * Get extension header chain length. It might be longer than the
+          * buffer's pre_data area.
+          */
          ip_len =
            (VNET_LINK_IP6 == lt ?
               esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
               ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
+         if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
+           {
+             err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                                 drop_next);
+             goto trace;
+           }
 
          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
@@ -970,13 +978,18 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        {
          esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                    sizeof (*tr));
-         tr->sa_index = sa_index0;
-         tr->spi = sa0->spi;
-         tr->seq = sa0->seq;
-         tr->sa_seq_hi = sa0->seq_hi;
-         tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
-         tr->crypto_alg = sa0->crypto_alg;
-         tr->integ_alg = sa0->integ_alg;
+         if (INDEX_INVALID == sa_index0)
+           clib_memset_u8 (tr, 0xff, sizeof (*tr));
+         else
+           {
+             tr->sa_index = sa_index0;
+             tr->spi = sa0->spi;
+             tr->seq = sa0->seq;
+             tr->sa_seq_hi = sa0->seq_hi;
+             tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
+             tr->crypto_alg = sa0->crypto_alg;
+             tr->integ_alg = sa0->integ_alg;
+           }
        }
 
       /* next */
@@ -1002,9 +1015,10 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       b += 1;
     }
 
-  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
-                                  current_sa_index, current_sa_packets,
-                                  current_sa_bytes);
+  if (INDEX_INVALID != current_sa_index)
+    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+                                    current_sa_index, current_sa_packets,
+                                    current_sa_bytes);
   if (n_sync)
     {
       esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
@@ -1030,7 +1044,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
            {
              n_noop += esp_async_recycle_failed_submit (
                vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
-               n_sync, noop_bi, noop_nexts, drop_next);
+               n_noop, noop_bi, noop_nexts, drop_next);
              vnet_crypto_async_reset_frame (*async_frame);
              vnet_crypto_async_free_frame (vm, *async_frame);
            }
@@ -1146,8 +1160,8 @@ VLIB_REGISTER_NODE (esp4_encrypt_node) = {
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
@@ -1175,8 +1189,8 @@ VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp4-encrypt",
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
 /* *INDENT-ON* */
 
@@ -1196,8 +1210,8 @@ VLIB_REGISTER_NODE (esp6_encrypt_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp4-encrypt",
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
 /* *INDENT-ON* */
 
@@ -1216,8 +1230,8 @@ VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp4-encrypt",
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
 /* *INDENT-ON* */
 
@@ -1236,8 +1250,8 @@ VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = {
@@ -1266,8 +1280,8 @@ VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp4-encrypt-tun",
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
 /* *INDENT-ON* */
 
@@ -1286,8 +1300,8 @@ VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = {
@@ -1318,8 +1332,8 @@ VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp-mpls-encrypt-tun",
 
-  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
 /* *INDENT-ON* */
 
@@ -1336,8 +1350,8 @@ VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
   .format_trace = format_esp_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 
   .n_next_nodes = ESP_ENCRYPT_N_NEXT,
   .next_nodes = {
@@ -1364,123 +1378,9 @@ VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
   .type = VLIB_NODE_TYPE_INTERNAL,
   .sibling_of = "esp-mpls-encrypt-tun",
 
-  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
-  .error_strings = esp_encrypt_error_strings,
-};
-
-typedef struct
-{
-  u32 sa_index;
-} esp_no_crypto_trace_t;
-
-static u8 *
-format_esp_no_crypto_trace (u8 * s, va_list * args)
-{
-  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);
-
-  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);
-
-  return s;
-}
-
-enum
-{
-  ESP_NO_CRYPTO_NEXT_DROP,
-  ESP_NO_CRYPTO_N_NEXT,
-};
-
-enum
-{
-  ESP_NO_CRYPTO_ERROR_RX_PKTS,
-};
-
-static char *esp_no_crypto_error_strings[] = {
-  "Outbound ESP packets received",
-};
-
-always_inline uword
-esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-                     vlib_frame_t * frame)
-{
-  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-  u32 *from = vlib_frame_vector_args (frame);
-  u32 n_left = frame->n_vectors;
-
-  vlib_get_buffers (vm, from, b, n_left);
-
-  while (n_left > 0)
-    {
-      u32 sa_index0;
-
-      /* packets are always going to be dropped, but get the sa_index */
-      sa_index0 = ipsec_tun_protect_get_sa_out
-       (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
-
-      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
-       {
-         esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
-                                                     sizeof (*tr));
-         tr->sa_index = sa_index0;
-       }
-
-      n_left -= 1;
-      b += 1;
-    }
-
-  vlib_node_increment_counter (vm, node->node_index,
-                              ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);
-
-  vlib_buffer_enqueue_to_single_next (vm, node, from,
-                                     ESP_NO_CRYPTO_NEXT_DROP,
-                                     frame->n_vectors);
-
-  return frame->n_vectors;
-}
-
-VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
-                                       vlib_node_runtime_t * node,
-                                       vlib_frame_t * from_frame)
-{
-  return esp_no_crypto_inline (vm, node, from_frame);
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
-{
-  .name = "esp4-no-crypto",
-  .vector_size = sizeof (u32),
-  .format_trace = format_esp_no_crypto_trace,
-  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
-  .error_strings = esp_no_crypto_error_strings,
-  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
-  .next_nodes = {
-    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
-  },
-};
-
-VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
-                                       vlib_node_runtime_t * node,
-                                       vlib_frame_t * from_frame)
-{
-  return esp_no_crypto_inline (vm, node, from_frame);
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
-{
-  .name = "esp6-no-crypto",
-  .vector_size = sizeof (u32),
-  .format_trace = format_esp_no_crypto_trace,
-  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
-  .error_strings = esp_no_crypto_error_strings,
-  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
-  .next_nodes = {
-    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
-  },
+  .n_errors = ESP_ENCRYPT_N_ERROR,
+  .error_counters = esp_encrypt_error_counters,
 };
-/* *INDENT-ON* */
 
 #ifndef CLIB_MARCH_VARIANT