ipsec: Use .api declared error counters
[vpp.git] / src / vnet / ipsec / ah_encrypt.c
index d12ca67..7116a16 100644 (file)
 #include <vnet/ipsec/ipsec.h>
 #include <vnet/ipsec/esp.h>
 #include <vnet/ipsec/ah.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
+#include <vnet/tunnel/tunnel_dp.h>
 
 #define foreach_ah_encrypt_next \
-  _ (DROP, "error-drop")            \
-  _ (IP4_LOOKUP, "ip4-lookup")      \
-  _ (IP6_LOOKUP, "ip6-lookup")      \
+  _ (DROP, "error-drop")                           \
+  _ (HANDOFF, "handoff")                           \
   _ (INTERFACE_OUTPUT, "interface-output")
 
 
@@ -38,26 +39,6 @@ typedef enum
     AH_ENCRYPT_N_NEXT,
 } ah_encrypt_next_t;
 
-#define foreach_ah_encrypt_error                                \
- _(RX_PKTS, "AH pkts received")                                 \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(SEQ_CYCLED, "sequence number cycled")
-
-
-typedef enum
-{
-#define _(sym,str) AH_ENCRYPT_ERROR_##sym,
-  foreach_ah_encrypt_error
-#undef _
-    AH_ENCRYPT_N_ERROR,
-} ah_encrypt_error_t;
-
-static char *ah_encrypt_error_strings[] = {
-#define _(sym,string) string,
-  foreach_ah_encrypt_error
-#undef _
-};
-
 typedef struct
 {
   u32 sa_index;
@@ -75,8 +56,8 @@ format_ah_encrypt_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   ah_encrypt_trace_t *t = va_arg (*args, ah_encrypt_trace_t *);
 
-  s = format (s, "ah: sa-index %d spi %u seq %u:%u integrity %U",
-             t->sa_index, t->spi, t->seq_hi, t->seq_lo,
+  s = format (s, "ah: sa-index %d spi %u (0x%08x) seq %u:%u integrity %U",
+             t->sa_index, t->spi, t->spi, t->seq_hi, t->seq_lo,
              format_ipsec_integ_alg, t->integ_alg);
   return s;
 }
@@ -112,20 +93,21 @@ typedef struct
 {
   union
   {
+    /* Variable fields in the IP header not covered by the AH
+     * integrity check */
     struct
     {
-      u8 hop_limit;
       u32 ip_version_traffic_class_and_flow_label;
+      u8 hop_limit;
     };
-
     struct
     {
       u8 ttl;
       u8 tos;
     };
   };
-  i16 current_data;
   u8 skip;
+  i16 current_data;
   u32 sa_index;
 } ah_encrypt_packet_data_t;
 
@@ -175,7 +157,7 @@ ah_encrypt_inline (vlib_main_t * vm,
                                             current_sa_pkts,
                                             current_sa_bytes);
          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
-         sa0 = pool_elt_at_index (im->sad, current_sa_index);
+         sa0 = ipsec_sa_get (current_sa_index);
 
          current_sa_bytes = current_sa_pkts = 0;
        }
@@ -183,6 +165,22 @@ ah_encrypt_inline (vlib_main_t * vm,
       pd->sa_index = current_sa_index;
       next[0] = AH_ENCRYPT_NEXT_DROP;
 
+      if (PREDICT_FALSE (~0 == sa0->thread_index))
+       {
+         /* this is the first packet to use this SA, claim the SA
+          * for this thread. this could happen simultaneously on
+          * another thread */
+         clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+                                   ipsec_sa_assign_thread (thread_index));
+       }
+
+      if (PREDICT_TRUE (thread_index != sa0->thread_index))
+       {
+         vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+         next[0] = AH_ENCRYPT_NEXT_HANDOFF;
+         goto next;
+       }
+
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
          b[0]->error = node->errors[AH_ENCRYPT_ERROR_SEQ_CYCLED];
@@ -195,8 +193,6 @@ ah_encrypt_inline (vlib_main_t * vm,
 
       ssize_t adv;
       ih0 = vlib_buffer_get_current (b[0]);
-      pd->ttl = ih0->ip4.ttl;
-      pd->tos = ih0->ip4.tos;
 
       if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
        {
@@ -216,12 +212,12 @@ ah_encrypt_inline (vlib_main_t * vm,
       /* transport mode save the eth header before it is overwritten */
       if (PREDICT_FALSE (!ipsec_sa_is_set_IS_TUNNEL (sa0)))
        {
-         ethernet_header_t *ieh0 = (ethernet_header_t *)
-           ((u8 *) vlib_buffer_get_current (b[0]) -
-            sizeof (ethernet_header_t));
-         ethernet_header_t *oeh0 =
-           (ethernet_header_t *) ((u8 *) ieh0 + (adv - icv_size));
-         clib_memcpy_fast (oeh0, ieh0, sizeof (ethernet_header_t));
+         const u32 l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
+         u8 *l2_hdr_in = (u8 *) vlib_buffer_get_current (b[0]) - l2_len;
+
+         u8 *l2_hdr_out = l2_hdr_in + adv - icv_size;
+
+         clib_memcpy_le32 (l2_hdr_out, l2_hdr_in, l2_len);
        }
 
       vlib_buffer_advance (b[0], adv - icv_size);
@@ -232,10 +228,20 @@ ah_encrypt_inline (vlib_main_t * vm,
          ip_hdr_size = sizeof (ip6_header_t);
          oh6_0 = vlib_buffer_get_current (b[0]);
          pd->current_data = b[0]->current_data;
-
          pd->hop_limit = ih6_0->ip6.hop_limit;
-         pd->ip_version_traffic_class_and_flow_label =
+
+         oh6_0->ip6.ip_version_traffic_class_and_flow_label =
            ih6_0->ip6.ip_version_traffic_class_and_flow_label;
+
+         if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+           {
+             ip6_set_dscp_network_order (&oh6_0->ip6, sa0->tunnel.t_dscp);
+             tunnel_encap_fixup_6o6 (sa0->tunnel_flags, &ih6_0->ip6,
+                                     &oh6_0->ip6);
+           }
+         pd->ip_version_traffic_class_and_flow_label =
+           oh6_0->ip6.ip_version_traffic_class_and_flow_label;
+
          if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
            {
              next_hdr_type = IP_PROTOCOL_IPV6;
@@ -261,8 +267,31 @@ ah_encrypt_inline (vlib_main_t * vm,
        {
          ip_hdr_size = sizeof (ip4_header_t);
          oh0 = vlib_buffer_get_current (b[0]);
-         clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
+         pd->ttl = ih0->ip4.ttl;
+
+         if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+           {
+             if (sa0->tunnel.t_dscp)
+               pd->tos = sa0->tunnel.t_dscp << 2;
+             else
+               {
+                 pd->tos = ih0->ip4.tos;
+
+                 if (!(sa0->tunnel_flags &
+                       TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP))
+                   pd->tos &= 0x3;
+                 if (!(sa0->tunnel_flags &
+                       TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_ECN))
+                   pd->tos &= 0xfc;
+               }
+           }
+         else
+           {
+             pd->tos = ih0->ip4.tos;
+           }
+
          pd->current_data = b[0]->current_data;
+         clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
 
          if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
            {
@@ -292,7 +321,7 @@ ah_encrypt_inline (vlib_main_t * vm,
        {
          clib_memcpy_fast (&oh0->ip4.address_pair,
                            &sa0->ip4_hdr.address_pair,
-                           sizeof (ip4_address_t));
+                           sizeof (ip4_address_pair_t));
 
          next[0] = sa0->dpo.dpoi_next_node;
          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
@@ -337,6 +366,18 @@ ah_encrypt_inline (vlib_main_t * vm,
        }
 
     next:
+      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+       {
+         sa0 = ipsec_sa_get (pd->sa_index);
+         ah_encrypt_trace_t *tr =
+           vlib_add_trace (vm, node, b[0], sizeof (*tr));
+         tr->spi = sa0->spi;
+         tr->seq_lo = sa0->seq;
+         tr->seq_hi = sa0->seq_hi;
+         tr->integ_alg = sa0->integ_alg;
+         tr->sa_index = pd->sa_index;
+       }
+
       n_left -= 1;
       next += 1;
       pd += 1;
@@ -359,7 +400,7 @@ ah_encrypt_inline (vlib_main_t * vm,
   while (n_left)
     {
       if (pd->skip)
-       goto trace;
+       goto next_pkt;
 
       if (is_ip6)
        {
@@ -376,19 +417,7 @@ ah_encrypt_inline (vlib_main_t * vm,
          oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
        }
 
-    trace:
-      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
-       {
-         sa0 = vec_elt_at_index (im->sad, pd->sa_index);
-         ah_encrypt_trace_t *tr =
-           vlib_add_trace (vm, node, b[0], sizeof (*tr));
-         tr->spi = sa0->spi;
-         tr->seq_lo = sa0->seq;
-         tr->seq_hi = sa0->seq_hi;
-         tr->integ_alg = sa0->integ_alg;
-         tr->sa_index = pd->sa_index;
-       }
-
+    next_pkt:
       n_left -= 1;
       next += 1;
       pd += 1;
@@ -415,14 +444,14 @@ VLIB_REGISTER_NODE (ah4_encrypt_node) = {
   .format_trace = format_ah_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(ah_encrypt_error_strings),
-  .error_strings = ah_encrypt_error_strings,
+  .n_errors = AH_ENCRYPT_N_ERROR,
+  .error_counters = ah_encrypt_error_counters,
 
   .n_next_nodes = AH_ENCRYPT_N_NEXT,
   .next_nodes = {
-#define _(s,n) [AH_ENCRYPT_NEXT_##s] = n,
-    foreach_ah_encrypt_next
-#undef _
+    [AH_ENCRYPT_NEXT_DROP] = "ip4-drop",
+    [AH_ENCRYPT_NEXT_HANDOFF] = "ah4-encrypt-handoff",
+    [AH_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
   },
 };
 /* *INDENT-ON* */
@@ -441,18 +470,37 @@ VLIB_REGISTER_NODE (ah6_encrypt_node) = {
   .format_trace = format_ah_encrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(ah_encrypt_error_strings),
-  .error_strings = ah_encrypt_error_strings,
+  .n_errors = AH_ENCRYPT_N_ERROR,
+  .error_counters = ah_encrypt_error_counters,
 
   .n_next_nodes = AH_ENCRYPT_N_NEXT,
   .next_nodes = {
-#define _(s,n) [AH_ENCRYPT_NEXT_##s] = n,
-    foreach_ah_encrypt_next
-#undef _
+    [AH_ENCRYPT_NEXT_DROP] = "ip6-drop",
+    [AH_ENCRYPT_NEXT_HANDOFF] = "ah6-encrypt-handoff",
+    [AH_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
   },
 };
 /* *INDENT-ON* */
 
+#ifndef CLIB_MARCH_VARIANT
+
+/* One-time node init: allocate the per-node frame queues used to hand
+ * packets off to the thread that owns an SA (the fast path selects
+ * AH_ENCRYPT_NEXT_HANDOFF when thread_index != sa0->thread_index).
+ * Guarded by CLIB_MARCH_VARIANT so only the default march variant
+ * registers the init function once. */
+static clib_error_t *
+ah_encrypt_init (vlib_main_t *vm)
+{
+  ipsec_main_t *im = &ipsec_main;
+
+  /* NOTE(review): queue indices are stored in ipsec_main; presumably
+   * consumed by the ah4/ah6 encrypt handoff nodes — confirm there. */
+  im->ah4_enc_fq_index =
+    vlib_frame_queue_main_init (ah4_encrypt_node.index, 0);
+  im->ah6_enc_fq_index =
+    vlib_frame_queue_main_init (ah6_encrypt_node.index, 0);
+
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (ah_encrypt_init);
+
+#endif
+
 /*
  * fd.io coding-style-patch-verification: ON
  *