IPSEC: move SA counters into the stats segment
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 9c775ef..ffa0211 100644
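
This change replaces ad-hoc per-SA statistics fields (such as the total_data_size accounting removed below) with vlib combined counters, which live in the shared stats segment and can be read by external stats clients. Below is a minimal sketch of the counter API the data path relies on; ipsec_sa_counters itself is declared elsewhere in this patch, and the hypothetical helper with its validate/zero/read calls is an assumption about the control-plane and reader side, not code from this diff.

    /* Sketch only: assumed control-plane/reader usage of the combined
     * counter that the encrypt path below increments. */
    #include <vlib/vlib.h>
    #include <vlib/counter.h>

    extern vlib_combined_counter_main_t ipsec_sa_counters;

    static void
    example_sa_counter_usage (u32 sa_index)   /* hypothetical helper */
    {
      /* when an SA is added: make sure the counter vector covers its index */
      vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
      vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);

      /* when reporting (e.g. a show command or a stats-segment client):
       * per-SA packets and bytes, summed across worker threads */
      vlib_counter_t c;
      vlib_get_combined_counter (&ipsec_sa_counters, sa_index, &c);
      (void) c;
    }
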
@@ -23,7 +23,9 @@
 #include <vnet/ipsec/ipsec.h>
 #include <vnet/ipsec/esp.h>
 
+#ifndef CLIB_MARCH_VARIANT
 ipsec_proto_main_t ipsec_proto_main;
+#endif /* CLIB_MARCH_VARIANT */
 
 #define foreach_esp_encrypt_next                   \
 _(DROP, "error-drop")                              \
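
The ipsec_proto_main definition is guarded so that it is emitted only once: with multi-architecture builds this file is compiled once per CPU variant, and only the default variant may define file-scope objects. A tiny sketch of the pattern, with a hypothetical global:

    typedef struct { u32 flags; } my_module_main_t;   /* hypothetical type */

    #ifndef CLIB_MARCH_VARIANT
    my_module_main_t my_module_main;   /* defined once, in the default variant */
    #endif /* CLIB_MARCH_VARIANT */
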
@@ -62,6 +64,7 @@ static char *esp_encrypt_error_strings[] = {
 
 typedef struct
 {
+  u32 sa_index;
   u32 spi;
   u32 seq;
   u8 udp_encap;
@@ -77,8 +80,8 @@ format_esp_encrypt_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
 
-  s = format (s, "esp: spi %u seq %u crypto %U integrity %U%s",
-             t->spi, t->seq,
+  s = format (s, "esp: sa-index %d spi %u seq %u crypto %U integrity %U%s",
+             t->sa_index, t->spi, t->seq,
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg,
              t->udp_encap ? " udp-encap-enabled" : "");
@@ -137,15 +140,9 @@ esp_encrypt_inline (vlib_main_t * vm,
 
   if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
     {
-      if (is_ip6)
-       vlib_node_increment_counter (vm, esp6_encrypt_node.index,
-                                    ESP_ENCRYPT_ERROR_NO_BUFFER,
-                                    n_left_from);
-      else
-       vlib_node_increment_counter (vm, esp4_encrypt_node.index,
-                                    ESP_ENCRYPT_ERROR_NO_BUFFER,
-                                    n_left_from);
-      clib_warning ("no enough empty buffers. discarding frame");
+      vlib_node_increment_counter (vm, node->node_index,
+                                  ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
+      clib_warning ("not enough empty buffers. discarding frame");
       goto free_buffers_and_exit;
     }
 
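
Both error branches collapse into one call: esp_encrypt_inline is shared by the esp4 and esp6 registrations (and, after this patch, by every march variant), so counters are charged to node->node_index, i.e. to whichever node is actually running, instead of to a hard-coded esp4_encrypt_node.index / esp6_encrypt_node.index. The general shape, as a sketch with hypothetical names:

    #include <vlib/vlib.h>

    enum { EXAMPLE_ERROR_RX_PKTS = 0 };   /* hypothetical error counter */

    static_always_inline uword
    example_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, int is_ip6)
    {
      (void) is_ip6;
      /* charged to whichever registration (v4/v6, any march variant)
       * is actually executing */
      vlib_node_increment_counter (vm, node->node_index,
                                   EXAMPLE_ERROR_RX_PKTS, frame->n_vectors);
      return frame->n_vectors;
    }
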
@@ -185,16 +182,15 @@ esp_encrypt_inline (vlib_main_t * vm,
          sa_index0 = vnet_buffer (i_b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);
 
+         vlib_prefetch_combined_counter
+           (&ipsec_sa_counters, thread_index, sa_index0);
+
          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
-             if (is_ip6)
-               vlib_node_increment_counter (vm, esp6_encrypt_node.index,
-                                            ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
-             else
-               vlib_node_increment_counter (vm, esp4_encrypt_node.index,
-                                            ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
+             vlib_node_increment_counter (vm, node->node_index,
+                                          ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              //TODO: rekey SA
              o_bi0 = i_bi0;
              to_next[0] = o_bi0;
@@ -202,8 +198,6 @@ esp_encrypt_inline (vlib_main_t * vm,
              goto trace;
            }
 
-         sa0->total_data_size += i_b0->current_length;
-
          /* grab free buffer */
          last_empty_buffer = vec_len (empty_buffers) - 1;
          o_bi0 = empty_buffers[last_empty_buffer];
@@ -289,7 +283,9 @@ esp_encrypt_inline (vlib_main_t * vm,
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
 
-             vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+             next0 = sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_next_node;
+             vnet_buffer (o_b0)->ip.adj_index[VLIB_TX] =
+               sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_index;
            }
          else if (is_ip6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
            {
@@ -302,7 +298,9 @@ esp_encrypt_inline (vlib_main_t * vm,
              oh6_0->ip6.dst_address.as_u64[1] =
                sa0->tunnel_dst_addr.ip6.as_u64[1];
 
-             vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+             next0 = sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_next_node;
+             vnet_buffer (o_b0)->ip.adj_index[VLIB_TX] =
+               sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_index;
            }
          else
            {
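
In tunnel mode the post-encap packet no longer goes to the generic lookup next via sw_if_index[VLIB_TX] = ~0; it follows the DPO the SA contributes for ESP, whose dpoi_next_node selects the graph arc and whose dpoi_index is carried in ip.adj_index[VLIB_TX]. The diff only shows the consumer side; the sketch below is an assumption about how the control plane stacks that DPO on the tunnel destination's FIB entry, with a hypothetical helper, not code from this patch.

    #include <vnet/ipsec/ipsec.h>
    #include <vnet/fib/fib_entry.h>
    #include <vnet/dpo/dpo.h>

    /* Assumed control-plane counterpart (sketch): contribute forwarding
     * from the tunnel destination's FIB entry and stack the SA's ESP dpo
     * on it, restricted to the esp4-encrypt node. */
    static void
    example_stack_sa_dpo (ipsec_sa_t * sa, fib_node_index_t fib_entry_index)
    {
      dpo_id_t via_dpo = DPO_INVALID;

      fib_entry_contribute_forwarding (fib_entry_index,
                                       FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
                                       &via_dpo);
      dpo_stack_from_node (esp4_encrypt_node.index,
                           &sa->dpo[IPSEC_PROTOCOL_ESP], &via_dpo);
      dpo_reset (&via_dpo);
    }
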
@@ -316,15 +314,26 @@ esp_encrypt_inline (vlib_main_t * vm,
                                           vlib_buffer_get_current (i_b0) -
                                           sizeof (ethernet_header_t));
                  oeh0 = (ethernet_header_t *) o_b0->data;
-                 clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
+                 clib_memcpy_fast (oeh0, ieh0, sizeof (ethernet_header_t));
                  next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
                  vnet_buffer (o_b0)->sw_if_index[VLIB_TX] =
                    vnet_buffer (i_b0)->sw_if_index[VLIB_TX];
                }
-             vlib_buffer_advance (i_b0, ip_udp_hdr_size);
+
+             if (is_ip6)
+               {
+                 vlib_buffer_advance (i_b0, sizeof (ip6_header_t));
+               }
+             else
+               {
+                 vlib_buffer_advance (i_b0, sizeof (ip4_header_t));
+               }
            }
 
          ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
+         vlib_increment_combined_counter
+           (&ipsec_sa_counters, thread_index, sa_index0,
+            1, i_b0->current_length);
 
          if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
            {
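
The per-SA accounting that used to be a plain field on the SA (the removed sa0->total_data_size) is now a combined counter: the counter line is prefetched as soon as sa_index0 is known, and one packet plus the payload length is added here in this thread's slice of the counter vector, with no atomics; readers sum the per-thread values. A small sketch restating the call semantics, with a hypothetical helper name:

    #include <vlib/vlib.h>
    #include <vlib/counter.h>

    extern vlib_combined_counter_main_t ipsec_sa_counters;

    /* hypothetical helper restating the accounting done inline above */
    static_always_inline void
    example_count_sa_packet (u32 thread_index, u32 sa_index, u32 n_bytes)
    {
      vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                       sa_index, 1 /* packets */, n_bytes);
    }
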
@@ -359,27 +368,25 @@ esp_encrypt_inline (vlib_main_t * vm,
                    ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size];
              RAND_bytes (iv, sizeof (iv));
 
-             clib_memcpy ((u8 *) vlib_buffer_get_current (o_b0) +
-                          ip_udp_hdr_size + sizeof (esp_header_t), iv,
-                          em->ipsec_proto_main_crypto_algs[sa0->
-                                                           crypto_alg].iv_size);
+             clib_memcpy_fast ((u8 *) vlib_buffer_get_current (o_b0) +
+                               ip_udp_hdr_size + sizeof (esp_header_t), iv,
+                               em->ipsec_proto_main_crypto_algs[sa0->
+                                                                crypto_alg].iv_size);
 
              esp_encrypt_cbc (vm, sa0->crypto_alg,
                               (u8 *) vlib_buffer_get_current (i_b0),
                               (u8 *) vlib_buffer_get_current (o_b0) +
                               ip_udp_hdr_size + sizeof (esp_header_t) +
                               IV_SIZE, BLOCK_SIZE * blocks,
-                              sa0->crypto_key, iv);
+                              sa0->crypto_key.data, iv);
            }
 
-         o_b0->current_length += hmac_calc (sa0->integ_alg, sa0->integ_key,
-                                            sa0->integ_key_len,
-                                            (u8 *) o_esp0,
-                                            o_b0->current_length -
-                                            ip_udp_hdr_size,
-                                            vlib_buffer_get_current (o_b0) +
-                                            o_b0->current_length,
-                                            sa0->use_esn, sa0->seq_hi);
+         o_b0->current_length +=
+           hmac_calc (sa0->integ_alg, sa0->integ_key.data,
+                      sa0->integ_key.len, (u8 *) o_esp0,
+                      o_b0->current_length - ip_udp_hdr_size,
+                      vlib_buffer_get_current (o_b0) + o_b0->current_length,
+                      sa0->use_esn, sa0->seq_hi);
 
 
          if (is_ip6)
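
The crypto and integrity keys are now accessed through .data / .len members rather than a bare array plus a separate integ_key_len field, which implies a small key structure on the SA. The sketch below is only an assumed shape inferred from those accessors, not a definition taken from this patch:

    /* Assumed (hypothetical) shape of the SA key type implied by
     * sa0->crypto_key.data and sa0->integ_key.len above. */
    typedef struct
    {
      u8 len;             /* key length in bytes */
      u8 data[128];       /* hypothetical maximum key size */
    } example_ipsec_key_t;
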
@@ -414,6 +421,7 @@ esp_encrypt_inline (vlib_main_t * vm,
                  o_b0->trace_index = i_b0->trace_index;
                  esp_encrypt_trace_t *tr =
                    vlib_add_trace (vm, node, o_b0, sizeof (*tr));
+                 tr->sa_index = sa_index0;
                  tr->spi = sa0->spi;
                  tr->seq = sa0->seq - 1;
                  tr->udp_encap = sa0->udp_encap;
@@ -428,14 +436,9 @@ esp_encrypt_inline (vlib_main_t * vm,
        }
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-  if (is_ip6)
-    vlib_node_increment_counter (vm, esp6_encrypt_node.index,
-                                ESP_ENCRYPT_ERROR_RX_PKTS,
-                                from_frame->n_vectors);
-  else
-    vlib_node_increment_counter (vm, esp4_encrypt_node.index,
-                                ESP_ENCRYPT_ERROR_RX_PKTS,
-                                from_frame->n_vectors);
+  vlib_node_increment_counter (vm, node->node_index,
+                              ESP_ENCRYPT_ERROR_RX_PKTS,
+                              from_frame->n_vectors);
 
 free_buffers_and_exit:
   if (recycle)
@@ -444,16 +447,15 @@ free_buffers_and_exit:
   return from_frame->n_vectors;
 }
 
-static uword
-esp4_encrypt_node_fn (vlib_main_t * vm,
-                     vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
+                                 vlib_node_runtime_t * node,
+                                 vlib_frame_t * from_frame)
 {
   return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp4_encrypt_node) = {
-  .function = esp4_encrypt_node_fn,
   .name = "esp4-encrypt",
   .vector_size = sizeof (u32),
   .format_trace = format_esp_encrypt_trace,
@@ -471,18 +473,15 @@ VLIB_REGISTER_NODE (esp4_encrypt_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (esp4_encrypt_node, esp4_encrypt_node_fn);
-
-static uword
-esp6_encrypt_node_fn (vlib_main_t * vm,
-                     vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
+                                 vlib_node_runtime_t * node,
+                                 vlib_frame_t * from_frame)
 {
   return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp6_encrypt_node) = {
-  .function = esp6_encrypt_node_fn,
   .name = "esp6-encrypt",
   .vector_size = sizeof (u32),
   .format_trace = format_esp_encrypt_trace,
@@ -500,7 +499,6 @@ VLIB_REGISTER_NODE (esp6_encrypt_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (esp6_encrypt_node, esp6_encrypt_node_fn);
 /*
  * fd.io coding-style-patch-verification: ON
  *
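
The node functions are converted to the VLIB_NODE_FN() style: the macro emits one implementation per CPU march variant and selects the best one at load time, so the static wrapper, the explicit .function member in VLIB_REGISTER_NODE, and the VLIB_NODE_FUNCTION_MULTIARCH() boilerplate all go away. The general pattern, shown with a hypothetical node:

    #include <vlib/vlib.h>

    /* hypothetical example node using the same pattern */
    VLIB_NODE_FN (example_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * frame)
    {
      /* a real node would dispatch into a shared *_inline () body here */
      return frame->n_vectors;
    }

    /* *INDENT-OFF* */
    VLIB_REGISTER_NODE (example_node) = {
      .name = "example",               /* no .function member needed */
      .vector_size = sizeof (u32),
    };
    /* *INDENT-ON* */
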