ipsec: compress ipsec_sa_t so data used by dataplane code fits in cacheline
[vpp.git] src/vnet/ipsec/ah_encrypt.c
index 47f3b38..2eab3ac 100644
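
Context for the hunks below: the SA rework behind this commit replaces the per-SA booleans (is_tunnel, is_tunnel_ip6, ...) with tests on a packed flags word via ipsec_sa_is_set_*() helpers, moves byte/packet accounting off the SA into a vlib combined counter, and caches the integrity truncation size directly on the SA. A minimal sketch of the flag-helper idea follows; the type name, bit positions and macro names here are illustrative only, not the real ipsec_sa.h definitions.

#include <stdint.h>

/* Hedged sketch: pack former booleans into one word so the SA fields the
 * dataplane touches stay within a single cacheline. */
typedef struct
{
  uint32_t flags;
  /* ... remaining hot SA fields ... */
} example_sa_t;

#define EXAMPLE_SA_FLAG_IS_TUNNEL    (1 << 2)	/* illustrative bit position */
#define EXAMPLE_SA_FLAG_IS_TUNNEL_V6 (1 << 3)

#define example_sa_is_set_IS_TUNNEL(sa) \
  (((sa)->flags & EXAMPLE_SA_FLAG_IS_TUNNEL) != 0)
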
@@ -59,8 +59,10 @@ static char *ah_encrypt_error_strings[] = {
 
 typedef struct
 {
+  u32 sa_index;
   u32 spi;
-  u32 seq;
+  u32 seq_lo;
+  u32 seq_hi;
   ipsec_integ_alg_t integ_alg;
 } ah_encrypt_trace_t;
 
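
The trace entry now records both halves of the 64-bit extended sequence number instead of a single 32-bit value. When reading a trace, the full counter is simply the concatenation of the two words, e.g. (assuming vppinfra's u32/u64 typedefs):

/* Illustrative only: recombine the traced ESN halves. */
u64 full_seq = ((u64) tr->seq_hi << 32) | tr->seq_lo;
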
@@ -72,8 +74,9 @@ format_ah_encrypt_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   ah_encrypt_trace_t *t = va_arg (*args, ah_encrypt_trace_t *);
 
-  s = format (s, "ah: spi %u seq %u integrity %U",
-             t->spi, t->seq, format_ipsec_integ_alg, t->integ_alg);
+  s = format (s, "ah: sa-index %d spi %u seq %u:%u integrity %U",
+             t->sa_index, t->spi, t->seq_hi, t->seq_lo,
+             format_ipsec_integ_alg, t->integ_alg);
   return s;
 }
 
@@ -82,13 +85,13 @@ ah_encrypt_inline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                   int is_ip6)
 {
-  u32 n_left_from, *from, *to_next = 0, next_index;
+  u32 n_left_from, *from, *to_next = 0, next_index, thread_index;
   int icv_size = 0;
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
   ipsec_main_t *im = &ipsec_main;
-  ipsec_proto_main_t *em = &ipsec_proto_main;
   next_index = node->cached_next_index;
+  thread_index = vm->thread_index;
 
   while (n_left_from > 0)
     {
@@ -125,29 +128,19 @@ ah_encrypt_inline (vlib_main_t * vm,
 
          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
-             clib_warning ("sequence number counter has cycled SPI %u",
-                           sa0->spi);
-             if (is_ip6)
-               vlib_node_increment_counter (vm, ah6_encrypt_node.index,
-                                            AH_ENCRYPT_ERROR_SEQ_CYCLED, 1);
-             else
-               vlib_node_increment_counter (vm, ah4_encrypt_node.index,
-                                            AH_ENCRYPT_ERROR_SEQ_CYCLED, 1);
-             //TODO need to confirm if below is needed
-             to_next[0] = i_bi0;
-             to_next += 1;
+             i_b0->error = node->errors[AH_ENCRYPT_ERROR_SEQ_CYCLED];
              goto trace;
            }
-
-
-         sa0->total_data_size += i_b0->current_length;
+         vlib_increment_combined_counter
+           (&ipsec_sa_counters, thread_index, sa_index0,
+            1, i_b0->current_length);
 
          ssize_t adv;
          ih0 = vlib_buffer_get_current (i_b0);
          ttl = ih0->ip4.ttl;
          tos = ih0->ip4.tos;
 
-         if (PREDICT_TRUE (sa0->is_tunnel))
+         if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
            {
              if (is_ip6)
                adv = -sizeof (ip6_and_ah_header_t);
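
Two dataplane changes in the hunk above: a cycled sequence number is now reported through the buffer's error index rather than clib_warning plus per-node counters, and per-SA accounting moves from a field on the SA (total_data_size) into the per-thread ipsec_sa_counters combined counter indexed by SA pool index, keeping the hot SA data read-mostly. A hedged sketch of how such a counter pool is typically declared and sized on the control-plane side (the identifier ipsec_sa_counters comes from the diff; the initializer values and the validate/zero calls here are illustrative):

/* Sketch: one combined (packets + bytes) counter per SA, per thread. */
vlib_combined_counter_main_t ipsec_sa_counters = {
  .name = "SA",
  .stat_segment_name = "/net/ipsec/sa",
};

/* At SA creation time, make sure a counter exists for this pool index. */
vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);
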
@@ -159,19 +152,18 @@ ah_encrypt_inline (vlib_main_t * vm,
              adv = -sizeof (ah_header_t);
            }
 
-         icv_size =
-           em->ipsec_proto_main_integ_algs[sa0->integ_alg].trunc_size;
+         icv_size = sa0->integ_trunc_size;
          const u8 padding_len = ah_calc_icv_padding_len (icv_size, is_ip6);
          adv -= padding_len;
          /* transport mode save the eth header before it is overwritten */
-         if (PREDICT_FALSE (!sa0->is_tunnel))
+         if (PREDICT_FALSE (!ipsec_sa_is_set_IS_TUNNEL (sa0)))
            {
              ethernet_header_t *ieh0 = (ethernet_header_t *)
                ((u8 *) vlib_buffer_get_current (i_b0) -
                 sizeof (ethernet_header_t));
              ethernet_header_t *oeh0 =
                (ethernet_header_t *) ((u8 *) ieh0 + (adv - icv_size));
-             clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
+             clib_memcpy_fast (oeh0, ieh0, sizeof (ethernet_header_t));
            }
 
          vlib_buffer_advance (i_b0, adv - icv_size);
@@ -185,7 +177,7 @@ ah_encrypt_inline (vlib_main_t * vm,
              hop_limit = ih6_0->ip6.hop_limit;
              ip_version_traffic_class_and_flow_label =
                ih6_0->ip6.ip_version_traffic_class_and_flow_label;
-             if (PREDICT_TRUE (sa0->is_tunnel))
+             if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
                {
                  next_hdr_type = IP_PROTOCOL_IPV6;
                }
@@ -214,7 +206,7 @@ ah_encrypt_inline (vlib_main_t * vm,
              oh0 = vlib_buffer_get_current (i_b0);
              clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
 
-             if (PREDICT_TRUE (sa0->is_tunnel))
+             if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
                {
                  next_hdr_type = IP_PROTOCOL_IP_IN_IP;
                }
@@ -241,15 +233,18 @@ ah_encrypt_inline (vlib_main_t * vm,
            }
 
 
-         if (PREDICT_TRUE (!is_ip6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
+         if (PREDICT_TRUE (!is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
+                           !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)))
            {
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
 
-             next0 = AH_ENCRYPT_NEXT_IP4_LOOKUP;
-             vnet_buffer (i_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+             next0 = sa0->dpo[IPSEC_PROTOCOL_AH].dpoi_next_node;
+             vnet_buffer (i_b0)->ip.adj_index[VLIB_TX] =
+               sa0->dpo[IPSEC_PROTOCOL_AH].dpoi_index;
            }
-         else if (is_ip6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
+         else if (is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
+                  ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
            {
              oh6_0->ip6.src_address.as_u64[0] =
                sa0->tunnel_src_addr.ip6.as_u64[0];
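
Tunnel-mode packets are no longer sent unconditionally to ip4-lookup/ip6-lookup; the next node and adjacency now come from a DPO stacked on the SA, so the packet goes straight to whatever the tunnel destination currently resolves to. A rough sketch of the control-plane stacking this relies on, inferred from the dataplane usage above (the helper name and the use of fib_entry_contribute_forwarding here are assumptions, not the actual ipsec_sa.c code):

/* Hedged sketch: stack the AH encrypt node's DPO on the forwarding DPO of
 * the tunnel destination, so dpoi_next_node / dpoi_index lead straight there. */
static void
example_sa_stack (ipsec_sa_t * sa, fib_node_index_t fib_entry_index)
{
  dpo_id_t tmp = DPO_INVALID;

  fib_entry_contribute_forwarding (fib_entry_index,
				   FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &tmp);
  dpo_stack_from_node (ah4_encrypt_node.index,
		       &sa->dpo[IPSEC_PROTOCOL_AH], &tmp);
  dpo_reset (&tmp);
}
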
@@ -260,22 +255,20 @@ ah_encrypt_inline (vlib_main_t * vm,
              oh6_0->ip6.dst_address.as_u64[1] =
                sa0->tunnel_dst_addr.ip6.as_u64[1];
 
-             next0 = AH_ENCRYPT_NEXT_IP6_LOOKUP;
-             vnet_buffer (i_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+             next0 = sa0->dpo[IPSEC_PROTOCOL_AH].dpoi_next_node;
+             vnet_buffer (i_b0)->ip.adj_index[VLIB_TX] =
+               sa0->dpo[IPSEC_PROTOCOL_AH].dpoi_index;
            }
 
          u8 sig[64];
-         clib_memset (sig, 0, sizeof (sig));
+
          u8 *digest =
            vlib_buffer_get_current (i_b0) + ip_hdr_size +
            sizeof (ah_header_t);
          clib_memset (digest, 0, icv_size);
 
-         unsigned size = hmac_calc (sa0->integ_alg, sa0->integ_key,
-                                    sa0->integ_key_len,
-                                    vlib_buffer_get_current (i_b0),
-                                    i_b0->current_length, sig, sa0->use_esn,
-                                    sa0->seq_hi);
+         unsigned size = hmac_calc (vm, sa0, vlib_buffer_get_current (i_b0),
+                                    i_b0->current_length, sig);
 
          memcpy (digest, sig, size);
          if (is_ip6)
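
hmac_calc() no longer takes the key, key length, ESN flag and seq_hi as separate arguments; the call above passes only the vlib main, the SA and the region to authenticate, so the helper pulls algorithm, key and ESN state from the SA itself. The prototype implied by the call site looks roughly like this (a sketch inferred from the call, not the actual declaration in the header):

unsigned int
hmac_calc (vlib_main_t * vm, ipsec_sa_t * sa,
	   u8 * data, int data_len, u8 * signature);
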
@@ -291,7 +284,7 @@ ah_encrypt_inline (vlib_main_t * vm,
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
            }
 
-         if (!sa0->is_tunnel)
+         if (!ipsec_sa_is_set_IS_TUNNEL (sa0))
            {
              next0 = AH_ENCRYPT_NEXT_INTERFACE_OUTPUT;
              vlib_buffer_advance (i_b0, -sizeof (ethernet_header_t));
@@ -300,12 +293,13 @@ ah_encrypt_inline (vlib_main_t * vm,
        trace:
          if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED))
            {
-             i_b0->flags |= VLIB_BUFFER_IS_TRACED;
              ah_encrypt_trace_t *tr =
                vlib_add_trace (vm, node, i_b0, sizeof (*tr));
              tr->spi = sa0->spi;
-             tr->seq = sa0->seq - 1;
+             tr->seq_lo = sa0->seq;
+             tr->seq_hi = sa0->seq_hi;
              tr->integ_alg = sa0->integ_alg;
+             tr->sa_index = sa_index0;
            }
 
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
@@ -314,28 +308,22 @@ ah_encrypt_inline (vlib_main_t * vm,
        }
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-  if (is_ip6)
-    vlib_node_increment_counter (vm, ah6_encrypt_node.index,
-                                AH_ENCRYPT_ERROR_RX_PKTS,
-                                from_frame->n_vectors);
-  else
-    vlib_node_increment_counter (vm, ah4_encrypt_node.index,
-                                AH_ENCRYPT_ERROR_RX_PKTS,
-                                from_frame->n_vectors);
+  vlib_node_increment_counter (vm, node->node_index,
+                              AH_ENCRYPT_ERROR_RX_PKTS,
+                              from_frame->n_vectors);
 
   return from_frame->n_vectors;
 }
 
-static uword
-ah4_encrypt_node_fn (vlib_main_t * vm,
-                    vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+VLIB_NODE_FN (ah4_encrypt_node) (vlib_main_t * vm,
+                                vlib_node_runtime_t * node,
+                                vlib_frame_t * from_frame)
 {
   return ah_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ah4_encrypt_node) = {
-  .function = ah4_encrypt_node_fn,
   .name = "ah4-encrypt",
   .vector_size = sizeof (u32),
   .format_trace = format_ah_encrypt_trace,
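
The ah4/ah6 node functions are converted to the VLIB_NODE_FN() convention, which emits the per-architecture variants and picks the best one at runtime, so the explicit .function initializers and the VLIB_NODE_FUNCTION_MULTIARCH calls are dropped in these hunks. The general shape of the pattern, as a stand-alone illustration (example_node and its body are made up, not part of this file):

/* Illustrative node written with the VLIB_NODE_FN pattern. */
VLIB_NODE_FN (example_node) (vlib_main_t * vm,
			     vlib_node_runtime_t * node,
			     vlib_frame_t * frame)
{
  return frame->n_vectors;	/* pass-through: report all vectors handled */
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (example_node) = {
  .name = "example-node",
  .vector_size = sizeof (u32),
  /* no .function member: VLIB_NODE_FN supplies the multiarch variants */
};
/* *INDENT-ON* */
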
@@ -353,18 +341,15 @@ VLIB_REGISTER_NODE (ah4_encrypt_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (ah4_encrypt_node, ah4_encrypt_node_fn);
-
-static uword
-ah6_encrypt_node_fn (vlib_main_t * vm,
-                    vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+VLIB_NODE_FN (ah6_encrypt_node) (vlib_main_t * vm,
+                                vlib_node_runtime_t * node,
+                                vlib_frame_t * from_frame)
 {
   return ah_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ah6_encrypt_node) = {
-  .function = ah6_encrypt_node_fn,
   .name = "ah6-encrypt",
   .vector_size = sizeof (u32),
   .format_trace = format_ah_encrypt_trace,
@@ -382,7 +367,6 @@ VLIB_REGISTER_NODE (ah6_encrypt_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (ah6_encrypt_node, ah6_encrypt_node_fn);
 /*
  * fd.io coding-style-patch-verification: ON
  *