ipsec: Store thread-index in buffer meta-data during SA handoff 07/31207/5
author Neale Ranns <neale@graphiant.com>
Wed, 10 Feb 2021 08:42:49 +0000 (08:42 +0000)
committer Matthew Smith <mgsmith@netgate.com>
Fri, 12 Feb 2021 20:09:28 +0000 (20:09 +0000)
Type: improvement

This removes the need to load the SA in the handoff node.
Stop prefetching the packet data in the handoff node; it is not needed there.

Signed-off-by: Neale Ranns <neale@graphiant.com>
Change-Id: I340472dc437f050cc1c3c11dfeb47ab09c609624
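
In short: the encrypt/decrypt nodes already have the SA in hand when they decide a packet belongs to another thread, so they now stash the SA's owning thread index in the buffer metadata, and the handoff node reads that field directly instead of re-fetching the SA from the pool. A condensed sketch of the pattern, using the names from the hunks below (ah_encrypt shown; the other nodes follow the same shape):

      /* encrypt/decrypt node: the SA is already loaded here, so record its
         owning thread in the buffer metadata before handing the packet off */
      if (thread_index != sa0->thread_index)
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          next[0] = AH_ENCRYPT_NEXT_HANDOFF;
        }

      /* ipsec handoff node: read the thread index straight from the buffer;
         no SA pool lookup is needed any more */
      ti[0] = vnet_buffer (b[0])->ipsec.thread_index;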

src/vnet/buffer.h
src/vnet/ipsec/ah_decrypt.c
src/vnet/ipsec/ah_encrypt.c
src/vnet/ipsec/esp_decrypt.c
src/vnet/ipsec/esp_encrypt.c
src/vnet/ipsec/ipsec_handoff.c
src/vnet/ipsec/ipsec_sa.h

index 88637e1..27aeb3b 100644 (file)
@@ -309,8 +309,11 @@ typedef struct
     /* interface output features */
     struct
     {
+      /* don't overlap the adjacencies or the flow-hash */
+      u32 __pad[3];
       u32 sad_index;
       u32 protect_index;
+      u16 thread_index;
     } ipsec;
 
     /* MAP */
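
The three u32s of padding keep the ipsec fields clear of the adjacency and flow-hash metadata that the interface-output feature nodes still use, and the STATIC_ASSERT added in ipsec_sa.h (last hunk below) checks the resulting offsets at compile time. A standalone illustration of that technique, using a hypothetical layout rather than the real vnet_buffer_opaque_t:

      #include <stddef.h>
      #include <stdint.h>

      /* hypothetical per-feature opaque area sharing space via a union */
      typedef union
      {
        struct
        {
          uint32_t adj_index[2]; /* still consumed by earlier nodes */
          uint32_t flow_hash;
          uint32_t save_protocol;
        } ip;
        struct
        {
          uint32_t __pad[3];     /* skip over adj_index and flow_hash */
          uint32_t sad_index;
          uint32_t protect_index;
          uint16_t thread_index;
        } ipsec;
      } opaque_t;

      /* fail the build if a later edit lets the two layouts drift apart */
      _Static_assert (offsetof (opaque_t, ipsec.sad_index) ==
                        offsetof (opaque_t, ip.save_protocol),
                      "ipsec fields must not overlap the ip fields");
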
index 03a9dc8..107ccd2 100644 (file)
@@ -187,6 +187,7 @@ ah_decrypt_inline (vlib_main_t * vm,
 
       if (PREDICT_TRUE (thread_index != sa0->thread_index))
        {
+         vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          next[0] = AH_DECRYPT_NEXT_HANDOFF;
          goto next;
        }
index 610b95e..37dd25d 100644 (file)
@@ -195,6 +195,7 @@ ah_encrypt_inline (vlib_main_t * vm,
 
       if (PREDICT_TRUE (thread_index != sa0->thread_index))
        {
+         vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          next[0] = AH_ENCRYPT_NEXT_HANDOFF;
          goto next;
        }
index e5277b1..a9aa9b8 100644 (file)
@@ -1123,6 +1123,7 @@ esp_decrypt_inline (vlib_main_t * vm,
 
       if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
+         vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          esp_set_next_index (is_async, from, nexts, from[b - bufs],
                              &n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
          next[0] = ESP_DECRYPT_NEXT_HANDOFF;
index e64de26..c76fccc 100644 (file)
@@ -685,6 +685,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
 
       if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
+         vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          esp_set_next_index (is_async, from, nexts, from[b - bufs],
                              &n_async_drop, handoff_next, next);
          goto trace;
index 8bd6d22..9092f81 100644 (file)
@@ -60,9 +60,7 @@ ipsec_handoff (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
   u16 thread_indices[VLIB_FRAME_SIZE], *ti;
   u32 n_enq, n_left_from, *from;
-  ipsec_main_t *im;
 
-  im = &ipsec_main;
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   vlib_get_buffers (vm, from, bufs, n_left_from);
@@ -72,9 +70,6 @@ ipsec_handoff (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
 
   while (n_left_from >= 4)
     {
-      ipsec_sa_t *sa0, *sa1, *sa2, *sa3;
-      u32 sai0, sai1, sai2, sai3;
-
       /* Prefetch next iteration. */
       if (n_left_from >= 12)
        {
@@ -82,28 +77,14 @@ ipsec_handoff (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
          vlib_prefetch_buffer_header (b[9], LOAD);
          vlib_prefetch_buffer_header (b[10], LOAD);
          vlib_prefetch_buffer_header (b[11], LOAD);
-
-         vlib_prefetch_buffer_data (b[4], LOAD);
-         vlib_prefetch_buffer_data (b[5], LOAD);
-         vlib_prefetch_buffer_data (b[6], LOAD);
-         vlib_prefetch_buffer_data (b[7], LOAD);
        }
 
-      sai0 = vnet_buffer (b[0])->ipsec.sad_index;
-      sai1 = vnet_buffer (b[1])->ipsec.sad_index;
-      sai2 = vnet_buffer (b[2])->ipsec.sad_index;
-      sai3 = vnet_buffer (b[3])->ipsec.sad_index;
-      sa0 = pool_elt_at_index (im->sad, sai0);
-      sa1 = pool_elt_at_index (im->sad, sai1);
-      sa2 = pool_elt_at_index (im->sad, sai2);
-      sa3 = pool_elt_at_index (im->sad, sai3);
-
-      ti[0] = sa0->thread_index;
-      ti[1] = sa1->thread_index;
-      ti[2] = sa2->thread_index;
-      ti[3] = sa3->thread_index;
-
-      if (node->flags & VLIB_NODE_FLAG_TRACE)
+      ti[0] = vnet_buffer (b[0])->ipsec.thread_index;
+      ti[1] = vnet_buffer (b[1])->ipsec.thread_index;
+      ti[2] = vnet_buffer (b[2])->ipsec.thread_index;
+      ti[3] = vnet_buffer (b[3])->ipsec.thread_index;
+
+      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
@@ -137,13 +118,7 @@ ipsec_handoff (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
     }
   while (n_left_from > 0)
     {
-      ipsec_sa_t *sa0;
-      u32 sai0;
-
-      sai0 = vnet_buffer (b[0])->ipsec.sad_index;
-      sa0 = pool_elt_at_index (im->sad, sai0);
-
-      ti[0] = sa0->thread_index;
+      ti[0] = vnet_buffer (b[0])->ipsec.thread_index;
 
       if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
index 84abd6e..a5756f5 100644 (file)
@@ -223,6 +223,14 @@ typedef struct
 STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline1, CLIB_CACHE_LINE_BYTES);
 STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline2, 2 * CLIB_CACHE_LINE_BYTES);
 
+/*
+ * Ensure that the IPsec data does not overlap with the IP data in
+ * the buffer meta data
+ */
+STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, ipsec.sad_index) ==
+                STRUCT_OFFSET_OF (vnet_buffer_opaque_t, ip.save_protocol),
+              "IPSec data is overlapping with IP data");
+
 #define _(a,v,s)                                                        \
   always_inline int                                                     \
   ipsec_sa_is_set_##v (const ipsec_sa_t *sa) {                          \