ipsec: remove pending node 28/29528/3
author Fan Zhang <roy.fan.zhang@intel.com>
Mon, 19 Oct 2020 12:08:34 +0000 (13:08 +0100)
committer Damjan Marion <dmarion@me.com>
Sat, 24 Oct 2020 09:38:00 +0000 (09:38 +0000)
This patch removes the esp-encrypt-pending and esp-decrypt-pending
graph nodes from the ipsec data path. In async mode, packets that hit
an error or need to be handed off are now routed to the drop/handoff
next nodes directly, via the esp_set_next_index() helper and the
reworked esp_async_recycle_failed_submit() in esp.h, so the no-op
pending nodes are no longer needed.

Type: improvement

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Change-Id: Icd90837eafdbfbfdf348681dcafb872593978980

src/vnet/ipsec/esp.h
src/vnet/ipsec/esp_decrypt.c
src/vnet/ipsec/esp_encrypt.c

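The core of the change is the esp_set_next_index() helper added to esp.h (see the
first hunk below): in async mode a failed packet cannot take the regular per-packet
next[] path, because successful packets are handed to the crypto engine frame and
delivered later by the post nodes. Instead, the failed buffer index and its
drop/handoff next are packed into the head of the from[]/nexts[] vectors and flushed
once per frame with vlib_buffer_enqueue_to_next(). The standalone toy program below
models that compaction pattern outside of VPP; the stub typedefs, the DROP_NEXT value
and the sample buffer indices are placeholders, and only the helper body mirrors the
one added to esp.h.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;   /* stand-ins for the vppinfra types */
    typedef uint16_t u16;

    /* same shape as the helper added to esp.h below: in async mode the buffer
     * index and its drop/handoff next are packed at the front of from[]/nexts[];
     * in sync mode the per-packet next[0] is set as before */
    static void
    esp_set_next_index (int is_async, u32 * from, u16 * nexts, u32 bi,
                        u16 * drop_index, u16 drop_next, u16 * next)
    {
      if (is_async)
        {
          from[*drop_index] = bi;
          nexts[*drop_index] = drop_next;
          *drop_index += 1;
        }
      else
        next[0] = drop_next;
    }

    int
    main (void)
    {
      enum { DROP_NEXT = 1 };               /* placeholder next index */
      u32 from[4] = { 10, 11, 12, 13 };     /* buffer indices of one frame */
      u16 nexts[4] = { 0 };
      u16 n_async_drop = 0;

      /* pretend buffers 11 and 13 failed their per-packet checks in async
       * mode; reusing the front of from[] is safe because the successful
       * buffers were already recorded in the crypto async frame */
      esp_set_next_index (1, from, nexts, 11, &n_async_drop, DROP_NEXT, 0);
      esp_set_next_index (1, from, nexts, 13, &n_async_drop, DROP_NEXT, 0);

      /* only the failed packets are enqueued (vlib_buffer_enqueue_to_next in
       * VPP); the rest are delivered later by the decrypt/encrypt post nodes */
      for (u16 i = 0; i < n_async_drop; i++)
        printf ("enqueue buffer %u to next %u\n",
                (unsigned) from[i], (unsigned) nexts[i]);
      return 0;
    }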
diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
index 6c47bb0..0121015 100644
--- a/src/vnet/ipsec/esp.h
+++ b/src/vnet/ipsec/esp.h
@@ -128,6 +128,46 @@ esp_aad_fill (u8 * data, const esp_header_t * esp, const ipsec_sa_t * sa)
     }
 }
 
+/* Special case to drop or hand off packets for sync/async modes.
+ *
+ * Unlike sync mode, async mode only enqueues drop or hand-off packets
+ * to the next nodes.
+ */
+always_inline void
+esp_set_next_index (int is_async, u32 * from, u16 * nexts, u32 bi,
+                   u16 * drop_index, u16 drop_next, u16 * next)
+{
+  if (is_async)
+    {
+      from[*drop_index] = bi;
+      nexts[*drop_index] = drop_next;
+      *drop_index += 1;
+    }
+  else
+    next[0] = drop_next;
+}
+
+/* when submitting a frame fails, drop all buffers in the frame */
+always_inline void
+esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
+                                vlib_buffer_t ** b, u32 * from, u16 * nexts,
+                                u16 * n_dropped, u16 drop_next_index,
+                                vlib_error_t err)
+{
+  u32 n_drop = f->n_elts;
+  u32 *bi = f->buffer_indices;
+  b -= n_drop;
+  while (n_drop--)
+    {
+      b[0]->error = err;
+      esp_set_next_index (1, from, nexts, bi[0], n_dropped, drop_next_index,
+                         NULL);
+      bi++;
+      b++;
+    }
+  vnet_crypto_async_reset_frame (f);
+}
+
 /**
  * The post data structure to for esp_encrypt/decrypt_inline to write to
  * vib_buffer_t opaque unused field, and for post nodes to pick up after
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index ff9fc0c..45d08a6 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -32,8 +32,7 @@ _(DROP, "error-drop")                           \
 _(IP4_INPUT, "ip4-input-no-checksum")           \
 _(IP6_INPUT, "ip6-input")                       \
 _(L2_INPUT, "l2-input")                         \
-_(HANDOFF, "handoff")                          \
-_(PENDING, "pending")
+_(HANDOFF, "handoff")
 
 #define _(v, s) ESP_DECRYPT_NEXT_##v,
 typedef enum
@@ -678,7 +677,7 @@ esp_decrypt_prepare_async_frame (vlib_main_t * vm,
              /* allocate buffer failed, will not add to frame and drop */
              b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
              next[0] = ESP_DECRYPT_NEXT_DROP;
-             return 0;
+             return -1;
            }
        }
       else
@@ -738,7 +737,6 @@ out:
   *async_pd = *pd;
   *async_pd2 = *pd2;
   pd->protect_index = current_protect_index;
-  next[0] = ESP_DECRYPT_NEXT_PENDING;
 
   /* for AEAD integ_len - crypto_len will be negative, it is ok since it
    * is ignored by the engine. */
@@ -1012,20 +1010,6 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
     }
 }
 
-/* when submitting a frame is failed, drop all buffers in the frame */
-static_always_inline void
-esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
-                                vlib_buffer_t ** b, u16 * next)
-{
-  u32 n_drop = f->n_elts;
-  while (--n_drop)
-    {
-      (b - n_drop)[0]->error = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-      (next - n_drop)[0] = ESP_DECRYPT_NEXT_DROP;
-    }
-  vnet_crypto_async_reset_frame (f);
-}
-
 always_inline uword
 esp_decrypt_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
@@ -1051,6 +1035,7 @@ esp_decrypt_inline (vlib_main_t * vm,
   vnet_crypto_async_frame_t *async_frame = 0;
   int is_async = im->async_mode;
   vnet_crypto_async_op_id_t last_async_op = ~0;
+  u16 n_async_drop = 0;
 
   vlib_get_buffers (vm, from, b, n_left);
   if (!is_async)
@@ -1081,6 +1066,8 @@ esp_decrypt_inline (vlib_main_t * vm,
       if (n_bufs == 0)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }
@@ -1110,7 +1097,10 @@ esp_decrypt_inline (vlib_main_t * vm,
              if (async_frame && async_frame->n_elts)
                {
                  if (vnet_crypto_async_submit_open_frame (vm, async_frame))
-                   esp_async_recycle_failed_submit (async_frame, b, next);
+                   esp_async_recycle_failed_submit (async_frame, b, from,
+                                                    nexts, &n_async_drop,
+                                                    ESP_DECRYPT_NEXT_DROP,
+                                                    ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
                }
              async_frame =
                vnet_crypto_async_get_frame (vm, sa0->crypto_async_dec_op_id);
@@ -1129,6 +1119,8 @@ esp_decrypt_inline (vlib_main_t * vm,
 
       if (PREDICT_TRUE (thread_index != sa0->decrypt_thread_index))
        {
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
          next[0] = ESP_DECRYPT_NEXT_HANDOFF;
          goto next;
        }
@@ -1161,14 +1153,16 @@ esp_decrypt_inline (vlib_main_t * vm,
       if (ipsec_sa_anti_replay_check (sa0, pd->seq))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
-         next[0] = ESP_DECRYPT_NEXT_DROP;
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
          goto next;
        }
 
       if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
-         next[0] = ESP_DECRYPT_NEXT_DROP;
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
          goto next;
        }
 
@@ -1188,7 +1182,18 @@ esp_decrypt_inline (vlib_main_t * vm,
                                                     b[0], next, async_next);
          if (PREDICT_FALSE (ret < 0))
            {
-             esp_async_recycle_failed_submit (async_frame, b, next);
+             b[0]->error = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+             esp_set_next_index (1, from, nexts, from[b - bufs],
+                                 &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+             /* when next[0] is ESP_DECRYPT_NEXT_DROP we only have to drop
+              * the current packet. Otherwise it is a frame submission error,
+              * thus we have to drop the whole frame.
+              */
+             if (next[0] != ESP_DECRYPT_NEXT_DROP && async_frame->n_elts)
+               esp_async_recycle_failed_submit (async_frame, b, from,
+                                                nexts, &n_async_drop,
+                                                ESP_DECRYPT_NEXT_DROP,
+                                                ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
              goto next;
            }
        }
@@ -1216,14 +1221,17 @@ esp_decrypt_inline (vlib_main_t * vm,
       if (async_frame && async_frame->n_elts)
        {
          if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
-           esp_async_recycle_failed_submit (async_frame, b, next);
+           esp_async_recycle_failed_submit (async_frame, b, from, nexts,
+                                            &n_async_drop,
+                                            ESP_DECRYPT_NEXT_DROP,
+                                            ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
        }
 
       /* no post process in async */
-      n_left = from_frame->n_vectors;
       vlib_node_increment_counter (vm, node->node_index,
                                   ESP_DECRYPT_ERROR_RX_PKTS, n_left);
-      vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
+      if (n_async_drop)
+       vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
 
       return n_left;
     }
@@ -1429,23 +1437,6 @@ VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
   return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
 }
 
-VLIB_NODE_FN (esp_decrypt_pending_node) (vlib_main_t * vm,
-                                        vlib_node_runtime_t * node,
-                                        vlib_frame_t * from_frame)
-{
-  return from_frame->n_vectors;
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp_decrypt_pending_node) = {
-  .name = "esp-decrypt-pending",
-  .vector_size = sizeof (u32),
-  .type = VLIB_NODE_TYPE_INTERNAL,
-
-  .n_next_nodes = 0
-};
-/* *INDENT-ON* */
-
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp4_decrypt_node) = {
   .name = "esp4-decrypt",
@@ -1463,7 +1454,6 @@ VLIB_REGISTER_NODE (esp4_decrypt_node) = {
     [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
     [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
     [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
-    [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending"
   },
 };
 
@@ -1495,7 +1485,6 @@ VLIB_REGISTER_NODE (esp6_decrypt_node) = {
     [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
     [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
     [ESP_DECRYPT_NEXT_HANDOFF]=  "esp6-decrypt-handoff",
-    [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending"
   },
 };
 
@@ -1525,7 +1514,6 @@ VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
     [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
     [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
     [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
-    [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending"
   },
 };
 
@@ -1555,7 +1543,6 @@ VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
     [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
     [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
     [ESP_DECRYPT_NEXT_HANDOFF]=  "esp6-decrypt-tun-handoff",
-    [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending"
   },
 };
 
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 9a1c0f1..f546168 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -28,7 +28,6 @@
 #define foreach_esp_encrypt_next                   \
 _(DROP4, "ip4-drop")                               \
 _(DROP6, "ip6-drop")                               \
-_(PENDING, "pending")                              \
 _(HANDOFF4, "handoff4")                            \
 _(HANDOFF6, "handoff6")                            \
 _(INTERFACE_OUTPUT, "interface-output")
@@ -474,7 +473,7 @@ esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                         vnet_crypto_async_frame_t ** async_frame,
                         ipsec_sa_t * sa, vlib_buffer_t * b,
                         esp_header_t * esp, u8 * payload, u32 payload_len,
-                        u8 iv_sz, u8 icv_sz, u32 bi, u16 * next, u32 hdr_len,
+                        u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
                         u16 async_next, vlib_buffer_t * lb)
 {
   esp_post_data_t *post = esp_post_data (b);
@@ -484,8 +483,7 @@ esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
   i16 crypto_start_offset, integ_start_offset = 0;
   u16 crypto_total_len, integ_total_len;
 
-  post->next_index = next[0];
-  next[0] = ESP_ENCRYPT_NEXT_PENDING;
+  post->next_index = next;
 
   /* crypto */
   crypto_start_offset = payload - b->data;
@@ -555,21 +553,6 @@ out:
                                         iv, tag, aad, flag);
 }
 
-/* when submitting a frame is failed, drop all buffers in the frame */
-static_always_inline void
-esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
-                                vlib_buffer_t ** b, u16 * next,
-                                u16 drop_next)
-{
-  u32 n_drop = f->n_elts;
-  while (--n_drop)
-    {
-      (b - n_drop)[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-      (next - n_drop)[0] = drop_next;
-    }
-  vnet_crypto_async_reset_frame (f);
-}
-
 always_inline uword
 esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                    vlib_frame_t * frame, int is_ip6, int is_tun,
@@ -595,6 +578,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   int is_async = im->async_mode;
   vnet_crypto_async_op_id_t last_async_op = ~0;
   u16 drop_next = (is_ip6 ? ESP_ENCRYPT_NEXT_DROP6 : ESP_ENCRYPT_NEXT_DROP4);
+  u16 n_async_drop = 0;
 
   vlib_get_buffers (vm, from, b, n_left);
   if (!is_async)
@@ -663,10 +647,11 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
            {
              if (async_frame && async_frame->n_elts)
                {
-                 if (vnet_crypto_async_submit_open_frame (vm, async_frame)
-                     < 0)
-                   esp_async_recycle_failed_submit (async_frame, b,
-                                                    next, drop_next);
+                 if (vnet_crypto_async_submit_open_frame (vm, async_frame))
+                   esp_async_recycle_failed_submit (async_frame, b, from,
+                                                    nexts, &n_async_drop,
+                                                    drop_next,
+                                                    ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
                }
              async_frame =
                vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
@@ -685,8 +670,10 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 
       if (PREDICT_TRUE (thread_index != sa0->encrypt_thread_index))
        {
-         next[0] = (is_ip6 ?
-                    ESP_ENCRYPT_NEXT_HANDOFF6 : ESP_ENCRYPT_NEXT_HANDOFF4);
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop,
+                             (is_ip6 ? ESP_ENCRYPT_NEXT_HANDOFF6 :
+                              ESP_ENCRYPT_NEXT_HANDOFF4), next);
          goto trace;
        }
 
@@ -695,7 +682,8 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (n_bufs == 0)
        {
          b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
-         next[0] = drop_next;
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop, drop_next, next);
          goto trace;
        }
 
@@ -709,7 +697,8 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
          b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
-         next[0] = drop_next;
+         esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                             &n_async_drop, drop_next, next);
          goto trace;
        }
 
@@ -727,7 +716,8 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          if (!next_hdr_ptr)
            {
              b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
-             next[0] = drop_next;
+             esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                                 &n_async_drop, drop_next, next);
              goto trace;
            }
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -801,7 +791,11 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                                                 vlib_buffer_length_in_chain
                                                 (vm, b[0]));
          if (!next_hdr_ptr)
-           goto trace;
+           {
+             esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                                 &n_async_drop, drop_next, next);
+             goto trace;
+           }
 
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
@@ -894,15 +888,25 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (is_async)
        {
          if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
-           goto trace;
+           {
+             esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                                 &n_async_drop, drop_next, next);
+             goto trace;
+           }
 
          if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
                                       payload, payload_len, iv_sz,
-                                      icv_sz, from[b - bufs], next, hdr_len,
-                                      async_next, lb))
+                                      icv_sz, from[b - bufs], next[0],
+                                      hdr_len, async_next, lb))
            {
-             esp_async_recycle_failed_submit (async_frame, b, next,
-                                              drop_next);
+             /* The failure can only be caused by submission; free the whole frame. */
+             if (async_frame->n_elts)
+               esp_async_recycle_failed_submit (async_frame, b, from, nexts,
+                                                &n_async_drop, drop_next,
+                                                ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+             b[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+             esp_set_next_index (1, from, nexts, from[b - bufs],
+                                 &n_async_drop, drop_next, next);
              goto trace;
            }
        }
@@ -950,10 +954,22 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
                               ptd->chunks, drop_next);
     }
-  else if (async_frame && async_frame->n_elts)
+  else
     {
-      if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
-       esp_async_recycle_failed_submit (async_frame, b, next, drop_next);
+      if (async_frame && async_frame->n_elts)
+       {
+         if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
+           esp_async_recycle_failed_submit (async_frame, b, from, nexts,
+                                            &n_async_drop, drop_next,
+                                            ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+       }
+      vlib_node_increment_counter (vm, node->node_index,
+                                  ESP_ENCRYPT_ERROR_RX_PKTS,
+                                  frame->n_vectors);
+      if (n_async_drop)
+       vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
+
+      return frame->n_vectors;
     }
 
   vlib_node_increment_counter (vm, node->node_index,
@@ -1073,8 +1089,7 @@ VLIB_REGISTER_NODE (esp4_encrypt_node) = {
     [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
     [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
     [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
-    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
-    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
+    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output"
   },
 };
 /* *INDENT-ON* */
@@ -1165,7 +1180,6 @@ VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
     [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
     [ESP_ENCRYPT_NEXT_HANDOFF6] = "error-drop",
     [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
-    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
   },
 };
 
@@ -1213,7 +1227,6 @@ VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
     [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
     [ESP_ENCRYPT_NEXT_HANDOFF4] = "error-drop",
     [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
-    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
     [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
   },
 };
@@ -1354,23 +1367,6 @@ VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FN (esp_encrypt_pending_node) (vlib_main_t * vm,
-                                        vlib_node_runtime_t * node,
-                                        vlib_frame_t * from_frame)
-{
-  return from_frame->n_vectors;
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp_encrypt_pending_node) = {
-  .name = "esp-encrypt-pending",
-  .vector_size = sizeof (u32),
-  .type = VLIB_NODE_TYPE_INTERNAL,
-
-  .n_next_nodes = 0
-};
-/* *INDENT-ON* */
-
 /*
  * fd.io coding-style-patch-verification: ON
  *