crypto: encrypt/decrypt queues sw_scheduler 86/30386/12
author Jakub Wysocki <jakubx.wysocki@intel.com>
Tue, 30 Nov 2021 10:53:03 +0000 (10:53 +0000)
committer Fan Zhang <roy.fan.zhang@intel.com>
Wed, 5 Jan 2022 10:44:38 +0000 (10:44 +0000)
Type: improvement

Previously, the multiple sw crypto scheduler queues per core design
caused an uneven frame processing rate for each async op ID:
the lower the op ID, the more likely its frames were processed first.

For example, when an RX core feeds both encryption and decryption
jobs of the same crypto algorithm to the queues at a high rate,
while the crypto cores do not have enough cycles to process them
all, the jobs in the decryption queue are less likely to be
processed, causing packet drops.

To improve the situation, this patch makes every core own only
two queues, one for encrypt operations and one for decrypt.
The scheduler switches between the queues either after checking
each core or after finding a frame to process.
All crypto jobs, regardless of algorithm, are pushed to
these queues and treated evenly.
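
Below is a minimal, standalone sketch of that layout. It only models
the two rings per worker and the round-robin/alternating selection
(mirroring the new crypto_sw_scheduler_queue_type_t enum and the
last_serve_lcore_id/last_serve_encrypt fields); the real plugin also
tracks frame state with a compare-and-swap and returns completed
frames via last_return_queue, which is omitted here.

  #include <stdio.h>

  #define QUEUE_SIZE 64
  #define QUEUE_MASK (QUEUE_SIZE - 1)
  #define N_WORKERS  4

  typedef enum
  {
    QUEUE_ENCRYPT = 0,
    QUEUE_DECRYPT,
    QUEUE_N_TYPES
  } queue_type_t;

  typedef struct
  {
    unsigned int head, tail;
    void *jobs[QUEUE_SIZE];
  } queue_t;

  typedef struct
  {
    queue_t queue[QUEUE_N_TYPES];     /* one encrypt and one decrypt ring */
    unsigned int last_serve_lcore_id; /* worker visited on the last scan */
    unsigned char last_serve_encrypt; /* queue type served on the last scan */
  } per_thread_t;

  static per_thread_t workers[N_WORKERS];

  /* the submitting thread picks its own encrypt or decrypt ring */
  static int
  enqueue (per_thread_t *self, void *frame, int is_enc)
  {
    queue_t *q = &self->queue[is_enc ? QUEUE_ENCRYPT : QUEUE_DECRYPT];
    if (q->jobs[q->head & QUEUE_MASK])
      return -1; /* ring full */
    q->jobs[q->head++ & QUEUE_MASK] = frame;
    return 0;
  }

  /* a crypto thread scans all workers round-robin and alternates the
     queue type it serves, so encrypt and decrypt get equal treatment */
  static void *
  dequeue (per_thread_t *self)
  {
    unsigned int i = self->last_serve_lcore_id + 1;
    queue_type_t t = self->last_serve_encrypt ? QUEUE_DECRYPT : QUEUE_ENCRYPT;
    void *frame = 0;

    while (1)
      {
        queue_t *q;
        if (i >= N_WORKERS)
          i = 0;
        q = &workers[i].queue[t];
        if (q->jobs[q->tail & QUEUE_MASK])
          {
            frame = q->jobs[q->tail & QUEUE_MASK];
            q->jobs[q->tail++ & QUEUE_MASK] = 0;
          }
        if (frame || i == self->last_serve_lcore_id)
          {
            self->last_serve_encrypt = !self->last_serve_encrypt;
            break;
          }
        i++;
      }
    self->last_serve_lcore_id = i;
    return frame;
  }

  int
  main (void)
  {
    int enc_job = 1, dec_job = 2;
    enqueue (&workers[0], &enc_job, 1);
    enqueue (&workers[0], &dec_job, 0);
    int *a = dequeue (&workers[0]);
    int *b = dequeue (&workers[0]);
    printf ("served %d then %d\n", *a, *b); /* encrypt job, then decrypt job */
    return 0;
  }

Alternating the served queue type on every scan is what keeps the
decrypt queue from starving when both queues are fed at the same rate.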

In addition, the crypto async infra now uses a unified dequeue handler,
one per engine. Only the active engine has its dequeue handler
registered in crypto main.
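
For an engine, the registration split looks roughly like the sketch
below. The vnet_crypto_register_enqueue_handler and
vnet_crypto_register_dequeue_handler calls are the ones introduced by
this patch; the my_engine_* symbols are hypothetical placeholders and
the AES-128-GCM op IDs are just one example pair, so the snippet is
illustrative rather than buildable on its own.

  /* hypothetical engine: per-op enqueue handlers, one dequeue handler */
  static clib_error_t *
  my_engine_init (vlib_main_t *vm)
  {
    u32 eidx =
      vnet_crypto_register_engine (vm, "my_engine", 100, "example engine");

    /* one enqueue handler per async op ID, split by direction */
    vnet_crypto_register_enqueue_handler (
      vm, eidx, VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD12_ENC,
      my_engine_enqueue_encrypt);
    vnet_crypto_register_enqueue_handler (
      vm, eidx, VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD12_DEC,
      my_engine_enqueue_decrypt);

    /* a single dequeue handler for the whole engine; crypto main only
       polls the dequeue handlers of currently active engines */
    vnet_crypto_register_dequeue_handler (vm, eidx, my_engine_dequeue);

    return 0;
  }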

Signed-off-by: DariuszX Kazimierski <dariuszx.kazimierski@intel.com>
Signed-off-by: PiotrX Kleski <piotrx.kleski@intel.com>
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Jakub Wysocki <jakubx.wysocki@intel.com>
Change-Id: I517ee8e31633980de5e0dd4b05e1d5db5dea760e

src/plugins/crypto_sw_scheduler/crypto_sw_scheduler.h
src/plugins/crypto_sw_scheduler/main.c
src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c
src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
src/vnet/crypto/cli.c
src/vnet/crypto/crypto.c
src/vnet/crypto/crypto.h
src/vnet/crypto/node.c

index 50dd6c1..e74dfdd 100644 (file)
 #define CRYPTO_SW_SCHEDULER_QUEUE_SIZE 64
 #define CRYPTO_SW_SCHEDULER_QUEUE_MASK (CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1)
 
+STATIC_ASSERT ((0 == (CRYPTO_SW_SCHEDULER_QUEUE_SIZE &
+                     (CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1))),
+              "CRYPTO_SW_SCHEDULER_QUEUE_SIZE is not pow2");
+
+typedef enum crypto_sw_scheduler_queue_type_t_
+{
+  CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT = 0,
+  CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT,
+  CRYPTO_SW_SCHED_QUEUE_N_TYPES
+} crypto_sw_scheduler_queue_type_t;
+
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
   u32 head;
   u32 tail;
-  vnet_crypto_async_frame_t *jobs[0];
+  vnet_crypto_async_frame_t **jobs;
 } crypto_sw_scheduler_queue_t;
 
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  crypto_sw_scheduler_queue_t *queues[VNET_CRYPTO_ASYNC_OP_N_IDS];
+  crypto_sw_scheduler_queue_t queue[CRYPTO_SW_SCHED_QUEUE_N_TYPES];
+  u32 last_serve_lcore_id;
+  u8 last_serve_encrypt;
+  u8 last_return_queue;
   vnet_crypto_op_t *crypto_ops;
   vnet_crypto_op_t *integ_ops;
   vnet_crypto_op_t *chained_crypto_ops;
index b0548fa..47fa37d 100644 (file)
@@ -74,68 +74,45 @@ crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
 }
 
 static int
-crypto_sw_scheduler_frame_enqueue (vlib_main_t * vm,
-                                  vnet_crypto_async_frame_t * frame)
+crypto_sw_scheduler_frame_enqueue (vlib_main_t *vm,
+                                  vnet_crypto_async_frame_t *frame, u8 is_enc)
 {
   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
-  crypto_sw_scheduler_per_thread_data_t *ptd
-    = vec_elt_at_index (cm->per_thread_data, vm->thread_index);
-  crypto_sw_scheduler_queue_t *q = ptd->queues[frame->op];
-  u64 head = q->head;
-
-  if (q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
+  crypto_sw_scheduler_per_thread_data_t *ptd =
+    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
+  crypto_sw_scheduler_queue_t *current_queue =
+    is_enc ? &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT] :
+            &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
+  u64 head = current_queue->head;
+
+  if (current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
     {
       u32 n_elts = frame->n_elts, i;
       for (i = 0; i < n_elts; i++)
        frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
       return -1;
     }
-  q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
+
+  current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
   head += 1;
   CLIB_MEMORY_STORE_BARRIER ();
-  q->head = head;
+  current_queue->head = head;
   return 0;
 }
 
-static_always_inline vnet_crypto_async_frame_t *
-crypto_sw_scheduler_get_pending_frame (crypto_sw_scheduler_queue_t * q)
+static int
+crypto_sw_scheduler_frame_enqueue_decrypt (vlib_main_t *vm,
+                                          vnet_crypto_async_frame_t *frame)
 {
-  vnet_crypto_async_frame_t *f;
-  u32 i;
-  u32 tail = q->tail;
-  u32 head = q->head;
-
-  for (i = tail; i < head; i++)
-    {
-      f = q->jobs[i & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
-      if (!f)
-       continue;
-      if (clib_atomic_bool_cmp_and_swap
-         (&f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
-          VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
-       {
-         return f;
-       }
+  return crypto_sw_scheduler_frame_enqueue (vm, frame, 0);
     }
-  return NULL;
-}
-
-static_always_inline vnet_crypto_async_frame_t *
-crypto_sw_scheduler_get_completed_frame (crypto_sw_scheduler_queue_t * q)
-{
-  vnet_crypto_async_frame_t *f = 0;
-  if (q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]
-      && q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]->state
-      >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
+    static int
+    crypto_sw_scheduler_frame_enqueue_encrypt (
+      vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
     {
-      u32 tail = q->tail;
-      CLIB_MEMORY_STORE_BARRIER ();
-      q->tail++;
-      f = q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
-      q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = 0;
+
+      return crypto_sw_scheduler_frame_enqueue (vm, frame, 1);
     }
-  return f;
-}
 
 static_always_inline void
 cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
@@ -324,114 +301,59 @@ process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
     }
 }
 
-static_always_inline vnet_crypto_async_frame_t *
-crypto_sw_scheduler_dequeue_aead (vlib_main_t * vm,
-                                 vnet_crypto_async_op_id_t async_op_id,
-                                 vnet_crypto_op_id_t sync_op_id, u8 tag_len,
-                                 u8 aad_len, u32 * nb_elts_processed,
-                                 u32 * enqueue_thread_idx)
+static_always_inline void
+crypto_sw_scheduler_process_aead (vlib_main_t *vm,
+                                 crypto_sw_scheduler_per_thread_data_t *ptd,
+                                 vnet_crypto_async_frame_t *f, u32 aead_op,
+                                 u32 aad_len, u32 digest_len)
 {
-  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
-  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
-  crypto_sw_scheduler_queue_t *q = 0;
-  vnet_crypto_async_frame_t *f = 0;
   vnet_crypto_async_frame_elt_t *fe;
   u32 *bi;
-  u32 n_elts;
-  int i = 0;
+  u32 n_elts = f->n_elts;
   u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
 
-  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
-    {
-      /* *INDENT-OFF* */
-      vec_foreach_index (i, cm->per_thread_data)
-      {
-        ptd = cm->per_thread_data + i;
-        q = ptd->queues[async_op_id];
-        f = crypto_sw_scheduler_get_pending_frame (q);
-        if (f)
-          break;
-      }
-      /* *INDENT-ON* */
-    }
+  vec_reset_length (ptd->crypto_ops);
+  vec_reset_length (ptd->integ_ops);
+  vec_reset_length (ptd->chained_crypto_ops);
+  vec_reset_length (ptd->chained_integ_ops);
+  vec_reset_length (ptd->chunks);
 
 
-  ptd = cm->per_thread_data + vm->thread_index;
+  fe = f->elts;
+  bi = f->buffer_indices;
 
 
-  if (f)
+  while (n_elts--)
     {
-      *nb_elts_processed = n_elts = f->n_elts;
-      fe = f->elts;
-      bi = f->buffer_indices;
+      if (n_elts > 1)
+       clib_prefetch_load (fe + 1);
 
 
-      vec_reset_length (ptd->crypto_ops);
-      vec_reset_length (ptd->chained_crypto_ops);
-      vec_reset_length (ptd->chunks);
-
-      while (n_elts--)
-       {
-         if (n_elts > 1)
-           clib_prefetch_load (fe + 1);
-
-         crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
-                                           sync_op_id, aad_len, tag_len);
-         bi++;
-         fe++;
-       }
+      crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
+                                       aead_op, aad_len, digest_len);
+      bi++;
+      fe++;
+    }
 
       process_ops (vm, f, ptd->crypto_ops, &state);
       process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                           &state);
       f->state = state;
-      *enqueue_thread_idx = f->enqueue_thread_index;
     }
 
-  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
-}
-
-static_always_inline vnet_crypto_async_frame_t *
-crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
-                                 vnet_crypto_async_op_id_t async_op_id,
-                                 vnet_crypto_op_id_t sync_crypto_op_id,
-                                 vnet_crypto_op_id_t sync_integ_op_id,
-                                 u16 digest_len, u8 is_enc,
-                                 u32 * nb_elts_processed,
-                                 u32 * enqueue_thread_idx)
-{
-  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
-  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
-  crypto_sw_scheduler_queue_t *q = 0;
-  vnet_crypto_async_frame_t *f = 0;
-  vnet_crypto_async_frame_elt_t *fe;
-  u32 *bi;
-  u32 n_elts;
-  int i = 0;
-  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
-
-  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
+    static_always_inline void
+    crypto_sw_scheduler_process_link (
+      vlib_main_t *vm, crypto_sw_scheduler_main_t *cm,
+      crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_async_frame_t *f,
+      u32 crypto_op, u32 auth_op, u16 digest_len, u8 is_enc)
     {
-      /* *INDENT-OFF* */
-      vec_foreach_index (i, cm->per_thread_data)
-      {
-        ptd = cm->per_thread_data + i;
-        q = ptd->queues[async_op_id];
-        f = crypto_sw_scheduler_get_pending_frame (q);
-        if (f)
-          break;
-      }
-      /* *INDENT-ON* */
-    }
-
-  ptd = cm->per_thread_data + vm->thread_index;
+      vnet_crypto_async_frame_elt_t *fe;
+      u32 *bi;
+      u32 n_elts = f->n_elts;
+      u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
 
 
-  if (f)
-    {
       vec_reset_length (ptd->crypto_ops);
       vec_reset_length (ptd->integ_ops);
       vec_reset_length (ptd->chained_crypto_ops);
       vec_reset_length (ptd->chained_integ_ops);
       vec_reset_length (ptd->chunks);
-
-      *nb_elts_processed = n_elts = f->n_elts;
       fe = f->elts;
       bi = f->buffer_indices;
 
@@ -440,12 +362,9 @@ crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
          if (n_elts > 1)
            clib_prefetch_load (fe + 1);
 
-         crypto_sw_scheduler_convert_link_crypto (vm, ptd,
-                                                  cm->keys + fe->key_index,
-                                                  fe, fe - f->elts, bi[0],
-                                                  sync_crypto_op_id,
-                                                  sync_integ_op_id,
-                                                  digest_len, is_enc);
+         crypto_sw_scheduler_convert_link_crypto (
+           vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0],
+           crypto_op, auth_op, digest_len, is_enc);
          bi++;
          fe++;
        }
@@ -470,11 +389,167 @@ crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
        }
 
       f->state = state;
-      *enqueue_thread_idx = f->enqueue_thread_index;
     }
 
-  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
-}
+    static_always_inline int
+    convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id,
+                            u32 *crypto_op, u32 *auth_op_or_aad_len,
+                            u16 *digest_len, u8 *is_enc)
+    {
+      switch (async_op_id)
+       {
+#define _(n, s, k, t, a)                                                      \
+  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC:                            \
+    *crypto_op = VNET_CRYPTO_OP_##n##_ENC;                                    \
+    *auth_op_or_aad_len = a;                                                  \
+    *digest_len = t;                                                          \
+    *is_enc = 1;                                                              \
+    return 1;                                                                 \
+  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC:                            \
+    *crypto_op = VNET_CRYPTO_OP_##n##_DEC;                                    \
+    *auth_op_or_aad_len = a;                                                  \
+    *digest_len = t;                                                          \
+    *is_enc = 0;                                                              \
+    return 1;
+         foreach_crypto_aead_async_alg
+#undef _
+
+#define _(c, h, s, k, d)                                                      \
+  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC:                               \
+    *crypto_op = VNET_CRYPTO_OP_##c##_ENC;                                    \
+    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
+    *digest_len = d;                                                          \
+    *is_enc = 1;                                                              \
+    return 0;                                                                 \
+  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC:                               \
+    *crypto_op = VNET_CRYPTO_OP_##c##_DEC;                                    \
+    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
+    *digest_len = d;                                                          \
+    *is_enc = 0;                                                              \
+    return 0;
+           foreach_crypto_link_async_alg
+#undef _
+
+           default : return -1;
+       }
+
+      return -1;
+    }
+
+    static_always_inline vnet_crypto_async_frame_t *
+    crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
+                                u32 *enqueue_thread_idx)
+    {
+      crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
+      crypto_sw_scheduler_per_thread_data_t *ptd =
+       cm->per_thread_data + vm->thread_index;
+      vnet_crypto_async_frame_t *f = 0;
+      crypto_sw_scheduler_queue_t *current_queue = 0;
+      u32 tail, head;
+      u8 found = 0;
+
+      /* get a pending frame to process */
+      if (ptd->self_crypto_enabled)
+       {
+         u32 i = ptd->last_serve_lcore_id + 1;
+
+         while (1)
+           {
+             crypto_sw_scheduler_per_thread_data_t *st;
+             u32 j;
+
+             if (i >= vec_len (cm->per_thread_data))
+               i = 0;
+
+             st = cm->per_thread_data + i;
+
+             if (ptd->last_serve_encrypt)
+               current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
+             else
+               current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
+
+             tail = current_queue->tail;
+             head = current_queue->head;
+
+             for (j = tail; j != head; j++)
+               {
+
+                 f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
+
+                 if (!f)
+                   continue;
+
+                 if (clib_atomic_bool_cmp_and_swap (
+                       &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
+                       VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
+                   {
+                     found = 1;
+                     break;
+                   }
+               }
+
+             if (found || i == ptd->last_serve_lcore_id)
+               {
+                 CLIB_MEMORY_STORE_BARRIER ();
+                 ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
+                 break;
+               }
+
+             i++;
+           }
+
+         ptd->last_serve_lcore_id = i;
+       }
+
+      if (found)
+       {
+         u32 crypto_op, auth_op_or_aad_len;
+         u16 digest_len;
+         u8 is_enc;
+         int ret;
+
+         ret = convert_async_crypto_id (
+           f->op, &crypto_op, &auth_op_or_aad_len, &digest_len, &is_enc);
+
+         if (ret == 1)
+           crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
+                                             auth_op_or_aad_len, digest_len);
+         else if (ret == 0)
+           crypto_sw_scheduler_process_link (vm, cm, ptd, f, crypto_op,
+                                             auth_op_or_aad_len, digest_len,
+                                             is_enc);
+
+         *enqueue_thread_idx = f->enqueue_thread_index;
+         *nb_elts_processed = f->n_elts;
+       }
+
+      if (ptd->last_return_queue)
+       {
+         current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
+         ptd->last_return_queue = 0;
+       }
+      else
+       {
+         current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
+         ptd->last_return_queue = 1;
+       }
+
+      tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;
+
+      if (current_queue->jobs[tail] &&
+         current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
+       {
+
+         CLIB_MEMORY_STORE_BARRIER ();
+         current_queue->tail++;
+         f = current_queue->jobs[tail];
+         current_queue->jobs[tail] = 0;
+
+         return f;
+       }
+
+      return 0;
+    }
 
 static clib_error_t *
 sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
@@ -586,50 +661,6 @@ sw_scheduler_cli_init (vlib_main_t * vm)
 
 VLIB_INIT_FUNCTION (sw_scheduler_cli_init);
 
-/* *INDENT-OFF* */
-#define _(n, s, k, t, a)                                                      \
-  static vnet_crypto_async_frame_t                                            \
-      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc (      \
-          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
-  {                                                                           \
-    return crypto_sw_scheduler_dequeue_aead (                                 \
-        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                       \
-        VNET_CRYPTO_OP_##n##_ENC, t, a, nb_elts_processed, thread_idx);       \
-  }                                                                           \
-  static vnet_crypto_async_frame_t                                            \
-      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec (      \
-          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
-  {                                                                           \
-    return crypto_sw_scheduler_dequeue_aead (                                 \
-        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                       \
-        VNET_CRYPTO_OP_##n##_DEC, t, a, nb_elts_processed, thread_idx);       \
-  }
-foreach_crypto_aead_async_alg
-#undef _
-
-#define _(c, h, s, k, d)                                                      \
-  static vnet_crypto_async_frame_t                                            \
-      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc (           \
-          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
-  {                                                                           \
-    return crypto_sw_scheduler_dequeue_link (                                 \
-        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,                          \
-        VNET_CRYPTO_OP_##c##_ENC, VNET_CRYPTO_OP_##h##_HMAC, d, 1,            \
-        nb_elts_processed, thread_idx);                                       \
-  }                                                                           \
-  static vnet_crypto_async_frame_t                                            \
-      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec (           \
-          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
-  {                                                                           \
-    return crypto_sw_scheduler_dequeue_link (                                 \
-        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,                          \
-        VNET_CRYPTO_OP_##c##_DEC, VNET_CRYPTO_OP_##h##_HMAC, d, 0,            \
-        nb_elts_processed, thread_idx);                                       \
-  }
-    foreach_crypto_link_async_alg
-#undef _
-        /* *INDENT-ON* */
-
 crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
 clib_error_t *
 crypto_sw_scheduler_init (vlib_main_t * vm)
@@ -639,24 +670,29 @@ crypto_sw_scheduler_init (vlib_main_t * vm)
   clib_error_t *error = 0;
   crypto_sw_scheduler_per_thread_data_t *ptd;
 
-  u32 queue_size = CRYPTO_SW_SCHEDULER_QUEUE_SIZE * sizeof (void *)
-    + sizeof (crypto_sw_scheduler_queue_t);
-
   vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
 
   vec_foreach (ptd, cm->per_thread_data)
   {
     ptd->self_crypto_enabled = 1;
-    u32 i;
-    for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
-      {
-       crypto_sw_scheduler_queue_t *q
-         = clib_mem_alloc_aligned (queue_size, CLIB_CACHE_LINE_BYTES);
-       ASSERT (q != 0);
-       ptd->queues[i] = q;
-       clib_memset_u8 (q, 0, queue_size);
-      }
+
+    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].head = 0;
+    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].tail = 0;
+
+    vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].jobs,
+                         CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
+                         CLIB_CACHE_LINE_BYTES);
+
+    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].head = 0;
+    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].tail = 0;
+
+    ptd->last_serve_encrypt = 0;
+    ptd->last_return_queue = 0;
+
+    vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].jobs,
+                         CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
+                         CLIB_CACHE_LINE_BYTES);
   }
 
   cm->crypto_engine_index =
@@ -670,32 +706,29 @@ crypto_sw_scheduler_init (vlib_main_t * vm)
 
   /* *INDENT-OFF* */
 #define _(n, s, k, t, a)                                                      \
-  vnet_crypto_register_async_handler (                                        \
-      vm, cm->crypto_engine_index,                                            \
-      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                             \
-      crypto_sw_scheduler_frame_enqueue,                                      \
-      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc);       \
-  vnet_crypto_register_async_handler (                                        \
-      vm, cm->crypto_engine_index,                                            \
-      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                             \
-      crypto_sw_scheduler_frame_enqueue,                                      \
-      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec);
+  vnet_crypto_register_enqueue_handler (                                      \
+    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,  \
+    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
+  vnet_crypto_register_enqueue_handler (                                      \
+    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,  \
+    crypto_sw_scheduler_frame_enqueue_decrypt);
   foreach_crypto_aead_async_alg
 #undef _
 
 #define _(c, h, s, k, d)                                                      \
-  vnet_crypto_register_async_handler (                                        \
-      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,   \
-      crypto_sw_scheduler_frame_enqueue,                                      \
-      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc);            \
-  vnet_crypto_register_async_handler (                                        \
-      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,   \
-      crypto_sw_scheduler_frame_enqueue,                                      \
-      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec);
-      foreach_crypto_link_async_alg
+  vnet_crypto_register_enqueue_handler (                                      \
+    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,     \
+    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
+  vnet_crypto_register_enqueue_handler (                                      \
+    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,     \
+    crypto_sw_scheduler_frame_enqueue_decrypt);
+    foreach_crypto_link_async_alg
 #undef _
       /* *INDENT-ON* */
 
+      vnet_crypto_register_dequeue_handler (vm, cm->crypto_engine_index,
+                                           crypto_sw_scheduler_dequeue);
+
   if (error)
     vec_free (cm->per_thread_data);
 
index 771d804..9b57467 100644 (file)
@@ -515,6 +515,7 @@ cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
   struct rte_cryptodev_sym_capability_idx cap_aead_idx;
   u8 *name;
   clib_error_t *error = 0;
+  u32 ref_cnt = 0;
 
   vec_foreach (cet, cmt->per_thread_data)
     {
@@ -550,18 +551,18 @@ cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
       vec_validate (cet->cops, VNET_CRYPTO_FRAME_SIZE - 1);
     }
 
-    /** INDENT-OFF **/
 #define _(a, b, c, d, e, f, g)                                                \
   cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
   cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
   if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
     {                                                                         \
-      vnet_crypto_register_async_handler (                                    \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
-       cryptodev_enqueue_aead_aad_##f##_enc, cryptodev_frame_dequeue);       \
-      vnet_crypto_register_async_handler (                                    \
+       cryptodev_enqueue_aead_aad_##f##_enc);                                \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
-       cryptodev_enqueue_aead_aad_##f##_dec, cryptodev_frame_dequeue);       \
+       cryptodev_enqueue_aead_aad_##f##_dec);                                \
+      ref_cnt++;                                                              \
     }
   foreach_vnet_aead_crypto_conversion
 #undef _
@@ -574,16 +575,19 @@ cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
   if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
       cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
     {                                                                         \
-      vnet_crypto_register_async_handler (                                    \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
-       cryptodev_enqueue_linked_alg_enc, cryptodev_frame_dequeue);           \
-      vnet_crypto_register_async_handler (                                    \
+       cryptodev_enqueue_linked_alg_enc);                                    \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
-       cryptodev_enqueue_linked_alg_dec, cryptodev_frame_dequeue);           \
+       cryptodev_enqueue_linked_alg_dec);                                    \
+      ref_cnt++;                                                              \
     }
     foreach_cryptodev_link_async_alg
 #undef _
-    /** INDENT-ON **/
+
+    if (ref_cnt)
+      vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_frame_dequeue);
 
     return 0;
 
index 41a1e0c..a93184a 100644 (file)
@@ -639,6 +639,7 @@ cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
   struct rte_cryptodev_sym_capability_idx cap_aead_idx;
   u32 support_raw_api = 1, max_ctx_size = 0;
   clib_error_t *error = 0;
+  u8 ref_cnt = 0;
 
   vec_foreach (cinst, cmt->cryptodev_inst)
     {
@@ -694,18 +695,18 @@ cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
       vec_free (name);
     }
 
-/** INDENT-OFF **/
 #define _(a, b, c, d, e, f, g)                                                \
   cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
   cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
   if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
     {                                                                         \
-      vnet_crypto_register_async_handler (                                    \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
-       cryptodev_raw_enq_aead_aad_##f##_enc, cryptodev_raw_dequeue);         \
-      vnet_crypto_register_async_handler (                                    \
+       cryptodev_raw_enq_aead_aad_##f##_enc);                                \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
-       cryptodev_raw_enq_aead_aad_##f##_dec, cryptodev_raw_dequeue);         \
+       cryptodev_raw_enq_aead_aad_##f##_dec);                                \
+      ref_cnt++;                                                              \
     }
   foreach_vnet_aead_crypto_conversion
 #undef _
@@ -718,17 +719,21 @@ cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
   if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
       cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
     {                                                                         \
-      vnet_crypto_register_async_handler (                                    \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
-       cryptodev_raw_enq_linked_alg_enc, cryptodev_raw_dequeue);             \
-      vnet_crypto_register_async_handler (                                    \
+       cryptodev_raw_enq_linked_alg_enc);                                    \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
-       cryptodev_raw_enq_linked_alg_dec, cryptodev_raw_dequeue);             \
+       cryptodev_raw_enq_linked_alg_dec);                                    \
+      ref_cnt++;                                                              \
     }
     foreach_cryptodev_link_async_alg
 #undef _
 
-      cmt->is_raw_api = 1;
+    if (ref_cnt)
+      vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_raw_dequeue);
+
+  cmt->is_raw_api = 1;
 
   return 0;
 
index a6098a1..4ee14ac 100644 (file)
@@ -67,7 +67,7 @@ format_vnet_crypto_engine_candidates (u8 * s, va_list * args)
     {
       vec_foreach (e, cm->engines)
        {
-         if (e->enqueue_handlers[id] && e->dequeue_handlers[id])
+         if (e->enqueue_handlers[id] && e->dequeue_handler)
            {
              s = format (s, "%U", format_vnet_crypto_engine, e - cm->engines);
              if (ei == e - cm->engines)
index 7903f88..9f437cf 100644 (file)
@@ -275,10 +275,9 @@ vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index,
 }
 
 void
-vnet_crypto_register_async_handler (vlib_main_t * vm, u32 engine_index,
-                                   vnet_crypto_async_op_id_t opt,
-                                   vnet_crypto_frame_enqueue_t * enqueue_hdl,
-                                   vnet_crypto_frame_dequeue_t * dequeue_hdl)
+vnet_crypto_register_enqueue_handler (vlib_main_t *vm, u32 engine_index,
+                                     vnet_crypto_async_op_id_t opt,
+                                     vnet_crypto_frame_enqueue_t *enqueue_hdl)
 {
   vnet_crypto_main_t *cm = &crypto_main;
   vnet_crypto_engine_t *ae, *e = vec_elt_at_index (cm->engines, engine_index);
@@ -288,17 +287,14 @@ vnet_crypto_register_async_handler (vlib_main_t * vm, u32 engine_index,
   vec_validate_aligned (cm->dequeue_handlers, VNET_CRYPTO_ASYNC_OP_N_IDS,
                        CLIB_CACHE_LINE_BYTES);
 
-  /* both enqueue hdl and dequeue hdl should present */
-  if (!enqueue_hdl && !dequeue_hdl)
+  if (!enqueue_hdl)
     return;
 
   e->enqueue_handlers[opt] = enqueue_hdl;
-  e->dequeue_handlers[opt] = dequeue_hdl;
   if (otd->active_engine_index_async == ~0)
     {
       otd->active_engine_index_async = engine_index;
       cm->enqueue_handlers[opt] = enqueue_hdl;
-      cm->dequeue_handlers[opt] = dequeue_hdl;
     }
 
   ae = vec_elt_at_index (cm->engines, otd->active_engine_index_async);
@@ -306,12 +302,73 @@ vnet_crypto_register_async_handler (vlib_main_t * vm, u32 engine_index,
     {
       otd->active_engine_index_async = engine_index;
       cm->enqueue_handlers[opt] = enqueue_hdl;
-      cm->dequeue_handlers[opt] = dequeue_hdl;
     }
 
   return;
 }
 
+static int
+engine_index_cmp (void *v1, void *v2)
+{
+  u32 *a1 = v1;
+  u32 *a2 = v2;
+
+  if (*a1 > *a2)
+    return 1;
+  if (*a1 < *a2)
+    return -1;
+  return 0;
+}
+
+static void
+vnet_crypto_update_cm_dequeue_handlers (void)
+{
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_async_op_data_t *otd;
+  vnet_crypto_engine_t *e;
+  u32 *active_engines = 0, *ei, last_ei = ~0, i;
+
+  vec_reset_length (cm->dequeue_handlers);
+
+  for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
+    {
+      otd = cm->async_opt_data + i;
+      e = cm->engines + otd->active_engine_index_async;
+      if (!e->dequeue_handler)
+       continue;
+      vec_add1 (active_engines, otd->active_engine_index_async);
+    }
+
+  vec_sort_with_function (active_engines, engine_index_cmp);
+
+  vec_foreach (ei, active_engines)
+    {
+      if (ei[0] == last_ei)
+       continue;
+
+      e = cm->engines + ei[0];
+      vec_add1 (cm->dequeue_handlers, e->dequeue_handler);
+      last_ei = ei[0];
+    }
+
+  vec_free (active_engines);
+}
+
+void
+vnet_crypto_register_dequeue_handler (vlib_main_t *vm, u32 engine_index,
+                                     vnet_crypto_frame_dequeue_t *deq_fn)
+{
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_engine_t *e = vec_elt_at_index (cm->engines, engine_index);
+
+  if (!deq_fn)
+    return;
+
+  e->dequeue_handler = deq_fn;
+
+  return;
+}
+
 void
 vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index,
                                  vnet_crypto_key_handler_t * key_handler)
@@ -509,11 +566,11 @@ crypto_set_active_async_engine (vnet_crypto_async_op_data_t * od,
   vnet_crypto_main_t *cm = &crypto_main;
   vnet_crypto_engine_t *ce = vec_elt_at_index (cm->engines, ei);
 
-  if (ce->enqueue_handlers[id] && ce->dequeue_handlers[id])
+  if (ce->enqueue_handlers[id] && ce->dequeue_handler)
     {
       od->active_engine_index_async = ei;
       cm->enqueue_handlers[id] = ce->enqueue_handlers[id];
-      cm->dequeue_handlers[id] = ce->dequeue_handlers[id];
+      cm->dequeue_handlers[id] = ce->dequeue_handler;
     }
 }
 
@@ -525,6 +582,9 @@ vnet_crypto_set_async_handler2 (char *alg_name, char *engine)
   vnet_crypto_async_alg_data_t *ad;
   int i;
 
+  if (cm->async_refcnt)
+    return -EBUSY;
+
   p = hash_get_mem (cm->async_alg_index_by_name, alg_name);
   if (!p)
     return -1;
@@ -546,6 +606,8 @@ vnet_crypto_set_async_handler2 (char *alg_name, char *engine)
       crypto_set_active_async_engine (od, id, p[0]);
     }
 
+  vnet_crypto_update_cm_dequeue_handlers ();
+
   return 0;
 }
 
@@ -603,12 +665,18 @@ vnet_crypto_request_async_mode (int is_enable)
     }
 
   if (state_change)
-    for (i = skip_master; i < tm->n_vlib_mains; i++)
-      {
-       vlib_main_t *ovm = vlib_get_main_by_index (i);
-       if (state != vlib_node_get_state (ovm, cm->crypto_node_index))
-         vlib_node_set_state (ovm, cm->crypto_node_index, state);
-      }
+    {
+
+      for (i = skip_master; i < tm->n_vlib_mains; i++)
+       {
+         vlib_main_t *ovm = vlib_get_main_by_index (i);
+         if (state != vlib_node_get_state (ovm, cm->crypto_node_index))
+           vlib_node_set_state (ovm, cm->crypto_node_index, state);
+       }
+
+      if (is_enable)
+       vnet_crypto_update_cm_dequeue_handlers ();
+    }
 
   if (is_enable)
     cm->async_refcnt += 1;
@@ -753,7 +821,6 @@ vnet_crypto_init (vlib_main_t * vm)
                        CLIB_CACHE_LINE_BYTES);
   vec_validate (cm->algs, VNET_CRYPTO_N_ALGS);
   vec_validate (cm->async_algs, VNET_CRYPTO_N_ASYNC_ALGS);
-  clib_bitmap_validate (cm->async_active_ids, VNET_CRYPTO_ASYNC_OP_N_IDS);
 
 #define _(n, s, l) \
   vnet_crypto_init_cipher_data (VNET_CRYPTO_ALG_##n, \
index 71978b6..73b7f70 100644 (file)
@@ -422,12 +422,15 @@ void vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index,
 
 /** async crypto register functions */
 u32 vnet_crypto_register_post_node (vlib_main_t * vm, char *post_node_name);
-void vnet_crypto_register_async_handler (vlib_main_t * vm,
-                                        u32 engine_index,
-                                        vnet_crypto_async_op_id_t opt,
-                                        vnet_crypto_frame_enqueue_t * enq_fn,
-                                        vnet_crypto_frame_dequeue_t *
-                                        deq_fn);
+
+void
+vnet_crypto_register_enqueue_handler (vlib_main_t *vm, u32 engine_index,
+                                     vnet_crypto_async_op_id_t opt,
+                                     vnet_crypto_frame_enqueue_t *enq_fn);
+
+void
+vnet_crypto_register_dequeue_handler (vlib_main_t *vm, u32 engine_index,
+                                     vnet_crypto_frame_dequeue_t *deq_fn);
 
 typedef struct
 {
@@ -439,7 +442,7 @@ typedef struct
     vnet_crypto_chained_ops_handler_t
     * chained_ops_handlers[VNET_CRYPTO_N_OP_IDS];
   vnet_crypto_frame_enqueue_t *enqueue_handlers[VNET_CRYPTO_ASYNC_OP_N_IDS];
-  vnet_crypto_frame_dequeue_t *dequeue_handlers[VNET_CRYPTO_ASYNC_OP_N_IDS];
+  vnet_crypto_frame_dequeue_t *dequeue_handler;
 } vnet_crypto_engine_t;
 
 typedef struct
@@ -456,7 +459,6 @@ typedef struct
   vnet_crypto_chained_ops_handler_t **chained_ops_handlers;
   vnet_crypto_frame_enqueue_t **enqueue_handlers;
   vnet_crypto_frame_dequeue_t **dequeue_handlers;
-  clib_bitmap_t *async_active_ids;
   vnet_crypto_op_data_t opt_data[VNET_CRYPTO_N_OP_IDS];
   vnet_crypto_async_op_data_t async_opt_data[VNET_CRYPTO_ASYNC_OP_N_IDS];
   vnet_crypto_engine_t *engines;
@@ -591,7 +593,6 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
 {
   vnet_crypto_main_t *cm = &crypto_main;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
-  vnet_crypto_async_op_id_t opt = frame->op;
   u32 i = vlib_num_workers () > 0;
 
   frame->state = VNET_CRYPTO_FRAME_STATE_PENDING;
@@ -599,7 +600,6 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
 
   int ret = (cm->enqueue_handlers[frame->op]) (vm, frame);
 
-  clib_bitmap_set_no_check (cm->async_active_ids, opt, 1);
   if (PREDICT_TRUE (ret == 0))
     {
       if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
index e753f1a..4757f53 100644 (file)
@@ -158,14 +158,12 @@ VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
 {
   vnet_crypto_main_t *cm = &crypto_main;
   vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
-  u32 n_dispatched = 0, n_cache = 0;
-  u32 index;
+  u32 n_dispatched = 0, n_cache = 0, index;
+  vec_foreach_index (index, cm->dequeue_handlers)
 
 
-  /* *INDENT-OFF* */
-  clib_bitmap_foreach (index, cm->async_active_ids)  {
     n_cache = crypto_dequeue_frame (vm, node, ct, cm->dequeue_handlers[index],
                                    n_cache, &n_dispatched);
-  }
+
   /* *INDENT-ON* */
   if (n_cache)
     vlib_buffer_enqueue_to_next_vec (vm, node, &ct->buffer_indices, &ct->nexts,