dpdk-cryptodev: add support for chacha20-poly1305
[vpp.git] / src / plugins / dpdk / cryptodev / cryptodev_raw_data_path.c
index 40e020f..c096e34 100644 (file)
@@ -29,7 +29,7 @@
 #include <rte_cryptodev.h>
 #include <rte_crypto_sym.h>
 #include <rte_crypto.h>
-#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
 #include <rte_config.h>
 
 #include "cryptodev.h"
@@ -144,8 +144,8 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
 
       if (n_elts > 2)
        {
-         CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
-         CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (&fe[1]);
+         clib_prefetch_load (&fe[2]);
          vlib_prefetch_buffer_header (b[1], LOAD);
          vlib_prefetch_buffer_header (b[2], LOAD);
        }
@@ -275,7 +275,7 @@ cryptodev_raw_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame,
 
       if (n_elts > 1)
        {
-         CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (&fe[1]);
          vlib_prefetch_buffer_header (b[1], LOAD);
        }
 
@@ -349,7 +349,7 @@ cryptodev_raw_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame,
 
       if (aad_len == 8)
        *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
-      else
+      else if (aad_len != 0)
        {
          /* aad_len == 12 */
          *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
@@ -422,6 +422,7 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                       u32 *enqueue_thread_idx)
 {
   cryptodev_main_t *cmt = &cryptodev_main;
+  vnet_crypto_main_t *cm = &crypto_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
   vnet_crypto_async_frame_t *frame, *frame_ret = 0;
   u32 n_deq, n_success;
@@ -449,8 +450,8 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
              vnet_crypto_async_frame_t *f1, *f2;
              GET_RING_OBJ (cet->cached_frame, i + 1, f1);
              GET_RING_OBJ (cet->cached_frame, i + 2, f2);
-             CLIB_PREFETCH (f1, CLIB_CACHE_LINE_BYTES, LOAD);
-             CLIB_PREFETCH (f2, CLIB_CACHE_LINE_BYTES, LOAD);
+             clib_prefetch_load (f1);
+             clib_prefetch_load (f2);
            }
 
          n_left = f->state & 0x7f;
@@ -510,13 +511,25 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
        }
     }
 
+  if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT &&
+      inflight > 0)
+    vlib_node_set_interrupt_pending (vlib_get_main_by_index (vm->thread_index),
+                                    cm->crypto_node_index);
+
   /* no point to dequeue further */
   if (!inflight || no_job_to_deq || !n_room_left)
     goto end_deq;
 
+#if RTE_VERSION >= RTE_VERSION_NUM(21, 5, 0, 0)
+  n_deq = rte_cryptodev_raw_dequeue_burst (
+    cet->ctx, cryptodev_get_frame_n_elts, 0, cryptodev_post_dequeue,
+    (void **) &frame, 0, &n_success, &dequeue_status);
+#else
   n_deq = rte_cryptodev_raw_dequeue_burst (
     cet->ctx, cryptodev_get_frame_n_elts, cryptodev_post_dequeue,
     (void **) &frame, 0, &n_success, &dequeue_status);
+#endif
+
   if (!n_deq)
     goto end_deq;
 
@@ -541,9 +554,15 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
   /* see if we can dequeue more */
   while (inflight && n_room_left && !no_job_to_deq)
     {
+#if RTE_VERSION >= RTE_VERSION_NUM(21, 5, 0, 0)
+      n_deq = rte_cryptodev_raw_dequeue_burst (
+       cet->ctx, cryptodev_get_frame_n_elts, 0, cryptodev_post_dequeue,
+       (void **) &frame, 0, &n_success, &dequeue_status);
+#else
       n_deq = rte_cryptodev_raw_dequeue_burst (
        cet->ctx, cryptodev_get_frame_n_elts, cryptodev_post_dequeue,
        (void **) &frame, 0, &n_success, &dequeue_status);
+#endif
       if (!n_deq)
        break;
       inflight -= n_deq;
@@ -572,6 +591,13 @@ end_deq:
   return frame_ret;
 }
 
+static_always_inline int
+cryptodev_raw_enq_aead_aad_0_enc (vlib_main_t *vm,
+                                 vnet_crypto_async_frame_t *frame)
+{
+  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT, 0);
+}
+
 static_always_inline int
 cryptodev_raw_enq_aead_aad_8_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
@@ -585,6 +611,13 @@ cryptodev_raw_enq_aead_aad_12_enc (vlib_main_t *vm,
   return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT, 12);
 }
 
+static_always_inline int
+cryptodev_raw_enq_aead_aad_0_dec (vlib_main_t *vm,
+                                 vnet_crypto_async_frame_t *frame)
+{
+  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT, 0);
+}
+
 static_always_inline int
 cryptodev_raw_enq_aead_aad_8_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
@@ -626,6 +659,7 @@ cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
   struct rte_cryptodev_sym_capability_idx cap_aead_idx;
   u32 support_raw_api = 1, max_ctx_size = 0;
   clib_error_t *error = 0;
+  u8 ref_cnt = 0;
 
   vec_foreach (cinst, cmt->cryptodev_inst)
     {
@@ -681,18 +715,18 @@ cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
       vec_free (name);
     }
 
-/** INDENT-OFF **/
 #define _(a, b, c, d, e, f, g)                                                \
   cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
   cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
   if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
     {                                                                         \
-      vnet_crypto_register_async_handler (                                    \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
-       cryptodev_raw_enq_aead_aad_##f##_enc, cryptodev_raw_dequeue);         \
-      vnet_crypto_register_async_handler (                                    \
+       cryptodev_raw_enq_aead_aad_##f##_enc);                                \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
-       cryptodev_raw_enq_aead_aad_##f##_dec, cryptodev_raw_dequeue);         \
+       cryptodev_raw_enq_aead_aad_##f##_dec);                                \
+      ref_cnt++;                                                              \
     }
   foreach_vnet_aead_crypto_conversion
 #undef _
@@ -705,17 +739,21 @@ cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
   if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
       cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
     {                                                                         \
-      vnet_crypto_register_async_handler (                                    \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
-       cryptodev_raw_enq_linked_alg_enc, cryptodev_raw_dequeue);             \
-      vnet_crypto_register_async_handler (                                    \
+       cryptodev_raw_enq_linked_alg_enc);                                    \
+      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
-       cryptodev_raw_enq_linked_alg_dec, cryptodev_raw_dequeue);             \
+       cryptodev_raw_enq_linked_alg_dec);                                    \
+      ref_cnt++;                                                              \
     }
     foreach_cryptodev_link_async_alg
 #undef _
 
-      cmt->is_raw_api = 1;
+    if (ref_cnt)
+      vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_raw_dequeue);
+
+  cmt->is_raw_api = 1;
 
   return 0;