dpdk-cryptodev: improve cryptodev cache ring implementation 46/39346/2
authorPiotr Bronowski <piotrx.bronowski@intel.com>
Thu, 6 Jul 2023 23:02:57 +0000 (23:02 +0000)
committerFan Zhang <fanzhang.oss@gmail.com>
Fri, 18 Aug 2023 15:20:02 +0000 (15:20 +0000)
The SW ring is renamed to the cache ring. This name better reflects the
purpose of this ring. We've introduced push/pop functions, as well as
other utility functions which remove code repetition. Error handling
is improved: previously, in case of an error all frame elements were
marked as bad; now only those for which errors occurred have the error
status set.
Unnecessary stats counters have been removed.

Type: improvement
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Change-Id: I2fd42a529ac84ce5ad260611d6b35a861d441c79

src/plugins/dpdk/cryptodev/cryptodev.c
src/plugins/dpdk/cryptodev/cryptodev.h
src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c
src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
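
For context, the usage pattern this change introduces is sketched below. This is a minimal illustration distilled from the diff that follows, not code from the tree: cryptodev_cache_ring_t, cryptodev_cache_ring_push() and cryptodev_cache_ring_pop() are the helpers added in cryptodev.h, while the example_* wrappers and their exact checks are hypothetical.

/* Sketch only: the enqueue side caches the frame on the ring and records
 * its per-frame metadata; the real handlers do the same before the crypto
 * engine enqueue loop runs. */
static int
example_enqueue (cryptodev_engine_thread_t *cet,
                 vnet_crypto_async_frame_t *frame, u8 op_type, u8 aad_len)
{
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  /* push stores the frame in frames[head] and advances ring->head */
  cryptodev_cache_ring_elt_t *elt = cryptodev_cache_ring_push (ring, frame);

  elt->aad_len = aad_len;
  elt->op_type = op_type;
  return 0;
}

/* Sketch only: a frame is popped (and its state set to SUCCESS or
 * ELT_ERROR from frame_elts_errs_mask) once every element has been both
 * enqueued to and dequeued from the crypto engine. */
static vnet_crypto_async_frame_t *
example_dequeue (cryptodev_engine_thread_t *cet)
{
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  cryptodev_cache_ring_elt_t *elt = &ring->frames[ring->tail];

  if (elt->f != 0 && elt->enq_elts_head == elt->deq_elts_tail &&
      elt->deq_elts_tail == elt->n_elts)
    return cryptodev_cache_ring_pop (ring);

  return 0;
}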

index fa54d2b..c66e9ed 100644 (file)
@@ -667,37 +667,66 @@ VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
 };
 
 static clib_error_t *
-cryptodev_show_sw_rings_fn (vlib_main_t *vm, unformat_input_t *input,
-                           vlib_cli_command_t *cmd)
+cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
+                              vlib_cli_command_t *cmd)
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   u32 thread_index = 0;
   vec_foreach_index (thread_index, cmt->per_thread_data)
     {
       cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
+      cryptodev_cache_ring_t *ring = &cet->cache_ring;
+      u16 head = ring->head;
+      u16 tail = ring->tail;
+      u16 n_cached = ((head == tail) && (ring->frames[head].f == 0)) ?
+                            0 :
+                    ((head == tail) && (ring->frames[head].f != 0)) ?
+                            (CRYPTODEV_CACHE_QUEUE_MASK + 1) :
+                    (head > tail) ?
+                            (head - tail) :
+                            (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);
+
+      u16 enq_head = ring->enq_head;
+      u16 deq_tail = ring->deq_tail;
+      u16 n_frames_inflight =
+       ((enq_head == deq_tail) && (ring->frames[enq_head].f == 0)) ?
+               0 :
+       ((enq_head == deq_tail) && (ring->frames[enq_head].f != 0)) ?
+               CRYPTODEV_CACHE_QUEUE_MASK + 1 :
+       (enq_head > deq_tail) ?
+               (enq_head - deq_tail) :
+               (CRYPTODEV_CACHE_QUEUE_MASK - deq_tail + enq_head);
+
+      u16 n_frames_processed =
+       ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ?
+               0 :
+       ((tail == deq_tail) && (ring->frames[deq_tail].f != 0)) ?
+                                 (CRYPTODEV_CACHE_QUEUE_MASK + 1) :
+       (deq_tail > tail) ? (deq_tail - tail) :
+                                 (CRYPTODEV_CACHE_QUEUE_MASK - tail + deq_tail);
+
       if (vlib_num_workers () > 0 && thread_index == 0)
        continue;
       vlib_cli_output (vm, "\n\n");
-      vlib_cli_output (vm, "Frames total: %d", cet->frames_on_ring);
-      vlib_cli_output (vm, "Frames pending in a ring: %d",
-                      cet->frames_on_ring - cet->enqueued_not_dequeueq -
-                        cet->deqeued_not_returned);
+      vlib_cli_output (vm, "Frames total: %d", n_cached);
+      vlib_cli_output (vm, "Frames pending in the ring: %d",
+                      n_cached - n_frames_inflight - n_frames_processed);
       vlib_cli_output (vm, "Frames enqueued but not dequeued: %d",
-                      cet->enqueued_not_dequeueq);
+                      n_frames_inflight);
       vlib_cli_output (vm, "Frames dequed but not returned: %d",
-                      cet->deqeued_not_returned);
+                      n_frames_processed);
       vlib_cli_output (vm, "inflight: %d", cet->inflight);
-      vlib_cli_output (vm, "Head: %d", cet->frame_ring.head);
-      vlib_cli_output (vm, "Tail: %d", cet->frame_ring.tail);
+      vlib_cli_output (vm, "Head: %d", ring->head);
+      vlib_cli_output (vm, "Tail: %d", ring->tail);
       vlib_cli_output (vm, "\n\n");
     }
   return 0;
 }
 
 VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
-  .path = "show cryptodev sw-ring status",
-  .short_help = "show status of all cryptodev software rings",
-  .function = cryptodev_show_sw_rings_fn,
+  .path = "show cryptodev cache status",
+  .short_help = "show status of all cryptodev cache rings",
+  .function = cryptodev_show_cache_rings_fn,
 };
 
 static clib_error_t *
index 1dc5e03..e7bdfc1 100644 (file)
@@ -156,26 +156,51 @@ typedef struct
 typedef struct
 {
   vnet_crypto_async_frame_t *f;
+  union
+  {
+    struct
+    {
+      /* index of frame elt where enque to
+       * the crypto engine is happening */
+      u8 enq_elts_head;
+      /* index of the frame elt where dequeue
+       * from the crypto engine is happening */
+      u8 deq_elts_tail;
+      u8 elts_inflight;
 
-  u8 enqueued;
-  u8 dequeued;
-  u8 deq_state;
-  u8 frame_inflight;
+      u8 op_type;
+      u8 aad_len;
+      u8 n_elts;
+      u16 reserved;
+    };
+    u64 raw;
+  };
 
-  u8 op_type;
-  u8 aad_len;
-  u8 n_elts;
-  u8 reserved;
-} cryptodev_async_ring_elt;
+  u64 frame_elts_errs_mask;
+} cryptodev_cache_ring_elt_t;
 
 typedef struct
 {
-  cryptodev_async_ring_elt frames[VNET_CRYPTO_FRAME_POOL_SIZE];
-  uint16_t head;
-  uint16_t tail;
-  uint16_t enq; /*record the frame currently being enqueued */
-  uint16_t deq; /*record the frame currently being dequeued */
-} cryptodev_async_frame_sw_ring;
+  cryptodev_cache_ring_elt_t frames[VNET_CRYPTO_FRAME_POOL_SIZE];
+
+  union
+  {
+    struct
+    {
+      /* head of the cache ring */
+      u16 head;
+      /* tail of the cache ring */
+      u16 tail;
+      /* index of the frame where enqueue
+       * to the crypto engine is happening */
+      u16 enq_head;
+      /* index of the frame where dequeue
+       * from the crypto engine is happening */
+      u16 deq_tail;
+    };
+    u64 raw;
+  };
+} cryptodev_cache_ring_t;
 
 typedef struct
 {
@@ -194,13 +219,9 @@ typedef struct
     };
   };
 
-  cryptodev_async_frame_sw_ring frame_ring;
+  cryptodev_cache_ring_t cache_ring;
   u16 cryptodev_id;
   u16 cryptodev_q;
-  u16 frames_on_ring;
-  u16 enqueued_not_dequeueq;
-  u16 deqeued_not_returned;
-  u16 pending_to_qat;
   u16 inflight;
 } cryptodev_engine_thread_t;
 
@@ -224,16 +245,107 @@ typedef struct
 
 extern cryptodev_main_t cryptodev_main;
 
+#define CRYPTODEV_CACHE_RING_GET_FRAME(r, i)                                  \
+  ((r)->frames[(i) &CRYPTODEV_CACHE_QUEUE_MASK].f)
+
+#define CRYPTODEV_CACHE_RING_GET_ERR_MASK(r, i)                               \
+  ((r)->frames[(i) &CRYPTODEV_CACHE_QUEUE_MASK].frame_elts_errs_mask)
+
+#define CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT(r, i)                    \
+  (((r)->frames[(i) &CRYPTODEV_CACHE_QUEUE_MASK].enq_elts_head) -             \
+   ((r)->frames[(i) &CRYPTODEV_CACHE_QUEUE_MASK].deq_elts_tail))
+
 static_always_inline void
-cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f,
-                                vnet_crypto_op_status_t s,
-                                vnet_crypto_async_frame_state_t fs)
+cryptodev_cache_ring_update_enq_head (cryptodev_cache_ring_t *r,
+                                     vnet_crypto_async_frame_t *f)
+{
+  if (r->frames[r->enq_head].enq_elts_head == f->n_elts)
+    {
+      r->enq_head++;
+      r->enq_head &= CRYPTODEV_CACHE_QUEUE_MASK;
+      f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
+    }
+}
+
+static_always_inline bool
+cryptodev_cache_ring_update_deq_tail (cryptodev_cache_ring_t *r,
+                                     u16 *const deq)
 {
-  u32 n_elts = f->n_elts, i;
+  if (r->frames[*deq].deq_elts_tail == r->frames[*deq].n_elts)
+    {
+      *deq += 1;
+      *deq &= CRYPTODEV_CACHE_QUEUE_MASK;
+      return 1;
+    }
+
+  return 0;
+}
+static_always_inline u64
+cryptodev_mark_frame_fill_err (vnet_crypto_async_frame_t *f, u64 current_err,
+                              u16 index, u16 n, vnet_crypto_op_status_t op_s)
+{
+  u64 err = current_err;
+  u16 i;
+
+  ERROR_ASSERT (index + n <= VNET_CRYPTO_FRAME_SIZE);
+  ERROR_ASSERT (op_s != VNET_CRYPTO_OP_STATUS_COMPLETED);
+
+  for (i = index; i < (index + n); i++)
+    f->elts[i].status = op_s;
+
+  err |= (~(~(0u) << n) << index);
+
+  return err;
+}
+
+static_always_inline cryptodev_cache_ring_elt_t *
+cryptodev_cache_ring_push (cryptodev_cache_ring_t *r,
+                          vnet_crypto_async_frame_t *f)
+{
+  u16 head = r->head;
+  cryptodev_cache_ring_elt_t *ring_elt = &r->frames[head];
+  /**
+   * in debug mode we do the ring sanity test when a frame is enqueued to
+   * the ring.
+   **/
+#if CLIB_DEBUG > 0
+  u16 tail = r->tail;
+  u16 n_cached = (head >= tail) ? (head - tail) :
+                                       (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);
+  ERROR_ASSERT (n_cached < VNET_CRYPTO_FRAME_POOL_SIZE);
+  ERROR_ASSERT (r->raw == 0 && r->frames[head].raw == 0 &&
+               r->frames[head].f == 0);
+#endif
+  ring_elt->f = f;
+  ring_elt->n_elts = f->n_elts;
+  /* update head */
+  r->head++;
+  r->head &= CRYPTODEV_CACHE_QUEUE_MASK;
+  return ring_elt;
+}
+
+static_always_inline vnet_crypto_async_frame_t *
+cryptodev_cache_ring_pop (cryptodev_cache_ring_t *r)
+{
+  vnet_crypto_async_frame_t *f;
+  u16 tail = r->tail;
+  cryptodev_cache_ring_elt_t *ring_elt = &r->frames[tail];
+
+  ERROR_ASSERT (r->frames[r->head].raw == 0 ? r->head != tail : 1);
+  ERROR_ASSERT (r->frames[tail].raw != 0);
+  ERROR_ASSERT (ring_elt->deq_elts_tail == ring_elt->enq_elts_head &&
+               ring_elt->deq_elts_tail == ring_elt->n_elts);
+
+  f = CRYPTODEV_CACHE_RING_GET_FRAME (r, tail);
+  f->state = CRYPTODEV_CACHE_RING_GET_ERR_MASK (r, r->tail) == 0 ?
+                    VNET_CRYPTO_FRAME_STATE_SUCCESS :
+                    VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
+
+  clib_memset (ring_elt, 0, sizeof (*ring_elt));
+  r->tail++;
+  r->tail &= CRYPTODEV_CACHE_QUEUE_MASK;
 
-  for (i = 0; i < n_elts; i++)
-    f->elts[i].status = s;
-  f->state = fs;
+  return f;
 }
 
 int cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
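
To make the new per-element error accounting concrete (a worked example, not tree code): cryptodev_mark_frame_fill_err() above marks elements [index, index + n) of a frame as failed and returns the updated frame_elts_errs_mask, one bit per element.

/* illustrative values: the third, fourth and fifth elements of a frame failed */
u64 mask = 0;
u16 index = 2, n = 3;
/* ~(~(0u) << n) builds n low bits (0b111); shifting by index moves them to
 * the failed elements' positions: bits 2..4, i.e. 0b11100 */
mask |= (~(~(0u) << n) << index);
/* cryptodev_cache_ring_pop () reports VNET_CRYPTO_FRAME_STATE_ELT_ERROR
 * for the frame whenever this mask is non-zero */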
index 56b9105..4e4295a 100644 (file)
@@ -142,16 +142,14 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
-  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  ERROR_ASSERT (frame != 0);
+  ERROR_ASSERT (frame->n_elts > 0);
+  cryptodev_cache_ring_elt_t *ring_elt =
+    cryptodev_cache_ring_push (ring, frame);
 
-  cet->frames_on_ring++;
-  ring_elt->f = frame;
-  ring_elt->n_elts = frame->n_elts;
   ring_elt->aad_len = 1;
   ring_elt->op_type = (u8) op_type;
-  ring->head++;
-  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
   return 0;
 }
 
@@ -163,7 +161,8 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
   cryptodev_main_t *cmt = &cryptodev_main;
   clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  u16 *const enq = &ring->enq_head;
   vnet_crypto_async_frame_elt_t *fe;
   cryptodev_session_t *sess = 0;
   cryptodev_op_t *cops[CRYPTODE_ENQ_MAX] = {};
@@ -177,7 +176,7 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
     return;
 
   max_to_enq = clib_min (CRYPTODE_ENQ_MAX,
-                        frame->n_elts - ring->frames[ring->enq].enqueued);
+                        frame->n_elts - ring->frames[*enq].enq_elts_head);
 
   if (cet->inflight + max_to_enq > CRYPTODEV_MAX_INFLIGHT)
     return;
@@ -187,14 +186,18 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
   if (PREDICT_FALSE (
        rte_mempool_get_bulk (cet->cop_pool, (void **) cops, n_elts) < 0))
     {
-      cryptodev_mark_frame_err_status (frame,
-                                      VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
-                                      VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
+      cryptodev_mark_frame_fill_err (
+       frame, ring->frames[*enq].frame_elts_errs_mask,
+       ring->frames[*enq].enq_elts_head, max_to_enq,
+       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+      ring->frames[*enq].enq_elts_head += max_to_enq;
+      ring->frames[*enq].deq_elts_tail += max_to_enq;
+      cryptodev_cache_ring_update_enq_head (ring, frame);
       return;
     }
 
-  fe = frame->elts + ring->frames[ring->enq].enqueued;
-  bi = frame->buffer_indices + ring->frames[ring->enq].enqueued;
+  fe = frame->elts + ring->frames[*enq].enq_elts_head;
+  bi = frame->buffer_indices + ring->frames[*enq].enq_elts_head;
 
   while (n_elts)
     {
@@ -221,9 +224,10 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
              if (PREDICT_FALSE (
                    cryptodev_session_create (vm, last_key_index, 0) < 0))
                {
-                 cryptodev_mark_frame_err_status (
-                   frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
-                   VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
+                 cryptodev_mark_frame_fill_err (
+                   frame, ring->frames[*enq].frame_elts_errs_mask,
+                   ring->frames[*enq].enq_elts_head, max_to_enq,
+                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
                  goto error_exit;
                }
            }
@@ -257,7 +261,7 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
        cryptodev_validate_mbuf (sop->m_src, b);
 
       clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
-      ring->frames[ring->enq].enqueued++;
+      ring->frames[*enq].enq_elts_head++;
       cop++;
       bi++;
       fe++;
@@ -267,21 +271,15 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
   n_enqueue =
     rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                 (struct rte_crypto_op **) cops, max_to_enq);
-  ASSERT (n_enqueue == max_to_enq);
+  ERROR_ASSERT (n_enqueue == max_to_enq);
   cet->inflight += max_to_enq;
-  ring->frames[ring->enq].frame_inflight += max_to_enq;
-  if (ring->frames[ring->enq].enqueued == frame->n_elts)
-    {
-      cet->frame_ring.enq++;
-      cet->frame_ring.enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
-      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
-    }
-
+  cryptodev_cache_ring_update_enq_head (ring, frame);
   return;
 
 error_exit:
-  ring->enq++;
-  ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
+  ring->frames[*enq].enq_elts_head += max_to_enq;
+  ring->frames[*enq].deq_elts_tail += max_to_enq;
+  cryptodev_cache_ring_update_enq_head (ring, frame);
   rte_mempool_put_bulk (cet->cop_pool, (void **) cops, max_to_enq);
 }
 
@@ -292,16 +290,13 @@ cryptodev_frame_aead_enqueue (vlib_main_t *vm,
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
-  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];
-  cet->frames_on_ring++;
-  ring_elt->f = frame;
-  ring_elt->n_elts = frame->n_elts;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  ERROR_ASSERT (frame != 0);
+  ERROR_ASSERT (frame->n_elts > 0);
+  cryptodev_cache_ring_elt_t *ring_elt =
+    cryptodev_cache_ring_push (ring, frame);
   ring_elt->aad_len = aad_len;
   ring_elt->op_type = (u8) op_type;
-  ring->head++;
-  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
-
   return 0;
 }
 
@@ -312,7 +307,8 @@ cryptodev_aead_enqueue_internal (vlib_main_t *vm,
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  u16 *const enq = &ring->enq_head;
   clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
   vnet_crypto_async_frame_elt_t *fe;
   cryptodev_session_t *sess = 0;
@@ -321,7 +317,7 @@ cryptodev_aead_enqueue_internal (vlib_main_t *vm,
   u32 *bi = 0;
   u32 n_enqueue = 0, n_elts;
   u32 last_key_index = ~0;
-  u16 left_to_enq = frame->n_elts - ring->frames[ring->enq].enqueued;
+  u16 left_to_enq = frame->n_elts - ring->frames[*enq].enq_elts_head;
   const u16 max_to_enq = clib_min (CRYPTODE_ENQ_MAX, left_to_enq);
 
   if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
@@ -335,14 +331,18 @@ cryptodev_aead_enqueue_internal (vlib_main_t *vm,
   if (PREDICT_FALSE (
        rte_mempool_get_bulk (cet->cop_pool, (void **) cops, n_elts) < 0))
     {
-      cryptodev_mark_frame_err_status (frame,
-                                      VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
-                                      VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
+      cryptodev_mark_frame_fill_err (
+       frame, ring->frames[*enq].frame_elts_errs_mask,
+       ring->frames[*enq].enq_elts_head, max_to_enq,
+       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+      ring->frames[*enq].enq_elts_head += max_to_enq;
+      ring->frames[*enq].deq_elts_tail += max_to_enq;
+      cryptodev_cache_ring_update_enq_head (ring, frame);
       return -1;
     }
 
-  fe = frame->elts + ring->frames[ring->enq].enqueued;
-  bi = frame->buffer_indices + ring->frames[ring->enq].enqueued;
+  fe = frame->elts + ring->frames[*enq].enq_elts_head;
+  bi = frame->buffer_indices + ring->frames[*enq].enq_elts_head;
 
   while (n_elts)
     {
@@ -367,9 +367,10 @@ cryptodev_aead_enqueue_internal (vlib_main_t *vm,
              if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
                                                           aad_len) < 0))
                {
-                 cryptodev_mark_frame_err_status (
-                   frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
-                   VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
+                 cryptodev_mark_frame_fill_err (
+                   frame, ring->frames[*enq].frame_elts_errs_mask,
+                   ring->frames[*enq].enq_elts_head, max_to_enq,
+                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
                  goto error_exit;
                }
            }
@@ -387,9 +388,10 @@ cryptodev_aead_enqueue_internal (vlib_main_t *vm,
              if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
                                                           aad_len) < 0))
                {
-                 cryptodev_mark_frame_err_status (
-                   frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
-                   VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
+                 cryptodev_mark_frame_fill_err (
+                   frame, ring->frames[*enq].frame_elts_errs_mask,
+                   ring->frames[*enq].enq_elts_head, max_to_enq,
+                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
                  goto error_exit;
                }
            }
@@ -426,7 +428,6 @@ cryptodev_aead_enqueue_internal (vlib_main_t *vm,
       clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
       clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
 
-      ring->frames[ring->enq].enqueued++;
       cop++;
       bi++;
       fe++;
@@ -436,22 +437,17 @@ cryptodev_aead_enqueue_internal (vlib_main_t *vm,
   n_enqueue =
     rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                 (struct rte_crypto_op **) cops, max_to_enq);
-  ASSERT (n_enqueue == max_to_enq);
+  ERROR_ASSERT (n_enqueue == max_to_enq);
   cet->inflight += max_to_enq;
-  ring->frames[ring->enq].frame_inflight += max_to_enq;
-  if (ring->frames[ring->enq].enqueued == frame->n_elts)
-    {
-      ring->enq++;
-      ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
-      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
-      cet->enqueued_not_dequeueq++;
-    }
+  ring->frames[*enq].enq_elts_head += max_to_enq;
+  cryptodev_cache_ring_update_enq_head (ring, frame);
 
   return 0;
 
 error_exit:
-  ring->enq++;
-  ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
+  ring->frames[*enq].enq_elts_head += max_to_enq;
+  ring->frames[*enq].deq_elts_tail += max_to_enq;
+  cryptodev_cache_ring_update_enq_head (ring, frame);
   rte_mempool_put_bulk (cet->cop_pool, (void **) cops, max_to_enq);
 
   return -1;
@@ -464,7 +460,8 @@ cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
   vnet_crypto_async_frame_t *frame = NULL;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  u16 *const deq = &ring->deq_tail;
   u16 n_deq, idx, left_to_deq, i;
   u16 max_to_deq = 0;
   u16 inflight = cet->inflight;
@@ -472,24 +469,27 @@ cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
   cryptodev_op_t *cops[CRYPTODE_DEQ_MAX] = {};
   cryptodev_op_t **cop = cops;
   vnet_crypto_async_frame_elt_t *fe;
-  u32 n_elts;
-  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */
+  u32 n_elts, n;
+  u64 err0 = 0, err1 = 0, err2 = 0, err3 = 0; /* partial errors mask */
 
-  idx = ring->deq;
+  idx = ring->deq_tail;
 
   for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++)
     {
-      if (PREDICT_TRUE (ring->frames[idx].frame_inflight > 0))
+      u32 frame_inflight =
+       CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT (ring, idx);
+
+      if (PREDICT_TRUE (frame_inflight > 0))
        break;
       idx++;
       idx &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
     }
 
-  ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
-  ring->deq = idx;
+  ERROR_ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
+  ring->deq_tail = idx;
 
   left_to_deq =
-    ring->frames[ring->deq].f->n_elts - ring->frames[ring->deq].dequeued;
+    ring->frames[*deq].f->n_elts - ring->frames[*deq].deq_elts_tail;
   max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX);
 
   /* deq field can be used to track frame that is currently dequeued
@@ -502,55 +502,50 @@ cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
   if (n_deq == 0)
     return dequeue_more;
 
-  ss0 = ring->frames[ring->deq].deq_state;
-  ss1 = ring->frames[ring->deq].deq_state;
-  ss2 = ring->frames[ring->deq].deq_state;
-  ss3 = ring->frames[ring->deq].deq_state;
-
-  frame = ring->frames[ring->deq].f;
-  fe = frame->elts + ring->frames[ring->deq].dequeued;
+  frame = ring->frames[*deq].f;
+  fe = frame->elts + ring->frames[*deq].deq_elts_tail;
 
   n_elts = n_deq;
+  n = ring->frames[*deq].deq_elts_tail;
+
   while (n_elts > 4)
     {
-      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
-      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
-      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
-      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];
+      fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
+      fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
+      fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
+      fe[3].status = cryptodev_status_conversion[cop[3]->op.status];
+
+      err0 |= (fe[0].status == VNET_CRYPTO_OP_STATUS_COMPLETED) << n;
+      err1 |= (fe[1].status == VNET_CRYPTO_OP_STATUS_COMPLETED) << (n + 1);
+      err2 |= (fe[2].status == VNET_CRYPTO_OP_STATUS_COMPLETED) << (n + 2);
+      err3 |= (fe[3].status == VNET_CRYPTO_OP_STATUS_COMPLETED) << (n + 3);
 
       cop += 4;
       fe += 4;
       n_elts -= 4;
+      n += 4;
     }
 
   while (n_elts)
     {
-      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
+      fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
+      err0 |= (fe[0].status == VNET_CRYPTO_OP_STATUS_COMPLETED) << n;
+      n++;
       fe++;
       cop++;
       n_elts--;
     }
 
-  ring->frames[ring->deq].deq_state |= (u8) (ss0 | ss1 | ss2 | ss3);
+  ring->frames[*deq].frame_elts_errs_mask |= (err0 | err1 | err2 | err3);
 
   rte_mempool_put_bulk (cet->cop_pool, (void **) cops, n_deq);
 
   inflight -= n_deq;
-  ring->frames[ring->deq].dequeued += n_deq;
-  ring->frames[ring->deq].frame_inflight -= n_deq;
-  if (ring->frames[ring->deq].dequeued == ring->frames[ring->deq].n_elts)
+  ring->frames[*deq].deq_elts_tail += n_deq;
+  if (cryptodev_cache_ring_update_deq_tail (ring, deq))
     {
-      frame->state =
-       (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
-               VNET_CRYPTO_FRAME_STATE_SUCCESS :
-               VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
-
       *nb_elts_processed = frame->n_elts;
       *enqueue_thread_idx = frame->enqueue_thread_index;
       *nb_elts_processed = frame->n_elts;
       *enqueue_thread_idx = frame->enqueue_thread_index;
-      cet->deqeued_not_returned++;
-      cet->enqueued_not_dequeueq--;
-      ring->deq++;
-      ring->deq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
       dequeue_more = (max_to_deq < CRYPTODE_DEQ_MAX);
     }
 
@@ -559,7 +554,7 @@ cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
 }
 
 static_always_inline void
-cryptodev_enqueue_frame (vlib_main_t *vm, cryptodev_async_ring_elt *ring_elt)
+cryptodev_enqueue_frame (vlib_main_t *vm, cryptodev_cache_ring_elt_t *ring_elt)
 {
   cryptodev_op_type_t op_type = (cryptodev_op_type_t) ring_elt->op_type;
   u8 linked_or_aad_len = ring_elt->aad_len;
@@ -578,9 +573,9 @@ cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
   cryptodev_main_t *cmt = &cryptodev_main;
   vnet_crypto_main_t *cm = &crypto_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  cryptodev_cache_ring_elt_t *ring_elt = &ring->frames[ring->tail];
 
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
-  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->tail];
   vnet_crypto_async_frame_t *ret_frame = 0;
   u8 dequeue_more = 1;
 
@@ -590,23 +585,16 @@ cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                                                       enqueue_thread_idx);
     }
 
-  if (PREDICT_TRUE (ring->frames[ring->enq].f != 0))
-    cryptodev_enqueue_frame (vm, &ring->frames[ring->enq]);
+  if (PREDICT_TRUE (ring->frames[ring->enq_head].f != 0))
+    cryptodev_enqueue_frame (vm, &ring->frames[ring->enq_head]);
 
   if (PREDICT_TRUE (ring_elt->f != 0))
     {
-      if ((ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_SUCCESS ||
-          ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_ELT_ERROR) &&
-         ring_elt->enqueued == ring_elt->dequeued)
+      if (ring_elt->enq_elts_head == ring_elt->deq_elts_tail)
        {
          vlib_node_set_interrupt_pending (
            vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
-         ret_frame = ring_elt->f;
-         memset (ring_elt, 0, sizeof (*ring_elt));
-         ring->tail += 1;
-         ring->tail &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
-         cet->frames_on_ring--;
-         cet->deqeued_not_returned--;
+         ret_frame = cryptodev_cache_ring_pop (ring);
          return ret_frame;
        }
     }
index 9f0936a..19291eb 100644 (file)
@@ -96,7 +96,7 @@ cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
 {
   union rte_cryptodev_session_ctx sess_ctx;
 
-  ASSERT (cet->reset_sess != 0);
+  ERROR_ASSERT (cet->reset_sess != 0);
 
   sess_ctx.crypto_sess = cet->reset_sess;
 
@@ -112,15 +112,14 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
-  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];
-  cet->frames_on_ring++;
-  ring_elt->f = frame;
-  ring_elt->n_elts = frame->n_elts;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  ERROR_ASSERT (frame != 0);
+  ERROR_ASSERT (frame->n_elts > 0);
+  cryptodev_cache_ring_elt_t *ring_elt =
+    cryptodev_cache_ring_push (ring, frame);
+
   ring_elt->aad_len = 1;
   ring_elt->op_type = (u8) op_type;
-  ring->head++;
-  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
   return 0;
 }
 
@@ -135,13 +134,14 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
   vlib_buffer_t **b;
   struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
   struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  u16 *const enq = &ring->enq_head;
   u32 n_elts;
   u32 last_key_index = ~0;
   i16 min_ofs;
   u32 max_end;
   u32 max_to_enq = clib_min (CRYPTODE_ENQ_MAX,
-                            frame->n_elts - ring->frames[ring->enq].enqueued);
+                            frame->n_elts - ring->frames[*enq].enq_elts_head);
   u8 is_update = 0;
   int status;
 
@@ -152,8 +152,8 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
 
   vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);
 
-  b = cet->b + ring->frames[ring->enq].enqueued;
-  fe = frame->elts + ring->frames[ring->enq].enqueued;
+  b = cet->b + ring->frames[*enq].enq_elts_head;
+  fe = frame->elts + ring->frames[*enq].enq_elts_head;
 
   while (n_elts)
     {
@@ -233,7 +233,7 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
       if (PREDICT_FALSE (status < 0))
        goto error_exit;
 
-      ring->frames[ring->enq].enqueued += 1;
+      ring->frames[*enq].enq_elts_head += 1;
       b++;
       fe++;
       n_elts--;
@@ -244,22 +244,19 @@ cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
     goto error_exit;
 
   cet->inflight += max_to_enq;
-  ring->frames[ring->enq].frame_inflight += max_to_enq;
-  if (ring->frames[ring->enq].enqueued == frame->n_elts)
-    {
-      cet->frame_ring.enq += 1;
-      cet->frame_ring.enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
-      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
-    }
+  cryptodev_cache_ring_update_enq_head (ring, frame);
   return;
 
 error_exit:
-  cryptodev_mark_frame_err_status (frame,
-                                  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
-                                  VNET_CRYPTO_FRAME_STATE_ELT_ERROR);
+  cryptodev_mark_frame_fill_err (frame,
+                                ring->frames[*enq].frame_elts_errs_mask,
+                                ring->frames[*enq].enq_elts_head, max_to_enq,
+                                VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+  ring->frames[*enq].enq_elts_head += max_to_enq;
+  ring->frames[*enq].deq_elts_tail += max_to_enq;
+  cryptodev_cache_ring_update_enq_head (ring, frame);
   cryptodev_reset_ctx (cet);
-  cet->frame_ring.enq += 1;
-  cet->frame_ring.enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
+
   return;
 }
 
@@ -269,15 +266,14 @@ cryptodev_raw_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame,
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
-  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];
-  cet->frames_on_ring++;
-  ring_elt->f = frame;
-  ring_elt->n_elts = frame->n_elts;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  ERROR_ASSERT (frame != 0);
+  ERROR_ASSERT (frame->n_elts > 0);
+  cryptodev_cache_ring_elt_t *ring_elt =
+    cryptodev_cache_ring_push (ring, frame);
+
   ring_elt->aad_len = aad_len;
   ring_elt->op_type = (u8) op_type;
-  ring->head++;
-  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
   return 0;
 }
 
@@ -288,7 +284,7 @@ cryptodev_raw_aead_enqueue_internal (vlib_main_t *vm,
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
   vnet_crypto_async_frame_elt_t *fe;
   vlib_buffer_t **b;
   u32 n_elts;
@@ -296,7 +292,8 @@ cryptodev_raw_aead_enqueue_internal (vlib_main_t *vm,
   struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
   struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
   u32 last_key_index = ~0;
-  u16 left_to_enq = frame->n_elts - ring->frames[ring->enq].enqueued;
+  u16 *const enq = &ring->enq_head;
+  u16 left_to_enq = frame->n_elts - ring->frames[*enq].enq_elts_head;
   u16 max_to_enq = clib_min (CRYPTODE_ENQ_MAX, left_to_enq);
   u8 is_update = 0;
   int status;
@@ -310,8 +307,8 @@ cryptodev_raw_aead_enqueue_internal (vlib_main_t *vm,
 
   vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);
 
-  fe = frame->elts + ring->frames[ring->enq].enqueued;
-  b = cet->b + ring->frames[ring->enq].enqueued;
+  fe = frame->elts + ring->frames[*enq].enq_elts_head;
+  b = cet->b + ring->frames[*enq].enq_elts_head;
   cofs.raw = 0;
 
   while (n_elts)
@@ -424,7 +421,7 @@ cryptodev_raw_aead_enqueue_internal (vlib_main_t *vm,
       if (PREDICT_FALSE (status < 0))
        goto error_exit;
 
-      ring->frames[ring->enq].enqueued += 1;
+      ring->frames[*enq].enq_elts_head += 1;
       fe++;
       b++;
       n_elts--;
@@ -435,24 +432,18 @@ cryptodev_raw_aead_enqueue_internal (vlib_main_t *vm,
     goto error_exit;
 
   cet->inflight += max_to_enq;
-  ring->frames[ring->enq].frame_inflight += max_to_enq;
-  if (ring->frames[ring->enq].enqueued == frame->n_elts)
-    {
-      ring->enq += 1;
-      ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
-      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
-      cet->enqueued_not_dequeueq++;
-    }
-
+  cryptodev_cache_ring_update_enq_head (ring, frame);
   return;
 
 error_exit:
-  cryptodev_mark_frame_err_status (frame,
-                                  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
-                                  VNET_CRYPTO_FRAME_STATE_ELT_ERROR);
+  cryptodev_mark_frame_fill_err (frame,
+                                ring->frames[*enq].frame_elts_errs_mask,
+                                ring->frames[*enq].enq_elts_head, max_to_enq,
+                                VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+  ring->frames[*enq].enq_elts_head += max_to_enq;
+  ring->frames[*enq].deq_elts_tail += max_to_enq;
+  cryptodev_cache_ring_update_enq_head (ring, frame);
   cryptodev_reset_ctx (cet);
-  ring->enq += 1;
-  ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
   return;
 }
 
@@ -472,7 +463,8 @@ cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
   vnet_crypto_async_frame_t *frame;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  u16 *const deq = &ring->deq_tail;
   u32 n_success;
   u16 n_deq, indice, i, left_to_deq;
   u16 max_to_deq = 0;
@@ -480,22 +472,22 @@ cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
   u8 dequeue_more = 0;
   int dequeue_status;
 
-  indice = ring->deq;
+  indice = *deq;
 
   for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++)
     {
-      if (PREDICT_TRUE (ring->frames[indice].frame_inflight > 0))
+      if (PREDICT_TRUE (
+           CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT (ring, indice) > 0))
        break;
       indice += 1;
-      indice &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
+      indice &= CRYPTODEV_CACHE_QUEUE_MASK;
     }
 
-  ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
+  ERROR_ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
 
-  ring->deq = indice;
+  *deq = indice;
 
-  left_to_deq =
-    ring->frames[ring->deq].f->n_elts - ring->frames[ring->deq].dequeued;
+  left_to_deq = ring->frames[*deq].n_elts - ring->frames[*deq].deq_elts_tail;
   max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX);
 
   /* you can use deq field to track frame that is currently dequeued */
@@ -506,37 +498,39 @@ cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
     cet->ctx, NULL, max_to_deq, cryptodev_post_dequeue, (void **) &frame, 0,
     &n_success, &dequeue_status);
 
-  if (!n_deq)
+  if (n_deq == 0)
     return dequeue_more;
 
   inflight -= n_deq;
-  ring->frames[ring->deq].dequeued += n_deq;
-  ring->frames[ring->deq].deq_state += n_success;
-  ring->frames[ring->deq].frame_inflight -= n_deq;
+  if (PREDICT_FALSE (n_success < n_deq))
+    {
+      u16 idx = ring->frames[*deq].deq_elts_tail;
 
-  if (ring->frames[ring->deq].dequeued == ring->frames[ring->deq].n_elts)
+      for (i = 0; i < n_deq; i++)
+       {
+         if (frame->elts[idx + i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+           ring->frames[*deq].frame_elts_errs_mask |= 1 << (idx + i);
+       }
+    }
+  ring->frames[*deq].deq_elts_tail += n_deq;
+
+  if (cryptodev_cache_ring_update_deq_tail (ring, deq))
     {
     {
-      frame->state = ring->frames[ring->deq].deq_state == frame->n_elts ?
-                            VNET_CRYPTO_FRAME_STATE_SUCCESS :
-                            VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
       *nb_elts_processed = frame->n_elts;
       *enqueue_thread_idx = frame->enqueue_thread_index;
-      cet->deqeued_not_returned++;
-      cet->enqueued_not_dequeueq--;
-      ring->deq += 1;
-      ring->deq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
       dequeue_more = max_to_deq < CRYPTODE_DEQ_MAX;
     }
 
   int res =
     rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
-  ASSERT (res == 0);
+  ERROR_ASSERT (res == 0);
   cet->inflight = inflight;
   return dequeue_more;
 }
 
 static_always_inline void
-cryptodev_enqueue_frame (vlib_main_t *vm, cryptodev_async_ring_elt *ring_elt)
+cryptodev_enqueue_frame_to_qat (vlib_main_t *vm,
+                               cryptodev_cache_ring_elt_t *ring_elt)
 {
   cryptodev_op_type_t op_type = (cryptodev_op_type_t) ring_elt->op_type;
   u8 linked_or_aad_len = ring_elt->aad_len;
@@ -555,8 +549,8 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
   cryptodev_main_t *cmt = &cryptodev_main;
   vnet_crypto_main_t *cm = &crypto_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
-  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
-  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->tail];
+  cryptodev_cache_ring_t *ring = &cet->cache_ring;
+  cryptodev_cache_ring_elt_t *ring_elt = &ring->frames[ring->tail];
   vnet_crypto_async_frame_t *ret_frame = 0;
   u8 dequeue_more = 1;
 
@@ -566,26 +560,17 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                                                     enqueue_thread_idx);
     }
 
-  if (PREDICT_TRUE (ring->frames[ring->enq].f != 0))
-    cryptodev_enqueue_frame (vm, &ring->frames[ring->enq]);
+  if (PREDICT_TRUE (ring->frames[ring->enq_head].f != 0))
+    cryptodev_enqueue_frame_to_qat (vm, &ring->frames[ring->enq_head]);
 
   if (PREDICT_TRUE (ring_elt->f != 0))
     {
-      if ((ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_SUCCESS ||
-          ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_ELT_ERROR) &&
-         ring_elt->enqueued == ring_elt->dequeued)
+      if (ring_elt->enq_elts_head == ring_elt->deq_elts_tail)
        {
          vlib_node_set_interrupt_pending (
            vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
-         ret_frame = ring_elt->f;
-         ring_elt->f = 0;
-         ring_elt->dequeued = 0;
-         ring_elt->enqueued = 0;
-         ring_elt->deq_state = 0;
-         ring->tail += 1;
-         ring->tail &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
-         cet->frames_on_ring--;
-         cet->deqeued_not_returned--;
+         ret_frame = cryptodev_cache_ring_pop (ring);
+
          return ret_frame;
        }
     }
@@ -684,7 +669,7 @@ cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
     {
       u32 thread_id = cet - cmt->per_thread_data;
       u32 numa = vlib_get_main_by_index (thread_id)->numa_node;
-      u8 *name = format (0, "cache_frame_ring_%u_%u", numa, thread_id);
+      u8 *name = format (0, "cache_cache_ring_%u_%u", numa, thread_id);
 
       cet->aad_buf = rte_zmalloc_socket (
        0, CRYPTODEV_NB_CRYPTO_OPS * CRYPTODEV_MAX_AAD_SIZE,