cryptodev_cache_ring_elt_t *ring_elt =
cryptodev_cache_ring_push (ring, frame);
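+ /* the push is assumed to return NULL when the cache ring has no free slot */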
+ if (PREDICT_FALSE (ring_elt == NULL))
+ return -1;
+
ring_elt->aad_len = 1;
ring_elt->op_type = (u8) op_type;
return 0;
ERROR_ASSERT (frame->n_elts > 0);
cryptodev_cache_ring_elt_t *ring_elt =
cryptodev_cache_ring_push (ring, frame);
+
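+ /* as above: give up if the cache ring could not take the frame */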
+ if (PREDICT_FALSE (ring_elt == NULL))
+ return -1;
+
ring_elt->aad_len = aad_len;
ring_elt->op_type = (u8) op_type;
return 0;
}
static_always_inline u8
-cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
- u32 *enqueue_thread_idx)
+cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
vnet_crypto_async_frame_t *frame = NULL;
cryptodev_cache_ring_t *ring = &cet->cache_ring;
u16 *const deq = &ring->deq_tail;
- u16 n_deq, idx, left_to_deq, i;
+ u16 n_deq, left_to_deq;
u16 max_to_deq = 0;
u16 inflight = cet->inflight;
u8 dequeue_more = 0;
u32 n_elts, n;
u64 err0 = 0, err1 = 0, err2 = 0, err3 = 0; /* partial errors mask */
- idx = ring->deq_tail;
-
- for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++)
- {
- u32 frame_inflight =
- CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT (ring, idx);
-
- if (PREDICT_TRUE (frame_inflight > 0))
- break;
- idx++;
- idx &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
- }
-
- ERROR_ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
- ring->deq_tail = idx;
-
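/* elements of the frame at deq_tail not yet dequeued from the device */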
left_to_deq =
ring->frames[*deq].f->n_elts - ring->frames[*deq].deq_elts_tail;
max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX);
/* the deq field tracks the frame currently being dequeued; based on it
the number of elements to dequeue for that frame can be specified */
-
n_deq =
rte_cryptodev_dequeue_burst (cet->cryptodev_id, cet->cryptodev_q,
(struct rte_crypto_op **) cops, max_to_deq);
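/* n_deq may be less than max_to_deq: the device only returns completed ops */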
while (n_elts)
{
fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
- err0 |= (fe[0].status == VNET_CRYPTO_OP_STATUS_COMPLETED) << n;
+ err0 |= ((u64) (fe[0].status == VNET_CRYPTO_OP_STATUS_COMPLETED)) << n;
n++;
fe++;
cop++;
n_elts--;
}
ring->frames[*deq].deq_elts_tail += n_deq;
if (cryptodev_cache_ring_update_deq_tail (ring, deq))
{
- *nb_elts_processed = frame->n_elts;
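+ /* modular distance from ring->tail to ring->deq_tail: frames fully
+ dequeued but not yet popped, e.g. with a 64-deep ring, tail = 60 and
+ deq_tail = 2 give (64 - 60 + 2) & 63 = 6 */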
+ u32 fr_processed =
+ (CRYPTODEV_CACHE_QUEUE_SIZE - ring->tail + ring->deq_tail) &
+ CRYPTODEV_CACHE_QUEUE_MASK;
+
*enqueue_thread_idx = frame->enqueue_thread_index;
- dequeue_more = (max_to_deq < CRYPTODE_DEQ_MAX);
+ dequeue_more = (fr_processed < CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE);
}
cet->inflight = inflight;
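/* keep draining while ops are in flight and the previous pass asked for more */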
while (cet->inflight > 0 && dequeue_more)
{
- dequeue_more = cryptodev_frame_dequeue_internal (vm, nb_elts_processed,
- enqueue_thread_idx);
+ dequeue_more = cryptodev_frame_dequeue_internal (vm, enqueue_thread_idx);
}
if (PREDICT_TRUE (ring->frames[ring->enq_head].f != 0))
if (PREDICT_TRUE (ring_elt->f != 0))
{
- if (ring_elt->enq_elts_head == ring_elt->deq_elts_tail)
+ if (ring_elt->n_elts == ring_elt->deq_elts_tail)
{
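+ /* the whole frame is dequeued: report its element count and wake the
+ crypto dispatch node on this thread */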
+ *nb_elts_processed = ring_elt->n_elts;
vlib_node_set_interrupt_pending (
vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
ret_frame = cryptodev_cache_ring_pop (ring);