/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
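
/* Convert a frame element's cipher and integrity regions into the
 * relative head/tail offsets DPDK's raw data-path API expects.  Both are
 * expressed against the union [*min_ofs, *max_end) of the two regions,
 * which is the single contiguous data area handed to the PMD. */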
static_always_inline u64
compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t *fe, i16 *min_ofs,
                        u32 *max_end)
{
  union rte_crypto_sym_ofs ofs;
  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
  u32 integ_end =
    fe->integ_start_offset + fe->crypto_total_length + fe->integ_length_adj;

  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
  *max_end = clib_max (crypto_end, integ_end);

  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
  ofs.ofs.cipher.tail = *max_end - crypto_end;
  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
  ofs.ofs.auth.tail = *max_end - integ_end;

  return ofs.raw;
}
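
/* Build a scatter-gather list for a chained vlib buffer.  The caller
 * fills data_vec[0] with the head segment; this walks the rest of the
 * chain and appends one rte_crypto_vec per segment until `size` bytes
 * are described, failing if the chain exceeds CRYPTODEV_MAX_N_SGL
 * segments or runs out of data. */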
static_always_inline int
cryptodev_frame_build_sgl (vlib_main_t *vm, enum rte_iova_mode iova_mode,
                           struct rte_crypto_vec *data_vec, u16 *n_seg,
                           vlib_buffer_t *b, u32 size)
{
  struct rte_crypto_vec *vec = data_vec + 1;
  if (vlib_buffer_chain_linearize (vm, b) > CRYPTODEV_MAX_N_SGL)
    return -1;

  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
    {
      u32 len;
      b = vlib_get_buffer (vm, b->next_buffer);
      len = clib_min (b->current_length, size);
      vec->base = (void *) vlib_buffer_get_current (b);
      if (iova_mode == RTE_IOVA_VA)
        vec->iova = pointer_to_uword (vec->base);
      else
        vec->iova = vlib_buffer_get_current_pa (vm, b);
      vec->len = len;
      size -= len;
      vec++;
      *n_seg += 1;
    }

  if (size)
    return -1;

  return 0;
}
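
/* Re-attach a known-good session to the thread's raw data-path context
 * after a failed enqueue or dequeue, so the context is usable again for
 * the next frame.  cet->reset_sess is the session borrowed at first use
 * (see the enqueue paths below). */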
static_always_inline void
cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
{
  union rte_cryptodev_session_ctx sess_ctx;

  ASSERT (cet->reset_sess != 0);

  sess_ctx.crypto_sess = cet->reset_sess;

  rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q,
                                      cet->ctx, RTE_CRYPTO_OP_WITH_SESSION,
                                      sess_ctx, 1);
}
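
/* Enqueue a frame of chained cipher + HMAC ("linked algorithm")
 * operations on the raw data-path API.  Each element becomes one raw
 * enqueue, with the frame pointer as the opaque user data, so the
 * dequeue side can account for completions frame by frame. */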
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
                                     vnet_crypto_async_frame_t *frame,
                                     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
  u32 n_elts;
  u32 last_key_index = ~0;
  i16 min_ofs;
  u32 max_end;
  u8 is_update = 0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  b = cet->b;
  fe = frame->elts;

  while (n_elts)
    {
      union rte_crypto_sym_ofs cofs;
      u16 n_seg = 1;

      if (n_elts > 2)
        {
          clib_prefetch_load (&fe[1]);
          clib_prefetch_load (&fe[2]);
          vlib_prefetch_buffer_header (b[1], LOAD);
          vlib_prefetch_buffer_header (b[2], LOAD);
        }

      if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
          union rte_cryptodev_session_ctx sess_ctx;

          if (PREDICT_FALSE (key->keys[vm->numa_node][op_type] == 0))
            {
              status = cryptodev_session_create (vm, fe->key_index, 0);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          /* Borrow a created session to reset the session ctx, based on the
           * valid assumption that no session reset will happen until the
           * first valid packet is processed */
          if (PREDICT_FALSE (cet->reset_sess == 0))
            cet->reset_sess = key->keys[vm->numa_node][op_type];

          sess_ctx.crypto_sess = key->keys[vm->numa_node][op_type];

          status = rte_cryptodev_configure_raw_dp_ctx (
            cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
            RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, is_update);
          if (PREDICT_FALSE (status < 0))
            goto error_exit;

          last_key_index = fe->key_index;
          is_update = 1;
        }
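
      /* Translate the element's absolute cipher/integrity offsets into
       * DPDK-relative offsets plus one contiguous [min_ofs, max_end)
       * data region covering both. */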
      cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

      vec->len = max_end - min_ofs;
      if (cmt->iova_mode == RTE_IOVA_VA)
        {
          vec[0].base = (void *) (b[0]->data + min_ofs);
          vec[0].iova = pointer_to_uword (b[0]->data) + min_ofs;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
        }
      else
        {
          vec[0].base = (void *) (b[0]->data + min_ofs);
          vec[0].iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
        }
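
      /* For chained buffers the head vector only covers the first vlib
       * buffer; describe the remainder with a scatter-gather list built
       * over the rest of the chain. */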
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
          vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs;
          if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg,
                                         b[0],
                                         max_end - min_ofs - vec->len) < 0)
            goto error_exit;
        }

      status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
                                          &digest_vec, 0, (void *) frame);
      if (PREDICT_FALSE (status < 0))
        goto error_exit;

      b++;
      fe++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    {
      cryptodev_reset_ctx (cet);
      return -1;
    }

  cet->inflight += frame->n_elts;
  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}
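
/* Enqueue a frame of AEAD (e.g. AES-GCM) operations.  Same structure as
 * the linked-algorithm path above, with two differences: the AAD of each
 * element is copied into a pre-allocated, IOVA-contiguous staging buffer,
 * and a session whose AAD length no longer matches the frame's is deleted
 * and re-created. */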
static_always_inline int
cryptodev_raw_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame,
                            cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  u32 n_elts;
  union rte_crypto_sym_ofs cofs;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
  u32 last_key_index = ~0;
  u8 is_update = 0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  fe = frame->elts;
  b = cet->b;
  cofs.raw = 0;

  while (n_elts)
    {
      u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
      u16 n_seg = 1;

      if (n_elts > 1)
        {
          clib_prefetch_load (&fe[1]);
          vlib_prefetch_buffer_header (b[1], LOAD);
        }

      if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
          union rte_cryptodev_session_ctx sess_ctx;

          if (PREDICT_FALSE (key->keys[vm->numa_node][op_type] == 0))
            {
              status = cryptodev_session_create (vm, fe->key_index, aad_len);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          /* The session is stale if it was created with a different AAD
           * length; delete and re-create it with this frame's length */
          if (PREDICT_FALSE (
                (u8) key->keys[vm->numa_node][op_type]->opaque_data !=
                aad_len))
            {
              cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
                                      fe->key_index, aad_len);
              status = cryptodev_session_create (vm, fe->key_index, aad_len);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          /* Borrow a created session to reset the session ctx, based on the
           * valid assumption that no session reset will happen until the
           * first valid packet is processed */
          if (PREDICT_FALSE (cet->reset_sess == 0))
            cet->reset_sess = key->keys[vm->numa_node][op_type];

          sess_ctx.crypto_sess = key->keys[vm->numa_node][op_type];

          status = rte_cryptodev_configure_raw_dp_ctx (
            cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
            RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, is_update);
          if (PREDICT_FALSE (status < 0))
            goto error_exit;

          last_key_index = fe->key_index;
          is_update = 1;
        }

      if (cmt->iova_mode == RTE_IOVA_VA)
        {
          vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
          vec[0].iova = pointer_to_uword (vec[0].base);
          vec[0].len = fe->crypto_total_length;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
          aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
        }
      else
        {
          vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
          vec[0].iova =
            vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
          vec[0].len = fe->crypto_total_length;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
          aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
        }
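
      /* Copy the AAD into the staging slot reserved above.  Only 8- and
       * 12-byte AADs are expected here (in practice ESP without and with
       * ESN), so fixed-size loads and stores suffice. */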
      if (aad_len == 8)
        *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
      else
        {
          /* aad_len == 12 */
          *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
          *(u32 *) (cet->aad_buf + aad_offset + 8) = *(u32 *) (fe->aad + 8);
        }

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
          vec[0].len = b[0]->current_data + b[0]->current_length -
                       fe->crypto_start_offset;
          if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg,
                                         b[0],
                                         fe->crypto_total_length -
                                           vec[0].len) < 0)
            goto error_exit;
        }

      status =
        rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
                                   &digest_vec, &aad_vec, (void *) frame);
      if (PREDICT_FALSE (status < 0))
        goto error_exit;

      fe++;
      b++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    goto error_exit;

  cet->inflight += frame->n_elts;
  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}

static_always_inline u32
cryptodev_get_frame_n_elts (void *frame)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
  return f->n_elts;
}
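
/* Callbacks handed to rte_cryptodev_raw_dequeue_burst (): the one above
 * tells the PMD how many operations belong to the opaque frame, the one
 * below records each operation's status back into its frame element. */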
static_always_inline void
cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;

  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
                                          VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
}

/* Peek at the frame stored `pos` slots from the consumer head of ring
 * `r` without dequeuing it */
#define GET_RING_OBJ(r, pos, f)                                               \
  do                                                                          \
    {                                                                         \
      vnet_crypto_async_frame_t **ring = (void *) &r[1];                      \
      f = ring[(r->cons.head + pos) & r->mask];                               \
    }                                                                         \
  while (0)
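
/* Dequeue completed operations and return at most one finished frame.
 * The PMD may complete only part of a frame per poll, so partially
 * dequeued frames are parked in cet->cached_frame and their progress is
 * packed into f->state: bits 0-6 hold the number of elements still to
 * be dequeued, bit 7 is set if any element failed. */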
static_always_inline vnet_crypto_async_frame_t *
cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                       u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame, *frame_ret = 0;
  u32 n_deq, n_success;
  u32 n_cached_frame = rte_ring_count (cet->cached_frame), n_room_left;
  u8 no_job_to_deq = 0;
  u16 inflight = cet->inflight;
  int dequeue_status;

  n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;

  if (n_cached_frame)
    {
      u32 i;
      for (i = 0; i < n_cached_frame; i++)
        {
          vnet_crypto_async_frame_t *f;
          void *f_ret;
          enum rte_crypto_op_status op_status;
          u8 n_left, err, j;

          GET_RING_OBJ (cet->cached_frame, i, f);

          if (i < n_cached_frame - 2)
            {
              vnet_crypto_async_frame_t *f1, *f2;
              GET_RING_OBJ (cet->cached_frame, i + 1, f1);
              GET_RING_OBJ (cet->cached_frame, i + 2, f2);
              clib_prefetch_load (f1);
              clib_prefetch_load (f2);
            }

          n_left = f->state & 0x7f;
          err = f->state & 0x80;

          for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
            {
              int ret;
              f_ret = rte_cryptodev_raw_dequeue (cet->ctx, &ret, &op_status);

              if (!f_ret)
                break;

              switch (op_status)
                {
                case RTE_CRYPTO_OP_STATUS_SUCCESS:
                  f->elts[j].status = VNET_CRYPTO_OP_STATUS_COMPLETED;
                  break;
                default:
                  f->elts[j].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
                  err |= 1 << 7;
                }

              inflight--;
            }

          if (j == f->n_elts)
            {
              if (i == 0)
                {
                  frame_ret = f;
                  f->state = err ? VNET_CRYPTO_FRAME_STATE_ELT_ERROR :
                                   VNET_CRYPTO_FRAME_STATE_SUCCESS;
                }
              else
                {
                  f->state = f->n_elts - j;
                  f->state |= err;
                }
              if (inflight)
                continue;
            }

          /* at this point f is not completely dequeued and no more jobs
           * can be dequeued */
          f->state = f->n_elts - j;
          f->state |= err;
          no_job_to_deq = 1;
          break;
        }

      if (frame_ret)
        {
          rte_ring_sc_dequeue (cet->cached_frame, (void **) &frame_ret);
          n_room_left++;
        }
    }

  /* no point in dequeuing further */
  if (!inflight || no_job_to_deq || !n_room_left)
    goto end_deq;

#if RTE_VERSION >= RTE_VERSION_NUM(21, 5, 0, 0)
  n_deq = rte_cryptodev_raw_dequeue_burst (
    cet->ctx, cryptodev_get_frame_n_elts, 0, cryptodev_post_dequeue,
    (void **) &frame, 0, &n_success, &dequeue_status);
#else
  n_deq = rte_cryptodev_raw_dequeue_burst (
    cet->ctx, cryptodev_get_frame_n_elts, cryptodev_post_dequeue,
    (void **) &frame, 0, &n_success, &dequeue_status);
#endif

  if (!n_deq)
    goto end_deq;

  inflight -= n_deq;
  no_job_to_deq = n_deq < frame->n_elts;
  /* we have to cache the frame */
  if (frame_ret || n_cached_frame || no_job_to_deq)
    {
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }
  else
    {
      frame->state = n_success == frame->n_elts ?
                       VNET_CRYPTO_FRAME_STATE_SUCCESS :
                       VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
      frame_ret = frame;
    }

  /* see if we can dequeue more */
  while (inflight && n_room_left && !no_job_to_deq)
    {
#if RTE_VERSION >= RTE_VERSION_NUM(21, 5, 0, 0)
      n_deq = rte_cryptodev_raw_dequeue_burst (
        cet->ctx, cryptodev_get_frame_n_elts, 0, cryptodev_post_dequeue,
        (void **) &frame, 0, &n_success, &dequeue_status);
#else
      n_deq = rte_cryptodev_raw_dequeue_burst (
        cet->ctx, cryptodev_get_frame_n_elts, cryptodev_post_dequeue,
        (void **) &frame, 0, &n_success, &dequeue_status);
#endif
      if (!n_deq)
        break;
      inflight -= n_deq;
      no_job_to_deq = n_deq < frame->n_elts;
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }

end_deq:
  if (inflight < cet->inflight)
    {
      int res =
        rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
      ASSERT (res == 0);
      cet->inflight = inflight;
    }

  if (frame_ret)
    {
      *nb_elts_processed = frame_ret->n_elts;
      *enqueue_thread_idx = frame_ret->enqueue_thread_index;
    }

  return frame_ret;
}
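
/* Thin wrappers that bind AAD length and operation type at compile time,
 * giving each vnet async op a dedicated enqueue handler. */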
static_always_inline int
cryptodev_raw_enq_aead_aad_8_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_12_enc (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT, 12);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_8_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT, 8);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_12_dec (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT, 12);
}

static_always_inline int
cryptodev_raw_enq_linked_alg_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_raw_enq_linked_alg_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_DECRYPT);
}
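
/* Engine registration: if any configured cryptodev instance lacks
 * RTE_CRYPTODEV_FF_SYM_RAW_DP, fall back to the conventional crypto-op
 * data path; otherwise allocate the per-thread dequeue-cache ring, AAD
 * staging buffer and raw data-path context, then register the enqueue
 * and dequeue handlers with vnet's async crypto framework. */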
clib_error_t *
cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  cryptodev_inst_t *cinst;
  struct rte_cryptodev_info info;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
  u32 support_raw_api = 1, max_ctx_size = 0;
  clib_error_t *error = 0;

  vec_foreach (cinst, cmt->cryptodev_inst)
    {
      u32 ctx_size;
      rte_cryptodev_info_get (cinst->dev_id, &info);
      if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
        {
          support_raw_api = 0;
          break;
        }

      ctx_size = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);
      max_ctx_size = clib_max (ctx_size, max_ctx_size);
    }

  if (!support_raw_api)
    return cryptodev_register_cop_hdl (vm, eidx);

  vec_foreach (cet, cmt->per_thread_data)
    {
      u32 thread_id = cet - cmt->per_thread_data;
      u32 numa = vlib_get_main_by_index (thread_id)->numa_node;
      u8 *name = format (0, "cache_frame_ring_%u_%u", numa, thread_id);

      cet->cached_frame =
        rte_ring_create ((char *) name, CRYPTODEV_DEQ_CACHE_SZ, numa,
                         RING_F_SC_DEQ | RING_F_SP_ENQ);

      cet->aad_buf = rte_zmalloc_socket (
        0, CRYPTODEV_NB_CRYPTO_OPS * CRYPTODEV_MAX_AAD_SIZE,
        CLIB_CACHE_LINE_BYTES, numa);
      if (cet->aad_buf == 0)
        {
          error = clib_error_return (0, "Failed to alloc aad buf");
          goto err_handling;
        }
      cet->aad_phy_addr = rte_malloc_virt2iova (cet->aad_buf);

      cet->ctx =
        rte_zmalloc_socket (0, max_ctx_size, CLIB_CACHE_LINE_BYTES, numa);
      if (!cet->ctx)
        {
          error = clib_error_return (0, "Failed to alloc raw dp ctx");
          goto err_handling;
        }

      if (cet->cached_frame == 0)
        {
          error = clib_error_return (0, "Failed to alloc frame ring %s", name);
          goto err_handling;
        }

      vec_free (name);
    }

  /* register AEAD algorithms */
#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
        cryptodev_raw_enq_aead_aad_##f##_enc, cryptodev_raw_dequeue);         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
        cryptodev_raw_enq_aead_aad_##f##_dec, cryptodev_raw_dequeue);         \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

  /* register linked cipher + HMAC algorithms */
#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
        cryptodev_raw_enq_linked_alg_enc, cryptodev_raw_dequeue);             \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
        cryptodev_raw_enq_linked_alg_dec, cryptodev_raw_dequeue);             \
    }
  foreach_cryptodev_link_async_alg
#undef _

  cmt->is_raw_api = 1;

  return 0;

err_handling:
  vec_foreach (cet, cmt->per_thread_data)
    {
      if (cet->cached_frame)
        rte_ring_free (cet->cached_frame);
    }

  return error;
}