 *------------------------------------------------------------------
 * Copyright (c) 2019 - 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_ring_peek_zc.h>
#include <rte_config.h>

#include "cryptodev.h"
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))
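
/* The AAD bytes of an AEAD operation live inline in cryptodev_op_t, so the
 * device-visible address of the AAD can be derived from the op's phys_addr
 * plus this fixed offset (see the aead.aad.phys_addr setup in the AEAD
 * enqueue path below). */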
#define foreach_vnet_crypto_status_conversion                                 \
  _ (SUCCESS, COMPLETED)                                                      \
  _ (NOT_PROCESSED, WORK_IN_PROGRESS)                                         \
  _ (AUTH_FAILED, FAIL_BAD_HMAC)                                              \
  _ (INVALID_SESSION, FAIL_ENGINE_ERR)                                        \
  _ (INVALID_ARGS, FAIL_ENGINE_ERR)                                           \
  _ (ERROR, FAIL_ENGINE_ERR)
static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};
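
/* The table is indexed directly by the DPDK rte_crypto_op status value, e.g.
 * RTE_CRYPTO_OP_STATUS_AUTH_FAILED maps to
 * VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC (see its use in
 * cryptodev_frame_dequeue_internal below). */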
static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
		    void *data)
{
  u32 index;

  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}
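
/* In RTE_IOVA_VA mode the IO virtual address is simply the process virtual
 * address; in PA mode the physical address is recovered by subtracting the
 * per-page VA-to-PA delta that VPP's physmem allocator keeps in
 * pm->lookup_table. */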
static_always_inline void
cryptodev_validate_mbuf (struct rte_mbuf *mb, vlib_buffer_t *b)
{
  /* On the vnet side the vlib_buffer current_length is updated by cipher
   * padding and ICV. The mbuf needs to be kept in sync with these changes. */
  u16 data_len = b->current_length +
		 (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  /* For input nodes other than dpdk-input it is possible the mbuf was
   * previously updated as one of the chained mbufs. Setting nb_segs to 1
   * here prevents the cryptodev PMD from accessing potentially invalid
   * m_src->next pointers. */
  mb->nb_segs = 1;
  mb->pkt_len = mb->data_len = data_len;
}
static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
			       vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */
  /* When the input node is not dpdk-input, the mbuf data length is not
   * initialized. For a single buffer this is not a problem since the data
   * length is written into the cryptodev operation, but for a chained buffer
   * a reference data length has to be computed from the vlib_buffer.
   *
   * Even when the input node is dpdk-input, it is possible that the chained
   * vlib_buffers were updated (a buffer added or removed) without the mbuf
   * fields being updated, so every mbuf in the chain has to be re-linked.
   */
  u16 data_len = b->current_length +
		 (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}
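
/* In the loop above, buffers with ref_count > 1 are shared with other users,
 * so their mbufs are pointed at the no-cache mempool; this is presumably to
 * keep the per-core mempool cache consistent when such an mbuf is eventually
 * freed from a different context. */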
static_always_inline void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)), void *_obj,
		unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
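
/* crypto_op_init is passed as the obj_init callback to rte_mempool_create in
 * cryptodev_register_cop_hdl below, so each cryptodev_op_t is initialized
 * exactly once at pool creation time rather than on every enqueue. */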
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  ERROR_ASSERT (frame != 0);
  ERROR_ASSERT (frame->n_elts > 0);
  cryptodev_cache_ring_elt_t *ring_elt =
    cryptodev_cache_ring_push (ring, frame);

  ring_elt->aad_len = 1;
  ring_elt->op_type = (u8) op_type;
  return 0;
}
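
/* Enqueue is split in two stages: the handlers registered with the crypto
 * framework (this function and cryptodev_frame_aead_enqueue below) only push
 * the frame onto the per-thread cache ring, while the actual burst enqueue
 * to the cryptodev queue pair happens later in the *_enqueue_internal
 * functions, driven from cryptodev_frame_dequeue. An aad_len of 1 marks a
 * linked (cipher+auth) frame, since real AEAD AAD lengths are 0, 8 or 12
 * bytes (see cryptodev_enqueue_frame). */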
static_always_inline void
cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
					      vnet_crypto_async_frame_t *frame,
					      cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  u16 *const enq = &ring->enq_head;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_session_t *sess = 0;
  cryptodev_op_t *cops[CRYPTODE_ENQ_MAX] = {};
  cryptodev_op_t **cop = cops;
  u32 *bi = 0;
  u32 n_enqueue, n_elts;
  u32 last_key_index = ~0;
  u32 max_to_enq;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return;

  max_to_enq = clib_min (CRYPTODE_ENQ_MAX,
			 frame->n_elts - ring->frames[*enq].enq_elts_head);

  if (cet->inflight + max_to_enq > CRYPTODEV_MAX_INFLIGHT)
    return;

  n_elts = max_to_enq;

  if (PREDICT_FALSE (
	rte_mempool_get_bulk (cet->cop_pool, (void **) cops, n_elts) < 0))
    {
      cryptodev_mark_frame_fill_err (
	frame, ring->frames[*enq].frame_elts_errs_mask,
	ring->frames[*enq].enq_elts_head, max_to_enq,
	VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      ring->frames[*enq].enq_elts_head += max_to_enq;
      ring->frames[*enq].deq_elts_tail += max_to_enq;
      cryptodev_cache_ring_update_enq_head (ring, frame);
      return;
    }

  fe = frame->elts + ring->frames[*enq].enq_elts_head;
  bi = frame->buffer_indices + ring->frames[*enq].enq_elts_head;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
	  CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
	  clib_prefetch_load (&fe[1]);
	  clib_prefetch_load (&fe[2]);
	}
      if (last_key_index != fe->key_index)
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
	  last_key_index = fe->key_index;

	  if (key->keys[vm->numa_node][op_type] == 0)
	    {
	      if (PREDICT_FALSE (
		    cryptodev_session_create (vm, last_key_index, 0) < 0))
		{
		  cryptodev_mark_frame_fill_err (
		    frame, ring->frames[*enq].frame_elts_errs_mask,
		    ring->frames[*enq].enq_elts_head, max_to_enq,
		    VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
		  goto error_exit;
		}
	    }
	  sess = key->keys[vm->numa_node][op_type];
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;

      /* The mbuf prepend happens in tx, but the vlib_buffer adjustment
       * happens in the graph nodes, so the mbuf data_off has to be adjusted
       * manually here so that cryptodev computes the correct data pointer.
       * The prepend done here is later rewritten by tx. */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
	{
	  sop->m_src->data_off += fe->integ_start_offset;
	  integ_offset = 0;
	  crypto_offset = offset_diff;
	}
      sop->session = sess;
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      sop->auth.digest.phys_addr =
	cryptodev_get_iova (pm, cmt->iova_mode, fe->digest);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	cryptodev_validate_mbuf (sop->m_src, b);

      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      ring->frames[*enq].enq_elts_head++;
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue =
    rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
				 (struct rte_crypto_op **) cops, max_to_enq);
  ERROR_ASSERT (n_enqueue == max_to_enq);
  cet->inflight += max_to_enq;
  cryptodev_cache_ring_update_enq_head (ring, frame);
  return;

error_exit:
  ring->frames[*enq].enq_elts_head += max_to_enq;
  ring->frames[*enq].deq_elts_tail += max_to_enq;
  cryptodev_cache_ring_update_enq_head (ring, frame);
  rte_mempool_put_bulk (cet->cop_pool, (void **) cops, max_to_enq);
}
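
/* On any failure above (op mempool exhaustion or session creation error) the
 * remaining elements are marked with FAIL_ENGINE_ERR and both ring indices
 * are advanced as if those elements had completed, so the frame still drains
 * through the normal dequeue path instead of getting stuck in the cache
 * ring. */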
static_always_inline int
cryptodev_frame_aead_enqueue (vlib_main_t *vm,
			      vnet_crypto_async_frame_t *frame,
			      cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  ERROR_ASSERT (frame != 0);
  ERROR_ASSERT (frame->n_elts > 0);
  cryptodev_cache_ring_elt_t *ring_elt =
    cryptodev_cache_ring_push (ring, frame);

  ring_elt->aad_len = aad_len;
  ring_elt->op_type = (u8) op_type;
  return 0;
}
static_always_inline int
cryptodev_aead_enqueue_internal (vlib_main_t *vm,
				 vnet_crypto_async_frame_t *frame,
				 cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  u16 *const enq = &ring->enq_head;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_session_t *sess = 0;
  cryptodev_op_t *cops[CRYPTODE_ENQ_MAX] = {};
  cryptodev_op_t **cop = cops;
  u32 *bi = 0;
  u32 n_enqueue = 0, n_elts;
  u32 last_key_index = ~0;
  u16 left_to_enq = frame->n_elts - ring->frames[*enq].enq_elts_head;
  const u16 max_to_enq = clib_min (CRYPTODE_ENQ_MAX, left_to_enq);

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;

  if (cet->inflight + max_to_enq > CRYPTODEV_MAX_INFLIGHT)
    return -1;

  n_elts = max_to_enq;

  if (PREDICT_FALSE (
	rte_mempool_get_bulk (cet->cop_pool, (void **) cops, n_elts) < 0))
    {
      cryptodev_mark_frame_fill_err (
	frame, ring->frames[*enq].frame_elts_errs_mask,
	ring->frames[*enq].enq_elts_head, max_to_enq,
	VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      ring->frames[*enq].enq_elts_head += max_to_enq;
      ring->frames[*enq].deq_elts_tail += max_to_enq;
      cryptodev_cache_ring_update_enq_head (ring, frame);
      return -1;
    }

  fe = frame->elts + ring->frames[*enq].enq_elts_head;
  bi = frame->buffer_indices + ring->frames[*enq].enq_elts_head;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
	  CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
	  clib_prefetch_load (&fe[1]);
	  clib_prefetch_load (&fe[2]);
	}
      if (last_key_index != fe->key_index)
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

	  last_key_index = fe->key_index;
	  if (key->keys[vm->numa_node][op_type] == 0)
	    {
	      if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
							   aad_len) < 0))
		{
		  cryptodev_mark_frame_fill_err (
		    frame, ring->frames[*enq].frame_elts_errs_mask,
		    ring->frames[*enq].enq_elts_head, max_to_enq,
		    VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
		  goto error_exit;
		}
	    }
	  else if (PREDICT_FALSE (
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
		     rte_cryptodev_sym_session_opaque_data_get (
		       key->keys[vm->numa_node][op_type]) != (u64) aad_len
#else
		     key->keys[vm->numa_node][op_type]->opaque_data != aad_len
#endif
		     ))
	    {
	      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
				      fe->key_index, aad_len);
	      if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
							   aad_len) < 0))
		{
		  cryptodev_mark_frame_fill_err (
		    frame, ring->frames[*enq].frame_elts_errs_mask,
		    ring->frames[*enq].enq_elts_head, max_to_enq,
		    VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
		  goto error_exit;
		}
	    }

	  sess = key->keys[vm->numa_node][op_type];
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;

      /* The mbuf prepend happens in tx, but the vlib_buffer adjustment
       * happens in the graph nodes, so the mbuf data_off has to be adjusted
       * manually here so that cryptodev computes the correct data pointer.
       * The prepend done here is later rewritten by tx. */
      if (PREDICT_FALSE (fe->crypto_start_offset < 0))
	{
	  rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
	  crypto_offset = 0;
	}

      sop->session = sess;
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      sop->aead.digest.phys_addr =
	cryptodev_get_iova (pm, cmt->iova_mode, fe->tag);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	cryptodev_validate_mbuf (sop->m_src, b);

      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);

      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue =
    rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
				 (struct rte_crypto_op **) cops, max_to_enq);
  ERROR_ASSERT (n_enqueue == max_to_enq);
  cet->inflight += max_to_enq;
  ring->frames[*enq].enq_elts_head += max_to_enq;
  cryptodev_cache_ring_update_enq_head (ring, frame);

  return 0;

error_exit:
  ring->frames[*enq].enq_elts_head += max_to_enq;
  ring->frames[*enq].deq_elts_tail += max_to_enq;
  cryptodev_cache_ring_update_enq_head (ring, frame);
  rte_mempool_put_bulk (cet->cop_pool, (void **) cops, max_to_enq);

  return -1;
}
static_always_inline u8
cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
				  u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame = NULL;
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  u16 *const deq = &ring->deq_tail;
  u16 n_deq, idx, left_to_deq, i;
  u16 max_to_deq = 0;
  u16 inflight = cet->inflight;
  u8 dequeue_more = 0;
  cryptodev_op_t *cops[CRYPTODE_DEQ_MAX] = {};
  cryptodev_op_t **cop = cops;
  vnet_crypto_async_frame_elt_t *fe;
  u32 n_elts, n;
  u64 err0 = 0, err1 = 0, err2 = 0, err3 = 0; /* partial errors mask */

  idx = ring->deq_tail;

  for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++)
    {
      u32 frame_inflight =
	CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT (ring, idx);

      if (PREDICT_TRUE (frame_inflight > 0))
	break;
      idx++;
      idx &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
    }

  ERROR_ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
  ring->deq_tail = idx;

  left_to_deq =
    ring->frames[*deq].f->n_elts - ring->frames[*deq].deq_elts_tail;
  max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX);

  /* The deq field tracks the frame that is currently being dequeued; based
   * on it the number of elements to dequeue for that frame is chosen. */
  n_deq =
    rte_cryptodev_dequeue_burst (cet->cryptodev_id, cet->cryptodev_q,
				 (struct rte_crypto_op **) cops, max_to_deq);

  if (n_deq == 0)
    return dequeue_more;

  frame = ring->frames[*deq].f;
  fe = frame->elts + ring->frames[*deq].deq_elts_tail;

  n_elts = n_deq;
  n = ring->frames[*deq].deq_elts_tail;

  while (n_elts > 4)
    {
      fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      fe[3].status = cryptodev_status_conversion[cop[3]->op.status];

      err0 |= ((u64) (fe[0].status == VNET_CRYPTO_OP_STATUS_COMPLETED)) << n;
      err1 |= ((u64) (fe[1].status == VNET_CRYPTO_OP_STATUS_COMPLETED))
	      << (n + 1);
      err2 |= ((u64) (fe[2].status == VNET_CRYPTO_OP_STATUS_COMPLETED))
	      << (n + 2);
      err3 |= ((u64) (fe[3].status == VNET_CRYPTO_OP_STATUS_COMPLETED))
	      << (n + 3);

      cop += 4;
      fe += 4;
      n += 4;
      n_elts -= 4;
    }

  while (n_elts)
    {
      fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      err0 |= (fe[0].status == VNET_CRYPTO_OP_STATUS_COMPLETED) << n;

      n++;
      fe++;
      cop++;
      n_elts--;
    }

  ring->frames[*deq].frame_elts_errs_mask |= (err0 | err1 | err2 | err3);

  rte_mempool_put_bulk (cet->cop_pool, (void **) cops, n_deq);

  inflight -= n_deq;
  ring->frames[*deq].deq_elts_tail += n_deq;
  if (cryptodev_cache_ring_update_deq_tail (ring, deq))
    {
      *nb_elts_processed = frame->n_elts;
      *enqueue_thread_idx = frame->enqueue_thread_index;
      dequeue_more = (max_to_deq < CRYPTODE_DEQ_MAX);
    }

  cet->inflight = inflight;
  return dequeue_more;
}
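
/* The dequeue_more value returned above tells cryptodev_frame_dequeue
 * whether to keep draining: when the current frame needed less than a full
 * CRYPTODE_DEQ_MAX burst, completions for the next frame may already be
 * available, so the caller polls the queue pair again in the same dispatch. */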
static_always_inline void
cryptodev_enqueue_frame (vlib_main_t *vm, cryptodev_cache_ring_elt_t *ring_elt)
{
  cryptodev_op_type_t op_type = (cryptodev_op_type_t) ring_elt->op_type;
  u8 linked_or_aad_len = ring_elt->aad_len;

  if (linked_or_aad_len == 1)
    cryptodev_frame_linked_algs_enqueue_internal (vm, ring_elt->f, op_type);
  else
    cryptodev_aead_enqueue_internal (vm, ring_elt->f, op_type,
				     linked_or_aad_len);
}
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			 u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_main_t *cm = &crypto_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  cryptodev_cache_ring_elt_t *ring_elt = &ring->frames[ring->tail];
  vnet_crypto_async_frame_t *ret_frame = 0;
  u8 dequeue_more = 1;

  while (cet->inflight > 0 && dequeue_more)
    {
      dequeue_more = cryptodev_frame_dequeue_internal (vm, nb_elts_processed,
						       enqueue_thread_idx);
    }

  if (PREDICT_TRUE (ring->frames[ring->enq_head].f != 0))
    cryptodev_enqueue_frame (vm, &ring->frames[ring->enq_head]);

  if (PREDICT_TRUE (ring_elt->f != 0))
    {
      if (ring_elt->enq_elts_head == ring_elt->deq_elts_tail)
	{
	  vlib_node_set_interrupt_pending (
	    vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
	  ret_frame = cryptodev_cache_ring_pop (ring);
	}
    }

  return ret_frame;
}
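
/* cryptodev_frame_dequeue above is the handler registered via
 * vnet_crypto_register_dequeue_handler at the bottom of this file. Each call
 * first drains completed operations from the queue pair, then kicks the
 * pending enqueue for the frame at the head of the cache ring, and finally
 * pops the tail frame and signals the crypto dispatch node once all of that
 * frame's elements have been both enqueued and dequeued. */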
static_always_inline int
cryptodev_enqueue_aead_aad_0_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
				       0);
}

static_always_inline int
cryptodev_enqueue_aead_aad_8_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
				       8);
}

static_always_inline int
cryptodev_enqueue_aead_aad_12_enc (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
				       12);
}

static_always_inline int
cryptodev_enqueue_aead_aad_0_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
				       0);
}

static_always_inline int
cryptodev_enqueue_aead_aad_8_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
				       8);
}

static_always_inline int
cryptodev_enqueue_aead_aad_12_dec (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
				       12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_DECRYPT);
}
clib_error_t *
cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
  u8 *name = 0;
  clib_error_t *error = 0;

  vec_foreach (cet, cmt->per_thread_data)
    {
      u32 thread_index = cet - cmt->per_thread_data;
      u32 numa = vlib_get_main_by_index (thread_index)->numa_node;
      name = format (0, "vpp_cop_pool_%u_%u", numa, thread_index);
      cet->cop_pool = rte_mempool_create (
	(char *) name, CRYPTODEV_NB_CRYPTO_OPS, sizeof (cryptodev_op_t), 0,
	sizeof (struct rte_crypto_op_pool_private), NULL, NULL, crypto_op_init,
	NULL, vm->numa_node, 0);
      if (!cet->cop_pool)
	{
	  error = clib_error_return (
	    0, "Failed to create cryptodev op pool %s", name);
	  goto error_exit;
	}
      vec_free (name);
    }

#define _(a, b, c, d, e, f, g)                                               \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                             \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                             \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                  \
    {                                                                        \
      vnet_crypto_register_enqueue_handler (                                 \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                \
	cryptodev_enqueue_aead_aad_##f##_enc);                               \
      vnet_crypto_register_enqueue_handler (                                 \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                \
	cryptodev_enqueue_aead_aad_##f##_dec);                               \
    }
  foreach_vnet_aead_crypto_conversion
#undef _
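
  /* Each expansion of the AEAD macro above checks device capability for one
   * (algorithm, tag length, AAD length) combination and, when supported,
   * registers the matching encrypt and decrypt enqueue handlers; the linked
   * cipher+auth registrations below follow the same pattern using separate
   * cipher and auth capability checks. */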
#define _(a, b, c, d, e)                                                     \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                             \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                       \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                         \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                        \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&            \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                \
    {                                                                        \
      vnet_crypto_register_enqueue_handler (                                 \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                   \
	cryptodev_enqueue_linked_alg_enc);                                   \
      vnet_crypto_register_enqueue_handler (                                 \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                   \
	cryptodev_enqueue_linked_alg_dec);                                   \
    }
  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_frame_dequeue);

  return 0;

error_exit:
  vec_foreach (cet, cmt->per_thread_data)
    {
      rte_mempool_free (cet->cop_pool);