/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 - 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_ring_peek_zc.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))

#define foreach_vnet_crypto_status_conversion                                 \
  _ (SUCCESS, COMPLETED)                                                      \
  _ (NOT_PROCESSED, WORK_IN_PROGRESS)                                         \
  _ (AUTH_FAILED, FAIL_BAD_HMAC)                                              \
  _ (INVALID_SESSION, FAIL_ENGINE_ERR)                                        \
  _ (INVALID_ARGS, FAIL_ENGINE_ERR)                                           \
  _ (ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};

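/* Translate a virtual address into the IO address the cryptodev PMD should
 * use: in VA mode the pointer itself is the IOVA, otherwise the physical
 * address is derived from the pmalloc page lookup table. */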
static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
                    void *data)
{
  u64 index;
  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}

static_always_inline void
cryptodev_validate_mbuf (struct rte_mbuf *mb, vlib_buffer_t *b)
{
  /* on the vnet side vlib_buffer current_length is updated by cipher padding
   * and ICV; the mbuf needs to be kept in sync with these changes */
  u16 data_len = b->current_length +
                 (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  /* for input nodes that are not dpdk-input, it is possible the mbuf
   * was updated before as one of the chained mbufs. Set nb_segs to 1 here
   * to prevent the cryptodev PMD from accessing potentially invalid
   * m_src->next pointers.
   */
  mb->nb_segs = 1;
  mb->pkt_len = mb->data_len = data_len;
}

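/* Rebuild the mbuf chain that shadows a chained vlib_buffer so the cryptodev
 * PMD sees consistent per-segment lengths, offsets and next pointers. */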
static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
                               vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */
  /* when the input node is not dpdk, the mbuf data length is not
   * initialized. For a single buffer it is not a problem since the data
   * length is written into the cryptodev operation. For a chained buffer a
   * reference data length has to be computed through the vlib_buffer.
   *
   * even when the input node is dpdk, it is possible the chained
   * vlib_buffers were updated (a buffer added or removed) but not the mbuf
   * fields. We have to re-link every mbuf in the chain.
   */
  u16 data_len = b->current_length +
                 (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
        rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
        mb->pool =
          dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}

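/* Mempool object constructor: pre-fill the static fields of every
 * rte_crypto_op once at pool creation time so the data path does not have
 * to touch them per packet. */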
static_always_inline void
crypto_op_init (struct rte_mempool *mempool,
                void *_arg __attribute__ ((unused)), void *_obj,
                unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}

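/* Enqueue handler for linked (cipher + auth) algorithms: the frame is only
 * staged on the per-thread software ring here; aad_len is set to 1 as a
 * marker distinguishing linked-alg frames from AEAD frames. The actual
 * submission to the cryptodev queue pair happens later in
 * cryptodev_frame_linked_algs_enqueue_internal(). */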
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
                                     vnet_crypto_async_frame_t *frame,
                                     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];

  cet->frames_on_ring++;
  ring_elt->f = frame;
  ring_elt->n_elts = frame->n_elts;
  ring_elt->aad_len = 1;
  ring_elt->op_type = (u8) op_type;
  ring->head++;
  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
  return 0;
}

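/* Drain one chunk of a staged linked-alg frame into the cryptodev queue
 * pair: grab crypto ops from the per-thread pool, fill the symmetric op
 * (cipher + auth offsets, digest, session) for each frame element and burst
 * enqueue them, keeping per-frame and per-thread inflight accounting. */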
static_always_inline void
cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
                                              vnet_crypto_async_frame_t *frame,
                                              cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_session_t *sess = 0;
  cryptodev_op_t *cops[CRYPTODE_ENQ_MAX] = {};
  cryptodev_op_t **cop = cops;
  u32 *bi = 0;
  u32 n_enqueue, n_elts;
  u32 last_key_index = ~0;
  u16 max_to_enq;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return;

  max_to_enq = clib_min (CRYPTODE_ENQ_MAX,
                         frame->n_elts - ring->frames[ring->enq].enqueued);

  if (cet->inflight + max_to_enq > CRYPTODEV_MAX_INFLIGHT)
    return;

  n_elts = max_to_enq;

  if (PREDICT_FALSE (
        rte_mempool_get_bulk (cet->cop_pool, (void **) cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                                       VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
      return;
    }

  fe = frame->elts + ring->frames[ring->enq].enqueued;
  bi = frame->buffer_indices + ring->frames[ring->enq].enqueued;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (n_elts > 2)
        {
          CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
          CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
          clib_prefetch_load (&fe[1]);
          clib_prefetch_load (&fe[2]);
        }
      if (last_key_index != fe->key_index)
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
          last_key_index = fe->key_index;

          if (key->keys[vm->numa_node][op_type] == 0)
            {
              if (PREDICT_FALSE (
                    cryptodev_session_create (vm, last_key_index, 0) < 0))
                {
                  cryptodev_mark_frame_err_status (
                    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                    VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
                  goto error_exit;
                }
            }
          sess = key->keys[vm->numa_node][op_type];
        }

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;

      /* mbuf prepend happens in tx, but the vlib_buffer adjustment happens
       * in the nodes, so we have to manually adjust mbuf data_off here so
       * cryptodev can correctly compute the data pointer. The prepend here
       * will later be rewritten by tx. */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
        {
          sop->m_src->data_off += fe->integ_start_offset;
          integ_offset = 0;
          crypto_offset = offset_diff;
        }
      sop->session = sess;

      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      sop->auth.digest.phys_addr =
        cryptodev_get_iova (pm, cmt->iova_mode, fe->digest);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
        cryptodev_validate_mbuf (sop->m_src, b);

      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      ring->frames[ring->enq].enqueued++;
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue =
    rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                 (struct rte_crypto_op **) cops, max_to_enq);
  ASSERT (n_enqueue == max_to_enq);
  cet->inflight += max_to_enq;
  ring->frames[ring->enq].frame_inflight += max_to_enq;
  if (ring->frames[ring->enq].enqueued == frame->n_elts)
    {
      cet->frame_ring.enq++;
      cet->frame_ring.enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
    }

  return;

error_exit:
  ring->enq++;
  ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
  rte_mempool_put_bulk (cet->cop_pool, (void **) cops, max_to_enq);
}

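/* Enqueue handler for AEAD algorithms: like the linked-alg variant, the
 * frame is only staged on the per-thread software ring, with the real AAD
 * length recorded so the deferred enqueue knows how to build the ops. */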
static_always_inline int
cryptodev_frame_aead_enqueue (vlib_main_t *vm,
                              vnet_crypto_async_frame_t *frame,
                              cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];
  cet->frames_on_ring++;
  ring_elt->f = frame;
  ring_elt->n_elts = frame->n_elts;
  ring_elt->aad_len = aad_len;
  ring_elt->op_type = (u8) op_type;
  ring->head++;
  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
  return 0;
}

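/* Drain one chunk of a staged AEAD frame into the cryptodev queue pair.
 * Sessions are cached per key; if the cached session was created for a
 * different AAD length it is deleted and re-created before use. */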
static_always_inline int
cryptodev_aead_enqueue_internal (vlib_main_t *vm,
                                 vnet_crypto_async_frame_t *frame,
                                 cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_session_t *sess = 0;
  cryptodev_op_t *cops[CRYPTODE_ENQ_MAX] = {};
  cryptodev_op_t **cop = cops;
  u32 *bi = 0;
  u32 n_enqueue = 0, n_elts;
  u32 last_key_index = ~0;
  u16 left_to_enq = frame->n_elts - ring->frames[ring->enq].enqueued;
  const u16 max_to_enq = clib_min (CRYPTODE_ENQ_MAX, left_to_enq);

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;

  if (cet->inflight + max_to_enq > CRYPTODEV_MAX_INFLIGHT)
    return -1;

  n_elts = max_to_enq;

  if (PREDICT_FALSE (
        rte_mempool_get_bulk (cet->cop_pool, (void **) cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                                       VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
      return -1;
    }

  fe = frame->elts + ring->frames[ring->enq].enqueued;
  bi = frame->buffer_indices + ring->frames[ring->enq].enqueued;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      if (n_elts > 2)
        {
          CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
          CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
          clib_prefetch_load (&fe[1]);
          clib_prefetch_load (&fe[2]);
        }
      if (last_key_index != fe->key_index)
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

          last_key_index = fe->key_index;
          if (key->keys[vm->numa_node][op_type] == 0)
            {
              if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
                                                           aad_len) < 0))
                {
                  cryptodev_mark_frame_err_status (
                    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                    VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
                  goto error_exit;
                }
            }
          else if (PREDICT_FALSE (
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
                     rte_cryptodev_sym_session_opaque_data_get (
                       key->keys[vm->numa_node][op_type]) != (u64) aad_len
#else
                     key->keys[vm->numa_node][op_type]->opaque_data != aad_len
#endif
                     ))
            {
              cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
                                      fe->key_index, aad_len);
              if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
                                                           aad_len) < 0))
                {
                  cryptodev_mark_frame_err_status (
                    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                    VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
                  goto error_exit;
                }
            }
          sess = key->keys[vm->numa_node][op_type];
        }

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;

      /* mbuf prepend happens in tx, but the vlib_buffer adjustment happens
       * in the nodes, so we have to manually adjust mbuf data_off here so
       * cryptodev can correctly compute the data pointer. The prepend here
       * will later be rewritten by tx. */
      if (PREDICT_FALSE (fe->crypto_start_offset < 0))
        {
          rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
          crypto_offset = 0;
        }

      sop->session = sess;
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      sop->aead.digest.phys_addr =
        cryptodev_get_iova (pm, cmt->iova_mode, fe->tag);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
        cryptodev_validate_mbuf (sop->m_src, b);

      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);

      ring->frames[ring->enq].enqueued++;
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue =
    rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                 (struct rte_crypto_op **) cops, max_to_enq);
  ASSERT (n_enqueue == max_to_enq);
  cet->inflight += max_to_enq;
  ring->frames[ring->enq].frame_inflight += max_to_enq;
  if (ring->frames[ring->enq].enqueued == frame->n_elts)
    {
      ring->enq++;
      ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
      cet->enqueued_not_dequeueq++;
    }

  return 0;

error_exit:
  ring->enq++;
  ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
  rte_mempool_put_bulk (cet->cop_pool, (void **) cops, max_to_enq);
  return -1;
}

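/* Dequeue up to CRYPTODE_DEQ_MAX completed crypto ops for the next frame
 * that still has operations in flight, fold the per-op status into the
 * frame elements, and mark the frame SUCCESS or ELT_ERROR once every
 * element of the frame has been dequeued. Returns nonzero when the caller
 * should keep dequeuing. */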
static_always_inline u8
cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
                                  u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame = NULL;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  u16 n_deq, idx, left_to_deq, i;
  u16 max_to_deq = 0;
  u16 inflight = cet->inflight;
  u8 dequeue_more = 0;
  cryptodev_op_t *cops[CRYPTODE_DEQ_MAX] = {};
  cryptodev_op_t **cop = cops;
  vnet_crypto_async_frame_elt_t *fe;
  u32 n_elts;
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */

  idx = ring->deq;
  for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++)
    {
      if (PREDICT_TRUE (ring->frames[idx].frame_inflight > 0))
        break;
      idx++;
      idx &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
    }

  ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
  ring->deq = idx;
  left_to_deq =
    ring->frames[ring->deq].f->n_elts - ring->frames[ring->deq].dequeued;
  max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX);

  /* the deq field tracks the frame currently being dequeued; from it we
   * know how many elements are left to dequeue for that frame */
  n_deq =
    rte_cryptodev_dequeue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                 (struct rte_crypto_op **) cops, max_to_deq);

  ss0 = ring->frames[ring->deq].deq_state;
  ss1 = ring->frames[ring->deq].deq_state;
  ss2 = ring->frames[ring->deq].deq_state;
  ss3 = ring->frames[ring->deq].deq_state;

  frame = ring->frames[ring->deq].f;
  fe = frame->elts + ring->frames[ring->deq].dequeued;
  n_elts = n_deq;

  while (n_elts >= 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];
      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
      n_elts--;
    }

  ring->frames[ring->deq].deq_state |= (u8) (ss0 | ss1 | ss2 | ss3);

  rte_mempool_put_bulk (cet->cop_pool, (void **) cops, n_deq);

  inflight -= n_deq;
  ring->frames[ring->deq].dequeued += n_deq;
  ring->frames[ring->deq].frame_inflight -= n_deq;
  if (ring->frames[ring->deq].dequeued == ring->frames[ring->deq].n_elts)
    {
      frame->state =
        (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
          VNET_CRYPTO_FRAME_STATE_SUCCESS :
          VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

      *nb_elts_processed = frame->n_elts;
      *enqueue_thread_idx = frame->enqueue_thread_index;
      cet->deqeued_not_returned++;
      cet->enqueued_not_dequeueq--;
      ring->deq++;
      ring->deq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
      dequeue_more = (max_to_deq < CRYPTODE_DEQ_MAX);
    }

  cet->inflight = inflight;
  return dequeue_more;
}

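/* Dispatch a staged ring element to the proper deferred enqueue path:
 * aad_len == 1 marks a linked-alg frame, any other value is the real AAD
 * length of an AEAD frame. */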
static_always_inline void
cryptodev_enqueue_frame (vlib_main_t *vm, cryptodev_async_ring_elt *ring_elt)
{
  cryptodev_op_type_t op_type = (cryptodev_op_type_t) ring_elt->op_type;
  u8 linked_or_aad_len = ring_elt->aad_len;

  if (linked_or_aad_len == 1)
    cryptodev_frame_linked_algs_enqueue_internal (vm, ring_elt->f, op_type);
  else
    cryptodev_aead_enqueue_internal (vm, ring_elt->f, op_type,
                                     linked_or_aad_len);
}

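/* Dequeue handler registered with the async crypto infra: drain completed
 * operations, opportunistically push more staged work to the device, and
 * return the frame at the ring tail once it has fully finished, requesting
 * a crypto-dispatch interrupt for it. */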
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                         u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_main_t *cm = &crypto_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->tail];
  vnet_crypto_async_frame_t *ret_frame = 0;
  u8 dequeue_more = 1;

  while (cet->inflight > 0 && dequeue_more)
    {
      dequeue_more = cryptodev_frame_dequeue_internal (vm, nb_elts_processed,
                                                       enqueue_thread_idx);
    }

  if (PREDICT_TRUE (ring->frames[ring->enq].f != 0))
    cryptodev_enqueue_frame (vm, &ring->frames[ring->enq]);

  if (PREDICT_TRUE (ring_elt->f != 0))
    {
      if ((ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_SUCCESS ||
           ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_ELT_ERROR) &&
          ring_elt->enqueued == ring_elt->dequeued)
        {
          vlib_node_set_interrupt_pending (
            vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
          ret_frame = ring_elt->f;
          memset (ring_elt, 0, sizeof (*ring_elt));
          ring->tail++;
          ring->tail &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
          cet->frames_on_ring--;
          cet->deqeued_not_returned--;
        }
    }

  return ret_frame;
}

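/* Thin per-algorithm enqueue handlers registered with the async crypto
 * infra; they only fix the operation type and AAD length. */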
static_always_inline int
cryptodev_enqueue_aead_aad_0_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
                                       0);
}

static_always_inline int
cryptodev_enqueue_aead_aad_8_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
                                       8);
}

static_always_inline int
cryptodev_enqueue_aead_aad_12_enc (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
                                       12);
}

static_always_inline int
cryptodev_enqueue_aead_aad_0_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
                                       0);
}

static_always_inline int
cryptodev_enqueue_aead_aad_8_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
                                       8);
}

static_always_inline int
cryptodev_enqueue_aead_aad_12_dec (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
                                       12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_DECRYPT);
}

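/* Engine registration for the crypto-op data path: create a per-thread
 * crypto op mempool, then register enqueue handlers for every AEAD and
 * linked cipher+auth algorithm the device actually supports, plus the
 * common dequeue handler. */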
clib_error_t *
cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
  u8 *name;
  clib_error_t *error = 0;

  vec_foreach (cet, cmt->per_thread_data)
    {
      u32 thread_index = cet - cmt->per_thread_data;
      u32 numa = vlib_get_main_by_index (thread_index)->numa_node;
      name = format (0, "vpp_cop_pool_%u_%u", numa, thread_index);
      cet->cop_pool = rte_mempool_create (
        (char *) name, CRYPTODEV_NB_CRYPTO_OPS, sizeof (cryptodev_op_t), 0,
        sizeof (struct rte_crypto_op_pool_private), NULL, NULL, crypto_op_init,
        NULL, vm->numa_node, 0);
      if (!cet->cop_pool)
        {
          error = clib_error_return (
            0, "Failed to create cryptodev op pool %s", name);
          vec_free (name);
          goto error_exit;
        }
      vec_free (name);
    }

#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
        cryptodev_enqueue_aead_aad_##f##_enc);                                \
      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
        cryptodev_enqueue_aead_aad_##f##_dec);                                \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
        cryptodev_enqueue_linked_alg_enc);                                    \
      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
        cryptodev_enqueue_linked_alg_dec);                                    \
    }
  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_frame_dequeue);

  return 0;

error_exit:
  vec_foreach (cet, cmt->per_thread_data)
    {
      if (cet->cop_pool)
        rte_mempool_free (cet->cop_pool);
    }

  return error;
}