/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_malloc.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
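
/* Compute the relative cipher and auth offsets expected by the raw data-path
 * API for a linked (cipher + auth) operation, and return the overall span of
 * the operation through min_ofs/max_end. */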
static_always_inline u64
compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t *fe, i16 *min_ofs,
			u32 *max_end)
{
  union rte_crypto_sym_ofs ofs;
  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
  u32 integ_end =
    fe->integ_start_offset + fe->crypto_total_length + fe->integ_length_adj;

  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
  *max_end = clib_max (crypto_end, integ_end);

  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
  ofs.ofs.cipher.tail = *max_end - crypto_end;
  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
  ofs.ofs.auth.tail = *max_end - integ_end;

  return ofs.raw;
}
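
/* Fill data_vec[1..] with the remaining segments of a chained buffer, up to
 * size bytes; fails if the chain needs more than CRYPTODEV_MAX_N_SGL
 * segments or is shorter than size. */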
static_always_inline int
cryptodev_frame_build_sgl (vlib_main_t *vm, enum rte_iova_mode iova_mode,
			   struct rte_crypto_vec *data_vec, u16 *n_seg,
			   vlib_buffer_t *b, u32 size)
{
  struct rte_crypto_vec *vec = data_vec + 1;
  if (vlib_buffer_chain_linearize (vm, b) > CRYPTODEV_MAX_N_SGL)
    return -1;

  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
    {
      u32 len;
      b = vlib_get_buffer (vm, b->next_buffer);
      len = clib_min (b->current_length, size);
      vec->base = (void *) vlib_buffer_get_current (b);
      if (iova_mode == RTE_IOVA_VA)
	vec->iova = pointer_to_uword (vec->base);
      else
	vec->iova = vlib_buffer_get_current_pa (vm, b);

      vec->len = len;
      size -= len;
      vec++;
      *n_seg += 1;
    }

  if (size)
    return -1;

  return 0;
}
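
/* Re-attach a known-good session to the raw data-path context; used to bring
 * the context back to a sane state after a failed enqueue. */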
static_always_inline void
cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
{
  union rte_cryptodev_session_ctx sess_ctx;

  ASSERT (cet->reset_sess != 0);

  sess_ctx.crypto_sess = cet->reset_sess;

  rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q,
				      cet->ctx, RTE_CRYPTO_OP_WITH_SESSION,
				      sess_ctx, 0);
}
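
/* Enqueue handler for linked (cipher + auth) algorithms. It only books the
 * frame on the per-thread software ring; the device enqueue is done later by
 * cryptodev_frame_linked_algs_enqueue_internal (). aad_len == 1 marks the
 * ring element as a linked operation, see cryptodev_enqueue_frame (). */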
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];

  cet->frames_on_ring++;
  ring_elt->f = frame;
  ring_elt->n_elts = frame->n_elts;
  ring_elt->aad_len = 1;
  ring_elt->op_type = (u8) op_type;
  ring->head++;
  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
  return 0;
}
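
/* Enqueue up to CRYPTODE_ENQ_MAX elements of a linked-algorithm frame to the
 * device. A partially enqueued frame stays at ring->enq and is continued by
 * a later call. */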
static_always_inline void
cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
					      vnet_crypto_async_frame_t *frame,
					      cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  u32 n_elts;
  u32 last_key_index = ~0;
  i16 min_ofs;
  u32 max_end;
  u32 max_to_enq = clib_min (CRYPTODE_ENQ_MAX,
			     frame->n_elts - ring->frames[ring->enq].enqueued);
  u8 is_update = 0;
  int status;

  if (cet->inflight + max_to_enq > CRYPTODEV_MAX_INFLIGHT)
    return;

  n_elts = max_to_enq;

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  b = cet->b + ring->frames[ring->enq].enqueued;
  fe = frame->elts + ring->frames[ring->enq].enqueued;

  while (n_elts)
    {
      union rte_crypto_sym_ofs cofs;
      u16 n_seg = 1;

      if (n_elts > 2)
	{
	  clib_prefetch_load (&fe[1]);
	  clib_prefetch_load (&fe[2]);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	  vlib_prefetch_buffer_header (b[2], LOAD);
	}

      if (PREDICT_FALSE (last_key_index != fe->key_index))
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
	  union rte_cryptodev_session_ctx sess_ctx;

	  if (PREDICT_FALSE (key->keys[vm->numa_node][op_type] == 0))
	    {
	      status = cryptodev_session_create (vm, fe->key_index, 0);
	      if (PREDICT_FALSE (status < 0))
		goto error_exit;
	    }

	  /* Borrow a created session to reset session ctx, based on a valid
	   * assumption that the session reset won't happen until first valid
	   * packet is processed */
	  if (PREDICT_FALSE (cet->reset_sess == 0))
	    cet->reset_sess = key->keys[vm->numa_node][op_type];

	  sess_ctx.crypto_sess = key->keys[vm->numa_node][op_type];

	  status = rte_cryptodev_configure_raw_dp_ctx (
	    cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
	    RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, is_update);
	  if (PREDICT_FALSE (status < 0))
	    goto error_exit;

	  last_key_index = fe->key_index;
	  is_update = 1;
	}

      cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

      vec->len = max_end - min_ofs;
      if (cmt->iova_mode == RTE_IOVA_VA)
	{
	  vec[0].base = (void *) (b[0]->data + min_ofs);
	  vec[0].iova = pointer_to_uword (b[0]->data) + min_ofs;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = pointer_to_uword (fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = pointer_to_uword (fe->tag);
	}
      else
	{
	  vec[0].base = (void *) (b[0]->data + min_ofs);
	  vec[0].iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = vlib_physmem_get_pa (vm, fe->digest);
	}

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	{
	  vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs;
	  if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
					 max_end - min_ofs - vec->len) < 0)
	    goto error_exit;
	}

      status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
					  &digest_vec, 0, (void *) frame);
      if (PREDICT_FALSE (status < 0))
	goto error_exit;

      ring->frames[ring->enq].enqueued += 1;
      b++;
      fe++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, max_to_enq);
  if (PREDICT_FALSE (status < 0))
    goto error_exit;

  cet->inflight += max_to_enq;
  ring->frames[ring->enq].frame_inflight += max_to_enq;
  if (ring->frames[ring->enq].enqueued == frame->n_elts)
    {
      cet->frame_ring.enq += 1;
      cet->frame_ring.enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
    }

  return;

error_exit:
  cryptodev_mark_frame_err_status (frame,
				   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
				   VNET_CRYPTO_FRAME_STATE_ELT_ERROR);
  cryptodev_reset_ctx (cet);
  cet->frame_ring.enq += 1;
  cet->frame_ring.enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
}
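
/* Enqueue handler for AEAD algorithms: book the frame on the per-thread
 * software ring, recording the real AAD length; the device enqueue is done
 * later by cryptodev_raw_aead_enqueue_internal (). */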
static_always_inline int
cryptodev_raw_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame,
			    cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];

  cet->frames_on_ring++;
  ring_elt->f = frame;
  ring_elt->n_elts = frame->n_elts;
  ring_elt->aad_len = aad_len;
  ring_elt->op_type = (u8) op_type;
  ring->head++;
  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
  return 0;
}
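
/* Enqueue up to CRYPTODE_ENQ_MAX elements of an AEAD frame to the device.
 * AAD bytes are copied into the per-thread aad_buf so they have a known
 * IOVA, and the session is recreated if its AAD length changed. */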
static_always_inline void
cryptodev_raw_aead_enqueue_internal (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  u32 n_elts;
  union rte_crypto_sym_ofs cofs;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
  u32 last_key_index = ~0;
  u16 left_to_enq = frame->n_elts - ring->frames[ring->enq].enqueued;
  u16 max_to_enq = clib_min (CRYPTODE_ENQ_MAX, left_to_enq);
  u8 is_update = 0;
  int status;

  if (cet->inflight + max_to_enq > CRYPTODEV_MAX_INFLIGHT)
    return;

  n_elts = max_to_enq;
  cofs.raw = 0;

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  fe = frame->elts + ring->frames[ring->enq].enqueued;
  b = cet->b + ring->frames[ring->enq].enqueued;

  while (n_elts)
    {
      u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
      u16 n_seg = 1;

      if (n_elts > 1)
	{
	  clib_prefetch_load (&fe[1]);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	}

      if (PREDICT_FALSE (last_key_index != fe->key_index))
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
	  union rte_cryptodev_session_ctx sess_ctx;

	  if (PREDICT_FALSE (key->keys[vm->numa_node][op_type] == 0))
	    {
	      status = cryptodev_session_create (vm, fe->key_index, aad_len);
	      if (PREDICT_FALSE (status < 0))
		goto error_exit;
	    }

	  if (PREDICT_FALSE (
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
		rte_cryptodev_sym_session_opaque_data_get (
		  key->keys[vm->numa_node][op_type]) != (u64) aad_len
#else
		(u8) key->keys[vm->numa_node][op_type]->opaque_data != aad_len
#endif
		))
	    {
	      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
				      fe->key_index, aad_len);
	      status = cryptodev_session_create (vm, fe->key_index, aad_len);
	      if (PREDICT_FALSE (status < 0))
		goto error_exit;
	    }

	  /* Borrow a created session to reset session ctx, based on a valid
	   * assumption that the session reset won't happen until first valid
	   * packet is processed */
	  if (PREDICT_FALSE (cet->reset_sess == 0))
	    cet->reset_sess = key->keys[vm->numa_node][op_type];

	  sess_ctx.crypto_sess = key->keys[vm->numa_node][op_type];

	  status = rte_cryptodev_configure_raw_dp_ctx (
	    cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
	    RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, is_update);
	  if (PREDICT_FALSE (status < 0))
	    goto error_exit;

	  last_key_index = fe->key_index;
	  is_update = 1;
	}

      if (cmt->iova_mode == RTE_IOVA_VA)
	{
	  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
	  vec[0].iova = pointer_to_uword (vec[0].base);
	  vec[0].len = fe->crypto_total_length;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = pointer_to_uword (fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = pointer_to_uword (fe->tag);
	  aad_vec.va = (void *) (cet->aad_buf + aad_offset);
	  aad_vec.iova = cet->aad_phy_addr + aad_offset;
	}
      else
	{
	  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
	  vec[0].iova =
	    vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
	  vec[0].len = fe->crypto_total_length;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
	  aad_vec.va = (void *) (cet->aad_buf + aad_offset);
	  aad_vec.iova = cet->aad_phy_addr + aad_offset;
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
	}

      if (aad_len == 8)
	*(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
      else if (aad_len != 0)
	{
	  *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
	  *(u32 *) (cet->aad_buf + aad_offset + 8) = *(u32 *) (fe->aad + 8);
	}

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	{
	  vec[0].len = b[0]->current_data + b[0]->current_length -
		       fe->crypto_start_offset;
	  status =
	    cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
				       fe->crypto_total_length - vec[0].len);
	  if (status < 0)
	    goto error_exit;
	}

      status =
	rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
				   &digest_vec, &aad_vec, (void *) frame);
      if (PREDICT_FALSE (status < 0))
	goto error_exit;

      ring->frames[ring->enq].enqueued += 1;
      fe++;
      b++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, max_to_enq);
  if (PREDICT_FALSE (status < 0))
    goto error_exit;

  cet->inflight += max_to_enq;
  ring->frames[ring->enq].frame_inflight += max_to_enq;
  if (ring->frames[ring->enq].enqueued == frame->n_elts)
    {
      ring->enq += 1;
      ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
      cet->enqueued_not_dequeueq++;
    }

  return;

error_exit:
  cryptodev_mark_frame_err_status (frame,
				   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
				   VNET_CRYPTO_FRAME_STATE_ELT_ERROR);
  cryptodev_reset_ctx (cet);
  ring->enq += 1;
  ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
}
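
/* Per-element callback invoked from rte_cryptodev_raw_dequeue_burst () to
 * record each element's completion status into the frame. */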
static_always_inline void
cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;

  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
					  VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
}
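
/* Dequeue up to CRYPTODE_DEQ_MAX completed elements of the frame currently
 * pointed to by ring->deq. Returns non-zero when the caller may keep
 * dequeuing. */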
static_always_inline u8
cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
				u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  u16 n_deq, indice, i, left_to_deq;
  u16 max_to_deq = 0;
  u16 inflight = cet->inflight;
  u8 dequeue_more = 0;
  int dequeue_status;
  u32 n_success;

  indice = ring->deq;

  for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++)
    {
      if (PREDICT_TRUE (ring->frames[indice].frame_inflight > 0))
	break;
      indice += 1;
      indice &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
    }

  ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);

  ring->deq = indice;

  left_to_deq =
    ring->frames[ring->deq].f->n_elts - ring->frames[ring->deq].dequeued;
  max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX);

  /* The deq field tracks the frame currently being dequeued; use it to bound
   * the number of elements to dequeue for that frame in this burst. */
  n_deq = rte_cryptodev_raw_dequeue_burst (
    cet->ctx, NULL, max_to_deq, cryptodev_post_dequeue, (void **) &frame, 0,
    &n_success, &dequeue_status);

  if (!n_deq)
    return dequeue_more;

  inflight -= n_deq;
  ring->frames[ring->deq].dequeued += n_deq;
  ring->frames[ring->deq].deq_state += n_success;
  ring->frames[ring->deq].frame_inflight -= n_deq;

  if (ring->frames[ring->deq].dequeued == ring->frames[ring->deq].n_elts)
    {
      frame->state = ring->frames[ring->deq].deq_state == frame->n_elts ?
		       VNET_CRYPTO_FRAME_STATE_SUCCESS :
		       VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
      *nb_elts_processed = frame->n_elts;
      *enqueue_thread_idx = frame->enqueue_thread_index;
      cet->deqeued_not_returned++;
      cet->enqueued_not_dequeueq--;
      ring->deq += 1;
      ring->deq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
      dequeue_more = max_to_deq < CRYPTODE_DEQ_MAX;
    }

  int res =
    rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
  ASSERT (res == 0);
  cet->inflight = inflight;
  return dequeue_more;
}
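
/* Dispatch a booked ring element to the proper internal enqueue routine;
 * aad_len == 1 denotes a linked (cipher + auth) operation, any other value
 * is the AAD length of an AEAD operation. */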
static_always_inline void
cryptodev_enqueue_frame (vlib_main_t *vm, cryptodev_async_ring_elt *ring_elt)
{
  cryptodev_op_type_t op_type = (cryptodev_op_type_t) ring_elt->op_type;
  u8 linked_or_aad_len = ring_elt->aad_len;

  if (linked_or_aad_len == 1)
    cryptodev_frame_linked_algs_enqueue_internal (vm, ring_elt->f, op_type);
  else
    cryptodev_raw_aead_enqueue_internal (vm, ring_elt->f, op_type,
					 linked_or_aad_len);
}
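
/* Dequeue handler registered with the async crypto framework: drain
 * completed elements, resume any partially enqueued frame, and return the
 * frame at the ring tail once all of its elements have completed. */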
static_always_inline vnet_crypto_async_frame_t *
cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
		       u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_main_t *cm = &crypto_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->tail];
  vnet_crypto_async_frame_t *ret_frame = 0;
  u8 dequeue_more = 1;

  while (cet->inflight > 0 && dequeue_more)
    {
      dequeue_more = cryptodev_raw_dequeue_internal (vm, nb_elts_processed,
						     enqueue_thread_idx);
    }

  if (PREDICT_TRUE (ring->frames[ring->enq].f != 0))
    cryptodev_enqueue_frame (vm, &ring->frames[ring->enq]);

  if (PREDICT_TRUE (ring_elt->f != 0))
    {
      if ((ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_SUCCESS ||
	   ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_ELT_ERROR) &&
	  ring_elt->enqueued == ring_elt->dequeued)
	{
	  vlib_node_set_interrupt_pending (
	    vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
	  ret_frame = ring_elt->f;
	  ring_elt->f = 0;
	  ring_elt->dequeued = 0;
	  ring_elt->enqueued = 0;
	  ring_elt->deq_state = 0;
	  ring->tail += 1;
	  ring->tail &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
	  cet->frames_on_ring--;
	  cet->deqeued_not_returned--;
	}
    }

  return ret_frame;
}
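
/* Thin wrappers binding each async crypto op to the generic enqueue routines
 * with the corresponding operation type and AAD length. */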
static_always_inline int
cryptodev_raw_enq_aead_aad_0_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT, 0);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_8_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_12_enc (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
				     12);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_0_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT, 0);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_8_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT, 8);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_12_dec (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
				     12);
}

static_always_inline int
cryptodev_raw_enq_linked_alg_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_raw_enq_linked_alg_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_DECRYPT);
}
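
/* Register raw data-path API handlers for all supported algorithms; falls
 * back to the crypto-op API if any configured cryptodev lacks
 * RTE_CRYPTODEV_FF_SYM_RAW_DP support. */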
clib_error_t *
cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  cryptodev_inst_t *cinst;
  struct rte_cryptodev_info info;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
  u32 support_raw_api = 1, max_ctx_size = 0;
  clib_error_t *error = 0;

  vec_foreach (cinst, cmt->cryptodev_inst)
    {
      u32 ctx_size;
      rte_cryptodev_info_get (cinst->dev_id, &info);
      if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
	{
	  support_raw_api = 0;
	  break;
	}

      ctx_size = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);
      max_ctx_size = clib_max (ctx_size, max_ctx_size);
    }

  if (!support_raw_api)
    return cryptodev_register_cop_hdl (vm, eidx);

  vec_foreach (cet, cmt->per_thread_data)
    {
      u32 thread_id = cet - cmt->per_thread_data;
      u32 numa = vlib_get_main_by_index (thread_id)->numa_node;
      u8 *name = format (0, "cache_frame_ring_%u_%u", numa, thread_id);

      cet->aad_buf = rte_zmalloc_socket (
	0, CRYPTODEV_NB_CRYPTO_OPS * CRYPTODEV_MAX_AAD_SIZE,
	CLIB_CACHE_LINE_BYTES, numa);
      if (cet->aad_buf == 0)
	{
	  error = clib_error_return (0, "Failed to alloc aad buf");
	  goto err_handling;
	}
      cet->aad_phy_addr = rte_malloc_virt2iova (cet->aad_buf);

      cet->ctx =
	rte_zmalloc_socket (0, max_ctx_size, CLIB_CACHE_LINE_BYTES, numa);
      if (!cet->ctx)
	{
	  error = clib_error_return (0, "Failed to alloc raw dp ctx");
	  goto err_handling;
	}
      vec_free (name);
    }

#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
	cryptodev_raw_enq_aead_aad_##f##_enc);                                \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
	cryptodev_raw_enq_aead_aad_##f##_dec);                                \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
	cryptodev_raw_enq_linked_alg_enc);                                    \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
	cryptodev_raw_enq_linked_alg_dec);                                    \
    }
  foreach_cryptodev_link_async_alg
#undef _

  cmt->is_raw_api = 1;

  vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_raw_dequeue);

  return 0;

err_handling:
  return error;
}