/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/vnet.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>
#undef always_inline
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_MAX_INFLIGHT  (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_AAD_MASK      (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_DEQ_CACHE_SZ  32
#define CRYPTODEV_NB_SESSION    10240
#define CRYPTODEV_MAX_AAD_SIZE  16
#define CRYPTODEV_MAX_N_SGL     8 /**< maximum number of segments */
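/* The per-thread AAD scratch buffer is carved into CRYPTODEV_NB_CRYPTO_OPS
 * slots of CRYPTODEV_MAX_AAD_SIZE (16) bytes each; an enqueue takes slot
 * ((aad_index++ & CRYPTODEV_AAD_MASK) << 4), so at most
 * CRYPTODEV_MAX_INFLIGHT in-flight ops can safely reference their AAD
 * copies at any time. */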
/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
#define foreach_vnet_aead_crypto_conversion \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)
/**
 * crypto (alg, cryptodev_alg), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg \
  _ (AES_128_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_192_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_256_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_128_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_192_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_256_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_128_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_192_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_256_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_128_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_192_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_256_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_128_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_192_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_256_CBC, AES_CBC, SHA512, 32)
typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES,
} cryptodev_op_type_t;
typedef struct
{
  union rte_cryptodev_session_ctx keys[CRYPTODEV_N_OP_TYPES];
} cryptodev_key_t;
typedef struct
{
  u32 dev_id;
  u32 q_id;
  char *desc;
  struct rte_crypto_raw_dp_ctx *raw_dp_ctx_buffer;
} cryptodev_inst_t;
typedef struct
{
  struct rte_mempool *sess_pool;
  struct rte_mempool *sess_priv_pool;
} cryptodev_numa_data_t;
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  vlib_buffer_t *b[VNET_CRYPTO_FRAME_SIZE];
  struct rte_crypto_raw_dp_ctx *ctx;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_ring *cached_frame;
  u16 aad_index;
  u8 *aad_buf;
  u64 aad_phy_addr;
  u16 cryptodev_id;
  u16 cryptodev_q;
  u16 inflight;
} cryptodev_engine_thread_t;
typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;
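/* Fill a DPDK symmetric AEAD transform from a vnet crypto key. Only
 * AES-GCM with a 12-byte IV and a 16-byte tag is produced here; the AAD
 * length is supplied by the caller. */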
static int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type,
		    const vnet_crypto_key_t * key, u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = 0;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}
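/* Fill a cipher+auth transform chain for linked (e.g. AES-CBC + HMAC)
 * keys. For encrypt the cipher transform comes first and the auth op
 * generates the digest; for decrypt the auth transform comes first and
 * verifies it. */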
static int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t * key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d) \
  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d: \
    cipher_algo = RTE_CRYPTO_CIPHER_##b; \
    auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
    digest_len = d; \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = 0;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
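/* Create the encrypt/decrypt session pair for a key and initialize it on
 * every probed cryptodev; a session already configured for a device's
 * driver type is skipped to avoid bumping the session data's refcnt. The
 * AAD length is stashed in the session's opaque_data so a later mismatch
 * can trigger re-creation. */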
static int
cryptodev_session_create (vnet_crypto_key_t * const key,
			  struct rte_mempool *sess_priv_pool,
			  cryptodev_key_t * session_pair, u32 aad_len)
{
  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev *cdev;
  int ret;
  u32 dev_id = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
			      aad_len);
  if (ret)
    return ret;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
  {
    dev_id = dev_inst->dev_id;
    cdev = rte_cryptodev_pmd_get_dev (dev_id);

    /* if the session is already configured for the driver type, avoid
       configuring it again to increase the session data's refcnt */
    if (session_pair->keys[0].crypto_sess->sess_data[cdev->driver_id].data &&
	session_pair->keys[1].crypto_sess->sess_data[cdev->driver_id].data)
      continue;

    ret = rte_cryptodev_sym_session_init (dev_id,
					  session_pair->keys[0].crypto_sess,
					  xforms_enc, sess_priv_pool);
    ret = rte_cryptodev_sym_session_init (dev_id,
					  session_pair->keys[1].crypto_sess,
					  xforms_dec, sess_priv_pool);
    if (ret < 0)
      return ret;
  }
  session_pair->keys[0].crypto_sess->opaque_data = aad_len;
  session_pair->keys[1].crypto_sess->opaque_data = aad_len;

  return 0;
}
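/* Clear a session on every device before freeing it, so each PMD's
 * private session data is released. */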
static void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}
static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f) \
  if (alg == VNET_CRYPTO_ALG_##a) \
    return 0;

  foreach_vnet_aead_crypto_conversion
#undef _
    return -1;
}
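/* Key add/delete/modify callback from the vnet crypto framework. Sessions
 * are torn down via cryptodev_session_del; a modify is handled as
 * delete-then-recreate, and unsupported algorithms are silently skipped. */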
static_always_inline void
cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = 0;
  int ret = 0;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      ckey = pool_elt_at_index (cmt->keys, idx);
      cryptodev_session_del (ckey->keys[0].crypto_sess);
      cryptodev_session_del (ckey->keys[1].crypto_sess);
      ckey->keys[0].crypto_sess = 0;
      ckey->keys[1].crypto_sess = 0;
      pool_put (cmt->keys, ckey);
      return;
    }
  else if (kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      ckey = pool_elt_at_index (cmt->keys, idx);

      cryptodev_session_del (ckey->keys[0].crypto_sess);
      cryptodev_session_del (ckey->keys[1].crypto_sess);
      ckey->keys[0].crypto_sess = 0;
      ckey->keys[1].crypto_sess = 0;
    }
  else				/* create key */
    pool_get_zero (cmt->keys, ckey);

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key))
    return;

  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
  sess_pool = numa_data->sess_pool;
  sess_priv_pool = numa_data->sess_priv_pool;

  ckey->keys[0].crypto_sess = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[0].crypto_sess)
    {
      ret = -1;
      goto clear_key;
    }

  ckey->keys[1].crypto_sess = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[1].crypto_sess)
    {
      ret = -1;
      goto clear_key;
    }

  ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (ckey->keys[0].crypto_sess);
      cryptodev_session_del (ckey->keys[1].crypto_sess);
      memset (ckey, 0, sizeof (*ckey));
      pool_put (cmt->keys, ckey);
    }
}
static void
cryptodev_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t * f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
  f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
}
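/* Build the scatter-gather vector for a chained buffer: data_vec[0] is
 * already filled by the caller with the head segment, and the remaining
 * `size` bytes are collected from the rest of the chain. Fails if more
 * than CRYPTODEV_MAX_N_SGL segments would be needed. */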
static_always_inline int
cryptodev_frame_build_sgl (vlib_main_t * vm, enum rte_iova_mode iova_mode,
			   struct rte_crypto_vec *data_vec,
			   u16 * n_seg, vlib_buffer_t * b, u32 size)
{
  struct rte_crypto_vec *vec = data_vec + 1;
  if (vlib_buffer_chain_linearize (vm, b) > CRYPTODEV_MAX_N_SGL)
    return -1;

  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
    {
      u32 len;
      b = vlib_get_buffer (vm, b->next_buffer);
      len = clib_min (b->current_length, size);
      vec->base = (void *) vlib_buffer_get_current (b);
      if (iova_mode == RTE_IOVA_VA)
	vec->iova = pointer_to_uword (vec->base);
      else
	vec->iova = vlib_buffer_get_current_pa (vm, b);
      vec->len = len;
      size -= len;
      vec++;
      *n_seg += 1;
    }

  if (size)
    return -1;

  return 0;
}
static_always_inline u64
compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t * fe, i16 * min_ofs,
			u32 * max_end)
{
  union rte_crypto_sym_ofs ofs;
  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
  u32 integ_end = fe->integ_start_offset + fe->crypto_total_length +
    fe->integ_length_adj;

  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
  *max_end = clib_max (crypto_end, integ_end);

  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
  ofs.ofs.cipher.tail = *max_end - crypto_end;
  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
  ofs.ofs.auth.tail = *max_end - integ_end;

  return ofs.raw;
}
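/* Worked example (illustrative values): crypto_start_offset = 16,
 * integ_start_offset = 0, crypto_total_length = 100, integ_length_adj = 16
 * gives crypto span [16, 116) and integ span [0, 116); hence
 * *min_ofs = 0, *max_end = 116, cipher.head = 16 and
 * cipher.tail = auth.head = auth.tail = 0. */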
/* Reset cryptodev dp context to previous queue pair state */
static_always_inline void
cryptodev_reset_ctx (u16 cdev_id, u16 qid, struct rte_crypto_raw_dp_ctx *ctx)
{
  union rte_cryptodev_session_ctx session_ctx = { .crypto_sess = NULL };

  rte_cryptodev_configure_raw_dp_ctx (cdev_id, qid, ctx, ~0, session_ctx, 0);
}
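/* Enqueue a frame of linked (cipher+auth) operations through the raw
 * data-path API. The raw DP context is re-configured whenever the key
 * changes between elements, and on any enqueue error the whole frame is
 * marked failed and the queue pair is reset. */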
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
				     vnet_crypto_async_frame_t * frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  struct rte_crypto_vec *vec;
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
  vlib_buffer_t **b;
  u32 n_elts;
  cryptodev_key_t *key;
  u32 last_key_index = ~0;
  union rte_crypto_sym_ofs cofs;
  i16 min_ofs;
  u32 max_end;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  b = cet->b;
  fe = frame->elts;
  vec = cet->vec;
  cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

  while (n_elts)
    {
      u16 n_seg = 1;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	  vlib_prefetch_buffer_header (b[2], LOAD);
	}

      if (PREDICT_FALSE (last_key_index != fe->key_index))
	{
	  cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  last_key_index = fe->key_index;

	  if (PREDICT_FALSE
	      (rte_cryptodev_configure_raw_dp_ctx
	       (cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
		RTE_CRYPTO_OP_WITH_SESSION, key->keys[op_type], 1) < 0))
	    {
	      cryptodev_mark_frame_err_status (frame,
					       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
	      cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q,
				   cet->ctx);
	      return -1;
	    }
	}

      vec->len = max_end - min_ofs;
      if (cmt->iova_mode == RTE_IOVA_VA)
	{
	  vec->base = (void *) (b[0]->data + min_ofs);
	  vec->iova = pointer_to_uword (b[0]->data) + min_ofs;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = pointer_to_uword (fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = pointer_to_uword (fe->tag);
	}
      else
	{
	  vec->base = (void *) (b[0]->data + min_ofs);
	  vec->iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = vlib_physmem_get_pa (vm, fe->digest);
	}

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	{
	  vec->len = b[0]->current_data + b[0]->current_length - min_ofs;
	  if (cryptodev_frame_build_sgl
	      (vm, cmt->iova_mode, vec, &n_seg, b[0],
	       max_end - min_ofs - vec->len) < 0)
	    {
	      cryptodev_mark_frame_err_status (frame,
					       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
	      cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q,
				   cet->ctx);
	      return -1;
	    }
	}

      status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
					  &digest_vec, 0, (void *) frame);
      if (PREDICT_FALSE (status < 0))
	{
	  cryptodev_mark_frame_err_status (frame,
					   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
	  cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx);
	  return -1;
	}

      b++;
      fe++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    {
      cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx);
      return -1;
    }

  cet->inflight += frame->n_elts;

  return 0;
}
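/* Enqueue a frame of AES-GCM operations. The AAD of each element is copied
 * into the per-thread scratch ring so it stays valid while the op is in
 * flight; if the AAD length recorded in the session differs from this
 * frame's, the session is recreated with the new AAD length. */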
static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
			     vnet_crypto_async_frame_t * frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  u32 n_elts;
  cryptodev_key_t *key;
  u32 last_key_index = ~0;
  union rte_crypto_sym_ofs cofs;
  struct rte_crypto_vec *vec;
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
  u8 sess_aad_len;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  b = cet->b;
  fe = frame->elts;
  vec = cet->vec;
  cofs.raw = 0;

  while (n_elts)
    {
      u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
      u16 n_seg = 1;

      if (n_elts > 1)
	{
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	}

      if (last_key_index != fe->key_index)
	{
	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  sess_aad_len = (u8) key->keys[op_type].crypto_sess->opaque_data;
	  if (PREDICT_FALSE (sess_aad_len != aad_len))
	    cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
				    fe->key_index, aad_len);
	  last_key_index = fe->key_index;

	  if (PREDICT_FALSE
	      (rte_cryptodev_configure_raw_dp_ctx
	       (cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
		RTE_CRYPTO_OP_WITH_SESSION, key->keys[op_type], 1) < 0))
	    {
	      cryptodev_mark_frame_err_status (frame,
					       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
	      cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q,
				   cet->ctx);
	      return -1;
	    }
	}

      if (cmt->iova_mode == RTE_IOVA_VA)
	{
	  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
	  vec[0].iova = pointer_to_uword (vec[0].base);
	  vec[0].len = fe->crypto_total_length;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = pointer_to_uword (fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = pointer_to_uword (fe->tag);
	  aad_vec.va = (void *) (cet->aad_buf + aad_offset);
	  aad_vec.iova = cet->aad_phy_addr + aad_offset;
	}
      else
	{
	  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
	  vec[0].iova =
	    vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
	  vec[0].len = fe->crypto_total_length;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
	  aad_vec.va = (void *) (cet->aad_buf + aad_offset);
	  aad_vec.iova = cet->aad_phy_addr + aad_offset;
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
	}

      if (aad_len == 8)
	*(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
      else
	{
	  /* aad_len == 12 */
	  *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
	  *(u32 *) (cet->aad_buf + aad_offset + 8) = *(u32 *) (fe->aad + 8);
	}

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	{
	  vec[0].len = b[0]->current_data +
	    b[0]->current_length - fe->crypto_start_offset;
	  if (cryptodev_frame_build_sgl
	      (vm, cmt->iova_mode, vec, &n_seg, b[0],
	       fe->crypto_total_length - vec[0].len) < 0)
	    {
	      cryptodev_mark_frame_err_status (frame,
					       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
	      cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q,
				   cet->ctx);
	      return -1;
	    }
	}

      status =
	rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs,
				   &iv_vec, &digest_vec, &aad_vec,
				   (void *) frame);
      if (PREDICT_FALSE (status < 0))
	{
	  cryptodev_mark_frame_err_status (frame,
					   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
	  cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx);
	  return -1;
	}

      fe++;
      b++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    {
      cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx);
      return -1;
    }

  cet->inflight += frame->n_elts;

  return 0;
}
static u32
cryptodev_get_frame_n_elts (void *frame)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
  return f->n_elts;
}

static void
cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;

  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
    VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
}
#define GET_RING_OBJ(r, pos, f) do { \
	vnet_crypto_async_frame_t **ring = (void *)&r[1]; \
	f = ring[(r->cons.head + pos) & r->mask]; \
} while (0)
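/* Dequeue completed operations and return at most one finished frame.
 * Partially dequeued frames are parked in cet->cached_frame; while cached,
 * f->state temporarily encodes progress: bits 0-6 hold the number of
 * elements still pending and bit 7 is set if any element failed. */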
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
			 u32 * enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame, *frame_ret = 0;
  u32 n_deq, n_success;
  u32 n_cached_frame = rte_ring_count (cet->cached_frame), n_room_left;
  u8 no_job_to_deq = 0;
  u16 inflight = cet->inflight;
  int dequeue_status;

  n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;

  if (n_cached_frame)
    {
      u32 i;
      for (i = 0; i < n_cached_frame; i++)
	{
	  vnet_crypto_async_frame_t *f;
	  void *f_ret;
	  enum rte_crypto_op_status op_status;
	  u8 n_left, err, j;

	  GET_RING_OBJ (cet->cached_frame, i, f);

	  if (i < n_cached_frame - 2)
	    {
	      vnet_crypto_async_frame_t *f1, *f2;
	      GET_RING_OBJ (cet->cached_frame, i + 1, f1);
	      GET_RING_OBJ (cet->cached_frame, i + 2, f2);
	      CLIB_PREFETCH (f1, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (f2, CLIB_CACHE_LINE_BYTES, LOAD);
	    }

	  n_left = f->state & 0x7f;
	  err = f->state & 0x80;

	  for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
	    {
	      int ret;
	      f_ret = rte_cryptodev_raw_dequeue (cet->ctx, &ret, &op_status);
	      if (!f_ret)
		break;
	      switch (op_status)
		{
		case RTE_CRYPTO_OP_STATUS_SUCCESS:
		  f->elts[j].status = VNET_CRYPTO_OP_STATUS_COMPLETED;
		  break;
		default:
		  f->elts[j].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
		  err |= 1 << 7;
		}
	      inflight--;
	    }

	  if (j == f->n_elts)
	    {
	      if (i == 0)
		{
		  frame_ret = f;
		  f->state = err ? VNET_CRYPTO_FRAME_STATE_ELT_ERROR :
		    VNET_CRYPTO_FRAME_STATE_SUCCESS;
		}
	      else
		{
		  f->state = f->n_elts - j;
		  f->state |= err;
		}
	      if (inflight)
		continue;
	    }

	  /* to here f is not completely dequeued and no more job can be
	   * dequeued */
	  f->state = f->n_elts - j;
	  f->state |= err;
	  no_job_to_deq = 1;
	  break;
	}

      if (frame_ret)
	{
	  rte_ring_sc_dequeue (cet->cached_frame, (void **) &frame_ret);
	  n_room_left++;
	}
    }

  /* no point to dequeue further */
  if (!inflight || no_job_to_deq || !n_room_left)
    goto end_deq;

  n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
					   cryptodev_get_frame_n_elts,
					   cryptodev_post_dequeue,
					   (void **) &frame, 0, &n_success,
					   &dequeue_status);
  if (!n_deq)
    goto end_deq;

  inflight -= n_deq;
  no_job_to_deq = n_deq < frame->n_elts;
  /* we have to cache the frame */
  if (frame_ret || n_cached_frame || no_job_to_deq)
    {
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }
  else
    {
      frame->state = n_success == frame->n_elts ?
	VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
      frame_ret = frame;
    }

  /* see if we can dequeue more */
  while (inflight && n_room_left && !no_job_to_deq)
    {
      n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
					       cryptodev_get_frame_n_elts,
					       cryptodev_post_dequeue,
					       (void **) &frame, 0,
					       &n_success, &dequeue_status);
      if (!n_deq)
	break;
      inflight -= n_deq;
      no_job_to_deq = n_deq < frame->n_elts;
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }

end_deq:
  if (inflight < cet->inflight)
    {
      rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
      cet->inflight = inflight;
    }

  if (frame_ret)
    {
      *nb_elts_processed = frame_ret->n_elts;
      *enqueue_thread_idx = frame_ret->enqueue_thread_index;
    }

  return frame_ret;
}
static_always_inline int
cryptodev_enqueue_gcm_aad_8_enc (vlib_main_t * vm,
				 vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_enc (vlib_main_t * vm,
				  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_8_dec (vlib_main_t * vm,
				 vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_dec (vlib_main_t * vm,
				  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t * vm,
				  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t * vm,
				  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_DECRYPT);
}
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;
/**
 * assign a cryptodev resource to a worker.
 * @param cet: the worker thread data
 * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 * @param op: the assignment method.
 * @return: 0 if successful, negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  u32 idx;

  /* assigning a resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      cet->ctx = cinst->raw_dp_ctx_buffer;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
	  == 1)
	return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
      {
	cinst = cmt->cryptodev_inst + idx;
	if (cinst->dev_id == cet->cryptodev_id &&
	    cinst->q_id == cet->cryptodev_q)
	  break;
      }
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
	return -EINVAL;

      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      cet->ctx = cinst->raw_dp_ctx_buffer;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }

  return 0;
}
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
	s = format (s, "%u (%v)\n", thread_index,
		    vlib_worker_threads[thread_index].name);
	break;
      }
  }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}
static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			      vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}
VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			     vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
	clib_error_return (0, "cannot assign crypto resource to master thread");
      return error;
    }

  if (thread_index > vec_len (cmt->per_thread_data) ||
      inst_index > vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error = clib_error_return (0, "cryptodev_assign_resource returned %i",
				 ret);
      return error;
    }

  return 0;
}
VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
	"resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
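/* Return 0 when the device supports every algorithm, digest, IV and AAD
 * size this engine registers; otherwise return the negated DPDK algorithm
 * id of the first unsupported one. */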
static int
check_cryptodev_alg_support (u32 dev_id)
{
  const struct rte_cryptodev_symmetric_capability *cap;
  struct rte_cryptodev_sym_capability_idx cap_idx;

#define _(a, b, c, d, e, f) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \
  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_##b##_##c; \
  else \
    { \
      if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \
	return -RTE_CRYPTO_##b##_##c; \
    }

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \
  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_CIPHER_##b; \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \
  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_AUTH_##c;

  foreach_cryptodev_link_async_alg
#undef _

  return 0;
}
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  u32 i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      if (rte_cryptodev_socket_id (i) != numa)
	{
	  clib_warning ("DPDK crypto resource %s is in a different numa "
			"node than %u, ignored", info.device->name, numa);
	  continue;
	}
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_info info;
  struct rte_cryptodev *cdev;
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
						       vm->numa_node);
  u32 dp_size = 0;
  u32 i;
  int ret;

  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
  rte_cryptodev_info_get (cryptodev_id, &info);

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
    return -1;

  ret = check_cryptodev_alg_support (cryptodev_id);
  if (ret != 0)
    return ret;

  /** If the device is already started, we reuse it, otherwise configure
   *  both the device and queue pair.
   **/
  if (!cdev->data->dev_started)
    {
      struct rte_cryptodev_config cfg;

      cfg.socket_id = vm->numa_node;
      cfg.nb_queue_pairs = info.max_nb_queue_pairs;

      rte_cryptodev_configure (cryptodev_id, &cfg);

      for (i = 0; i < info.max_nb_queue_pairs; i++)
	{
	  struct rte_cryptodev_qp_conf qp_cfg;

	  qp_cfg.mp_session = numa_data->sess_pool;
	  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
	  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

	  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
						vm->numa_node);
	  if (ret)
	    break;
	}
      if (i != info.max_nb_queue_pairs)
	return -1;

      /* start the device */
      rte_cryptodev_start (cryptodev_id);
    }

  ret = rte_cryptodev_get_raw_dp_ctx_size (cryptodev_id);
  if (ret < 0)
    return -1;
  dp_size = ret;

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;
      vec_validate_aligned (cdev_inst->raw_dp_ctx_buffer, dp_size, 8);
      cryptodev_reset_ctx (cdev_inst->dev_id, cdev_inst->q_id,
			   cdev_inst->raw_dp_ctx_buffer);

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}
static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;

  return 0;
}
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;
  int ret;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      ret = cryptodev_configure (vm, i);
      if (ret)
	continue;
    }

  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough devices, disable the engine */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}
static i32
cryptodev_get_session_sz (vlib_main_t *vm, u32 n_workers)
{
  u32 sess_data_sz = 0, i;

  if (rte_cryptodev_count () == 0)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);

      sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
    }

  return sess_data_sz;
}
static void
dpdk_disable_cryptodev_engine (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_engine_thread_t *ptd;

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);

  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);

  vec_foreach (ptd, cmt->per_thread_data)
  {
    if (ptd->aad_buf)
      rte_free (ptd->aad_buf);
    if (ptd->cached_frame)
      rte_ring_free (ptd->cached_frame);
  }
}
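/* Engine entry point: sizes and creates the per-numa session pools, probes
 * the cryptodevs, binds one (device, queue-pair) pair to each worker, and
 * registers the enqueue/dequeue handlers with the vnet crypto framework. */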
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  i32 sess_sz;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  sess_sz = cryptodev_get_session_sz (vm, n_workers);
  if (sess_sz < 0)
    {
      error = clib_error_return (0, "Not enough cryptodevs");
      return error;
    }

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

  /* create session pool for the numa node */
  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
					      CRYPTODEV_NB_SESSION, 0, 0, 0,
					      numa);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto err_handling;
    }
  vec_free (name);

  numa_data->sess_pool = mp;

  /* create session private pool for the numa node */
  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
			   0, NULL, NULL, NULL, NULL, numa, 0);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      vec_free (name);
      goto err_handling;
    }
  vec_free (name);

  numa_data->sess_priv_pool = mp;

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
      ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS *
					 CRYPTODEV_MAX_AAD_SIZE,
					 CLIB_CACHE_LINE_BYTES,
					 numa);
      if (ptd->aad_buf == 0)
	{
	  error = clib_error_return (0, "Failed to alloc aad buf");
	  goto err_handling;
	}

      ptd->aad_phy_addr = rte_malloc_virt2iova (ptd->aad_buf);

      name = format (0, "cache_frame_ring_%u%u", numa, i);
      ptd->cached_frame = rte_ring_create ((char *) name,
					   CRYPTODEV_DEQ_CACHE_SZ, numa,
					   RING_F_SC_DEQ | RING_F_SP_ENQ);
      if (ptd->cached_frame == 0)
	{
	  error = clib_error_return (0, "Failed to create frame ring");
	  goto err_handling;
	}
      vec_free (name);
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
     cryptodev_enqueue_gcm_aad_##f##_enc, \
     cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
     cryptodev_enqueue_gcm_aad_##f##_dec, \
     cryptodev_frame_dequeue);

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \
     cryptodev_enqueue_linked_alg_enc, \
     cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \
     cryptodev_enqueue_linked_alg_dec, \
     cryptodev_frame_dequeue);

  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);

  return error;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */