/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
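/* CRYPTODEV_NB_CRYPTO_OPS is a power of two: the same value minus one
 * serves both as the per-queue in-flight limit and as the mask used to
 * wrap the per-thread AAD scratch-buffer index in the GCM enqueue path. */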
#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_MAX_INFLIGHT	(CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_AAD_MASK	(CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_DEQ_CACHE_SZ	32
#define CRYPTODEV_NB_SESSION	10240
#define CRYPTODEV_MAX_AAD_SIZE	16
#define CRYPTODEV_MAX_N_SGL	8 /**< maximum number of segments */
/**
 * VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN, KEY_LEN
 */
#define foreach_vnet_aead_crypto_conversion                                   \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 8, 16)                               \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 12, 16)                              \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 8, 24)                               \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 12, 24)                              \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 8, 32)                               \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 12, 32)
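/* Example: with the `_' macro defined in dpdk_cryptodev_init below, the
 * tuple (AES_128_GCM, AEAD, AES_GCM, 12, 16, 8, 16) registers the
 * VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_ENC/_DEC async handlers, provided
 * every probed device supports AES-GCM with a 16-byte key and 8-byte AAD. */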
/**
 * crypto (alg, cryptodev_alg, key_size), hash (alg, digest-size)
 */
#define foreach_cryptodev_link_async_alg                                      \
  _ (AES_128_CBC, AES_CBC, 16, SHA1, 12)                                      \
  _ (AES_192_CBC, AES_CBC, 24, SHA1, 12)                                      \
  _ (AES_256_CBC, AES_CBC, 32, SHA1, 12)                                      \
  _ (AES_128_CBC, AES_CBC, 16, SHA224, 14)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA224, 14)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA224, 14)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA256, 16)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA256, 16)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA256, 16)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA384, 24)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA384, 24)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA384, 24)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA512, 32)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA512, 32)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA512, 32)
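/* Each tuple expands to one linked (cipher + HMAC) algorithm, e.g.
 * (AES_128_CBC, AES_CBC, 16, SHA1, 12) maps
 * VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12 to RTE_CRYPTO_CIPHER_AES_CBC
 * paired with RTE_CRYPTO_AUTH_SHA1_HMAC and a 12-byte digest. */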
typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES,
} cryptodev_op_type_t;

typedef struct
{
  union rte_cryptodev_session_ctx **keys;
} cryptodev_key_t;

/* Replicate DPDK rte_cryptodev_sym_capability structure with key size ranges
 * in favor of vpp vector */
typedef struct
{
  enum rte_crypto_sym_xform_type xform_type;
  union
  {
    struct
    {
      enum rte_crypto_auth_algorithm algo; /* auth algo */
      u32 *digest_sizes;		   /* vector of auth digest sizes */
    } auth;
    struct
    {
      enum rte_crypto_cipher_algorithm algo; /* cipher algo */
      u32 *key_sizes;			     /* vector of cipher key sizes */
    } cipher;
    struct
    {
      enum rte_crypto_aead_algorithm algo; /* aead algo */
      u32 *key_sizes;			   /* vector of aead key sizes */
      u32 *aad_sizes;			   /* vector of aad sizes */
      u32 *digest_sizes;		   /* vector of aead digest sizes */
    } aead;
  };
} cryptodev_capability_t;
typedef struct
{
  u32 dev_id;
  u32 q_id;
  char *desc;
} cryptodev_inst_t;

typedef struct
{
  struct rte_mempool *sess_pool;
  struct rte_mempool *sess_priv_pool;
} cryptodev_numa_data_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  vlib_buffer_t *b[VNET_CRYPTO_FRAME_SIZE];
  struct rte_crypto_raw_dp_ctx *ctx;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_ring *cached_frame;
  u16 aad_index;
  u8 *aad_buf;
  u64 aad_phy_addr;
  u16 cryptodev_id;
  u16 cryptodev_q;
  u16 inflight;
  union rte_cryptodev_session_ctx reset_sess; /* session data for reset ctx */
} cryptodev_engine_thread_t;

typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
  cryptodev_capability_t *supported_caps;
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;
static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
		    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = 0;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}
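/* Build the two-element xform chain for linked (cipher + HMAC) keys.
 * Encryption ciphers first and then generates the digest; decryption
 * verifies the digest before deciphering, hence the swapped xform order
 * in the two branches below. */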
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = 0;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
static_always_inline void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}
static_always_inline int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
			  u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
  u32 numa_node = vm->numa_node;
  int ret = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
  sess_pool = numa_data->sess_pool;
  sess_priv_pool = numa_data->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
    {
      ret = -1;
      goto error_exit;
    }

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto error_exit;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
			      aad_len);
  if (ret)
    goto error_exit;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);

      /* if the session is already configured for the driver type, avoid
	 configuring it again to increase the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[cdev->driver_id].data &&
	  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[cdev->driver_id].data)
	continue;

      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc, sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec, sess_priv_pool);
      if (ret < 0)
	goto error_exit;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

  return 0;

error_exit:
  cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
  cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);

  return ret;
}
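/* Capability helpers: an algorithm is only registered with vnet if every
 * configured device advertises the exact key, digest and AAD sizes it
 * needs (see the registration macros in dpdk_cryptodev_init below). */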
static int
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;

  vec_foreach (value, params)
    {
      if (*value == param_value)
	return 1;
    }
  return 0;
}
static int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
			     u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;

  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
	continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	  cap->auth.algo == idx->algo.auth &&
	  cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
	return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	  cap->cipher.algo == idx->algo.cipher &&
	  cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
	return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
	  cap->aead.algo == idx->algo.aead &&
	  cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
	  cryptodev_supports_param_value (cap->aead.digest_sizes,
					  digest_size) &&
	  cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
	return 1;
    }
  return 0;
}
static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f, g)                                                \
  if (alg == VNET_CRYPTO_ALG_##a)                                             \
    return 0;
  foreach_vnet_aead_crypto_conversion
#undef _
  return -1;
}
static_always_inline void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      vec_foreach_index (i, cmt->per_numa_data)
	{
	  if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess)
	    {
	      cryptodev_session_del (
		ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess);
	      cryptodev_session_del (
		ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess);

	      CLIB_MEMORY_STORE_BARRIER ();
	      ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess = 0;
	      ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess = 0;
	    }
	}
      return;
    }

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key))
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}
static void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
  f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
}
static_always_inline int
cryptodev_frame_build_sgl (vlib_main_t *vm, enum rte_iova_mode iova_mode,
			   struct rte_crypto_vec *data_vec, u16 *n_seg,
			   vlib_buffer_t *b, u32 size)
{
  struct rte_crypto_vec *vec = data_vec + 1;
  if (vlib_buffer_chain_linearize (vm, b) > CRYPTODEV_MAX_N_SGL)
    return -1;

  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
    {
      u32 len;
      b = vlib_get_buffer (vm, b->next_buffer);
      len = clib_min (b->current_length, size);
      vec->base = (void *) vlib_buffer_get_current (b);
      if (iova_mode == RTE_IOVA_VA)
	vec->iova = pointer_to_uword (vec->base);
      else
	vec->iova = vlib_buffer_get_current_pa (vm, b);
      vec->len = len;
      size -= len;
      vec++;
      *n_seg += 1;
    }

  if (size)
    return -1;

  return 0;
}
static_always_inline u64
compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t *fe, i16 *min_ofs,
			u32 *max_end)
{
  union rte_crypto_sym_ofs ofs;
  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
  u32 integ_end = fe->integ_start_offset + fe->crypto_total_length +
		  fe->integ_length_adj;

  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
  *max_end = clib_max (crypto_end, integ_end);

  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
  ofs.ofs.cipher.tail = *max_end - crypto_end;
  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
  ofs.ofs.auth.tail = *max_end - integ_end;

  return ofs.raw;
}
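/* Illustrative example: with crypto_start_offset = 8, integ_start_offset = 0
 * and integ_length_adj = 4, crypto_end = crypto_total_length + 8 and
 * integ_end = crypto_total_length + 4, so min_ofs = 0, max_end = crypto_end,
 * cipher.head = 8, auth.head = 0, cipher.tail = 0 and auth.tail = 4. The
 * PMD is handed one contiguous region [min_ofs, max_end) plus these
 * relative head/tail offsets. */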
static_always_inline void
cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
{
  /* re-arm the raw data path context with the dummy reset session */
  rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q,
				      cet->ctx, RTE_CRYPTO_OP_WITH_SESSION,
				      cet->reset_sess, 0);
}
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  struct rte_crypto_vec *vec;
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
  vlib_buffer_t **b;
  u32 n_elts;
  u32 last_key_index = ~0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  vec = cet->vec;
  b = cet->b;
  fe = frame->elts;

  while (n_elts)
    {
      union rte_crypto_sym_ofs cofs;
      u16 n_seg = 1;
      i16 min_ofs;
      u32 max_end;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	  vlib_prefetch_buffer_header (b[2], LOAD);
	}

      if (PREDICT_FALSE (last_key_index != fe->key_index))
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

	  if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
			     0))
	    {
	      status = cryptodev_session_create (vm, fe->key_index, 0);
	      if (PREDICT_FALSE (status < 0))
		goto error_exit;
	    }

	  status = rte_cryptodev_configure_raw_dp_ctx (
	    cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
	    RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
	    /*is_update */ 1);
	  if (PREDICT_FALSE (status < 0))
	    goto error_exit;

	  last_key_index = fe->key_index;
	}

      cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

      vec->len = max_end - min_ofs;
      if (cmt->iova_mode == RTE_IOVA_VA)
	{
	  vec[0].base = (void *) (b[0]->data + min_ofs);
	  vec[0].iova = pointer_to_uword (b[0]->data) + min_ofs;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = pointer_to_uword (fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = pointer_to_uword (fe->tag);
	}
      else
	{
	  vec[0].base = (void *) (b[0]->data + min_ofs);
	  vec[0].iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = vlib_physmem_get_pa (vm, fe->digest);
	}

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	{
	  vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs;
	  if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
					 max_end - min_ofs - vec->len) < 0)
	    goto error_exit;
	}

      status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
					  &digest_vec, 0, (void *) frame);
      if (PREDICT_FALSE (status < 0))
	goto error_exit;

      b++;
      fe++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    {
      cryptodev_reset_ctx (cet);
      return -1;
    }

  cet->inflight += frame->n_elts;
  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
				   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}
static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t *vm,
			     vnet_crypto_async_frame_t *frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  u32 n_elts;
  union rte_crypto_sym_ofs cofs;
  struct rte_crypto_vec *vec;
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
  u32 last_key_index = ~0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  vec = cet->vec;
  fe = frame->elts;
  b = cet->b;
  cofs.raw = 0;

  while (n_elts)
    {
      u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
      u16 n_seg = 1;

      if (n_elts > 1)
	{
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	}

      if (PREDICT_FALSE (last_key_index != fe->key_index))
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

	  if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
			     0))
	    {
	      status = cryptodev_session_create (vm, fe->key_index, aad_len);
	      if (PREDICT_FALSE (status < 0))
		goto error_exit;
	    }

	  if (PREDICT_FALSE ((u8) key->keys[vm->numa_node][op_type]
			       .crypto_sess->opaque_data != aad_len))
	    {
	      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
				      fe->key_index, aad_len);
	      status = cryptodev_session_create (vm, fe->key_index, aad_len);
	      if (PREDICT_FALSE (status < 0))
		goto error_exit;
	    }

	  status = rte_cryptodev_configure_raw_dp_ctx (
	    cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
	    RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
	    /*is_update */ 1);
	  if (PREDICT_FALSE (status < 0))
	    goto error_exit;

	  last_key_index = fe->key_index;
	}

      if (cmt->iova_mode == RTE_IOVA_VA)
	{
	  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
	  vec[0].iova = pointer_to_uword (vec[0].base);
	  vec[0].len = fe->crypto_total_length;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = pointer_to_uword (fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = pointer_to_uword (fe->tag);
	  aad_vec.va = (void *) (cet->aad_buf + aad_offset);
	  aad_vec.iova = cet->aad_phy_addr + aad_offset;
	}
      else
	{
	  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
	  vec[0].iova =
	    vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
	  vec[0].len = fe->crypto_total_length;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
	  aad_vec.va = (void *) (cet->aad_buf + aad_offset);
	  aad_vec.iova = cet->aad_phy_addr + aad_offset;
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
	}

      /* copy the AAD into the per-thread scratch buffer so the PMD always
	 sees a contiguous, IOVA-resolvable region */
      if (aad_len == 8)
	*(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
      else
	{
	  /* aad_len == 12 */
	  *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
	  *(u32 *) (cet->aad_buf + aad_offset + 8) = *(u32 *) (fe->aad + 8);
	}

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	{
	  vec[0].len = b[0]->current_data + b[0]->current_length -
		       fe->crypto_start_offset;
	  status =
	    cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
				       fe->crypto_total_length - vec[0].len);
	  if (status < 0)
	    goto error_exit;
	}

      status =
	rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
				   &digest_vec, &aad_vec, (void *) frame);
      if (PREDICT_FALSE (status < 0))
	goto error_exit;

      fe++;
      b++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    goto error_exit;

  cet->inflight += frame->n_elts;

  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
				   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}
static u32
cryptodev_get_frame_n_elts (void *frame)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
  return f->n_elts;
}

static void
cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;

  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
					  VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
}
#define GET_RING_OBJ(r, pos, f)                                               \
  do                                                                          \
    {                                                                         \
      vnet_crypto_async_frame_t **ring = (void *) &r[1];                      \
      f = ring[(r->cons.head + pos) & r->mask];                               \
    }                                                                         \
  while (0)
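/* While a frame waits in the cached-frame ring its `state' field is
 * re-purposed: bits 0-6 hold the number of elements not yet dequeued and
 * bit 7 is set if any element has failed so far. The encoding is unpacked
 * into (n_left, err) and re-packed on every dequeue pass below. */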
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			 u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame, *frame_ret = 0;
  u32 n_deq, n_success;
  u32 n_cached_frame = rte_ring_count (cet->cached_frame), n_room_left;
  u8 no_job_to_deq = 0;
  u16 inflight = cet->inflight;
  int dequeue_status;

  n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;

  if (n_cached_frame)
    {
      u32 i;
      for (i = 0; i < n_cached_frame; i++)
	{
	  vnet_crypto_async_frame_t *f;
	  void *f_ret;
	  enum rte_crypto_op_status op_status;
	  u8 n_left, err, j;

	  GET_RING_OBJ (cet->cached_frame, i, f);

	  if (i < n_cached_frame - 2)
	    {
	      vnet_crypto_async_frame_t *f1, *f2;
	      GET_RING_OBJ (cet->cached_frame, i + 1, f1);
	      GET_RING_OBJ (cet->cached_frame, i + 2, f2);
	      CLIB_PREFETCH (f1, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (f2, CLIB_CACHE_LINE_BYTES, LOAD);
	    }

	  n_left = f->state & 0x7f;
	  err = f->state & 0x80;

	  for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
	    {
	      int ret;
	      f_ret = rte_cryptodev_raw_dequeue (cet->ctx, &ret, &op_status);

	      if (!f_ret)
		break;

	      switch (op_status)
		{
		case RTE_CRYPTO_OP_STATUS_SUCCESS:
		  f->elts[j].status = VNET_CRYPTO_OP_STATUS_COMPLETED;
		  break;
		default:
		  f->elts[j].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
		  err |= 1 << 7;
		}

	      inflight--;
	    }

	  if (j == f->n_elts)
	    {
	      if (i == 0)
		{
		  frame_ret = f;
		  f->state = err ? VNET_CRYPTO_FRAME_STATE_ELT_ERROR :
				   VNET_CRYPTO_FRAME_STATE_SUCCESS;
		}
	      else
		{
		  f->state = f->n_elts - j;
		  f->state |= err;
		}
	      if (inflight)
		continue;
	    }

	  /* to here f is not completely dequeued and no more job can be
	   * dequeued
	   */
	  f->state = f->n_elts - j;
	  f->state |= err;
	  no_job_to_deq = 1;
	  break;
	}

      if (frame_ret)
	{
	  rte_ring_sc_dequeue (cet->cached_frame, (void **) &frame_ret);
	  n_room_left++;
	}
    }

  /* no point to dequeue further */
  if (!inflight || no_job_to_deq || !n_room_left)
    goto end_deq;

  n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
					   cryptodev_get_frame_n_elts,
					   cryptodev_post_dequeue,
					   (void **) &frame, 0, &n_success,
					   &dequeue_status);
  if (!n_deq)
    goto end_deq;

  inflight -= n_deq;
  no_job_to_deq = n_deq < frame->n_elts;
  /* we have to cache the frame */
  if (frame_ret || n_cached_frame || no_job_to_deq)
    {
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }
  else
    {
      frame->state = n_success == frame->n_elts ?
	VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
      frame_ret = frame;
    }

  /* see if we can dequeue more */
  while (inflight && n_room_left && !no_job_to_deq)
    {
      n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
					       cryptodev_get_frame_n_elts,
					       cryptodev_post_dequeue,
					       (void **) &frame, 0,
					       &n_success, &dequeue_status);
      if (!n_deq)
	break;
      inflight -= n_deq;
      no_job_to_deq = n_deq < frame->n_elts;
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }

end_deq:
  if (inflight < cet->inflight)
    {
      rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
      cet->inflight = inflight;
    }

  if (frame_ret)
    {
      *nb_elts_processed = frame_ret->n_elts;
      *enqueue_thread_idx = frame_ret->enqueue_thread_index;
    }

  return frame_ret;
}
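/* Thin wrappers binding the op type and AAD length; these are the enqueue
 * handlers registered with vnet in dpdk_cryptodev_init below. */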
static_always_inline int
cryptodev_enqueue_gcm_aad_8_enc (vlib_main_t *vm,
				 vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_8_dec (vlib_main_t *vm,
				 vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_DECRYPT);
}
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 * assign a cryptodev resource to a worker.
 * @param cet: the worker thread data
 * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 * @param op: the assignment method.
 * @return: 0 on success, negative number otherwise.
 */
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t *cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assign resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      cryptodev_reset_ctx (cet);
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
	  == 1)
	return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
	{
	  cinst = cmt->cryptodev_inst + idx;
	  if (cinst->dev_id == cet->cryptodev_id &&
	      cinst->q_id == cet->cryptodev_q)
	    break;
	}
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
	return -EINVAL;

      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      cryptodev_reset_ctx (cet);
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
static u8 *
format_cryptodev_inst (u8 *s, va_list *args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
	continue;

      if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
	{
	  s = format (s, "%u (%v)\n", thread_index,
		      vlib_worker_threads[thread_index].name);
	  break;
	}
    }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}
static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			      vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}
VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			     vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
	clib_error_return (0, "assign crypto resource for master thread");
      return error;
    }

  if (thread_index > vec_len (cmt->per_thread_data) ||
      inst_index > vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error = clib_error_return (0, "cryptodev_assign_resource returned %i",
				 ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
	"resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 i, q_count = 0;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
      qp_cfg.mp_session_private = 0;
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
					    info.device->numa_node);
      if (ret)
	{
	  clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
			cryptodev_id, i, ret);
	  break;
	}
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}
static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;

  return 0;
}
static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
			       u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
	   cap_param_size += increment)
	{
	  if ((*param_sizes)[i] == cap_param_size)
	    {
	      found_param = 1;
	      break;
	    }
	  if (increment == 0)
	    break;
	}
      if (!found_param)
	/* no such param_size in cap so delete this size in temp_cap params */
	vec_delete (*param_sizes, 1, i);
      else
	i++;
    }
}
static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}
static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
	switch (cap->sym.xform_type)
	  {
	  case RTE_CRYPTO_SYM_XFORM_CIPHER:
	    if (cap->sym.cipher.algo == temp_cap->cipher.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
		  cap->sym.cipher.key_size.max,
		  cap->sym.cipher.key_size.increment);
		if (vec_len (temp_cap->cipher.key_sizes) > 0)
		  cap_found = 1;
	      }
	    break;
	  case RTE_CRYPTO_SYM_XFORM_AUTH:
	    if (cap->sym.auth.algo == temp_cap->auth.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
		  cap->sym.auth.digest_size.max,
		  cap->sym.auth.digest_size.increment);
		if (vec_len (temp_cap->auth.digest_sizes) > 0)
		  cap_found = 1;
	      }
	    break;
	  case RTE_CRYPTO_SYM_XFORM_AEAD:
	    if (cap->sym.aead.algo == temp_cap->aead.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
		  cap->sym.aead.key_size.max,
		  cap->sym.aead.key_size.increment);
		remove_unsupported_param_size (
		  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
		  cap->sym.aead.aad_size.max,
		  cap->sym.aead.aad_size.increment);
		remove_unsupported_param_size (
		  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
		  cap->sym.aead.digest_size.max,
		  cap->sym.aead.digest_size.increment);
		if (vec_len (temp_cap->aead.key_sizes) > 0 &&
		    vec_len (temp_cap->aead.aad_sizes) > 0 &&
		    vec_len (temp_cap->aead.digest_sizes) > 0)
		  cap_found = 1;
	      }
	    break;
	  default:
	    break;
	  }
      if (cap_found)
	break;
      cap++;
    }

  return cap_found;
}
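/* Compute the capability set common to all configured devices: seed the
 * vector from the first device's advertised capabilities, then intersect
 * every other device's parameter sizes into it, deleting any capability
 * that ends up unsupported by at least one device. */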
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /*init capabilities vector*/
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      ASSERT (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC);
      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
	{
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
	  tmp_cap.cipher.key_sizes = 0;
	  tmp_cap.cipher.algo = cap->sym.cipher.algo;
	  for (param = cap->sym.cipher.key_size.min;
	       param <= cap->sym.cipher.key_size.max;
	       param += cap->sym.cipher.key_size.increment)
	    {
	      vec_add1 (tmp_cap.cipher.key_sizes, param);
	      if (cap->sym.cipher.key_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AUTH:
	  tmp_cap.auth.algo = cap->sym.auth.algo;
	  tmp_cap.auth.digest_sizes = 0;
	  for (param = cap->sym.auth.digest_size.min;
	       param <= cap->sym.auth.digest_size.max;
	       param += cap->sym.auth.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.auth.digest_sizes, param);
	      if (cap->sym.auth.digest_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AEAD:
	  tmp_cap.aead.key_sizes = 0;
	  tmp_cap.aead.aad_sizes = 0;
	  tmp_cap.aead.digest_sizes = 0;
	  tmp_cap.aead.algo = cap->sym.aead.algo;
	  for (param = cap->sym.aead.key_size.min;
	       param <= cap->sym.aead.key_size.max;
	       param += cap->sym.aead.key_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.key_sizes, param);
	      if (cap->sym.aead.key_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.aad_size.min;
	       param <= cap->sym.aead.aad_size.max;
	       param += cap->sym.aead.aad_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.aad_sizes, param);
	      if (cap->sym.aead.aad_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.digest_size.min;
	       param <= cap->sym.aead.digest_size.max;
	       param += cap->sym.aead.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.digest_sizes, param);
	      if (cap->sym.aead.digest_size.increment == 0)
		break;
	    }
	  break;
	default:
	  break;
	}

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
	{
	  dev_id = dev_inst->dev_id;
	  if (previous_dev_id != dev_id)
	    {
	      previous_dev_id = dev_id;
	      rte_cryptodev_info_get (dev_id, &dev_info);
	      dev_caps = &dev_info.capabilities[0];
	      cap_is_supported = cryptodev_remove_unsupported_param_sizes (
		&cmt->supported_caps[cap_id], dev_caps);
	      if (!cap_is_supported)
		{
		  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
		  /*no need to check other devices as this one doesn't support
		   * this temp_cap */
		  break;
		}
	    }
	}
      if (cap_is_supported)
	cap_id++;
    }
}
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough devices, stop cryptodev */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}
static void
cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst;
  u32 max_sess = 0, max_dp = 0;

  vec_foreach (cinst, cmt->cryptodev_inst)
    {
      u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id);
      u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);

      max_sess = clib_max (sess_sz, max_sess);
      max_dp = clib_max (dp_sz, max_dp);
    }

  *max_sess_sz = max_sess;
  *max_dp_sz = max_dp;
}
static void
dpdk_disable_cryptodev_engine (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_engine_thread_t *ptd;

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);

  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);

  vec_foreach (ptd, cmt->per_thread_data)
    {
      if (ptd->aad_buf)
	rte_free (ptd->aad_buf);
      if (ptd->cached_frame)
	rte_ring_free (ptd->cached_frame);
      if (ptd->reset_sess.crypto_sess)
	{
	  struct rte_mempool *mp =
	    rte_mempool_from_obj ((void *) ptd->reset_sess.crypto_sess);

	  rte_mempool_free (mp);
	  ptd->reset_sess.crypto_sess = 0;
	}
    }
}
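/* The "reset" session is a dummy AES-GCM session with an all-zero key,
 * created once per worker. It carries no real traffic; it only gives
 * cryptodev_reset_ctx () a valid session with which to re-arm the raw
 * data path context after an enqueue/dequeue error. */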
static clib_error_t *
create_reset_sess (cryptodev_engine_thread_t *ptd, u32 lcore, u32 numa,
		   u32 sess_sz)
{
  struct rte_crypto_sym_xform xform = { 0 };
  struct rte_crypto_aead_xform *aead_xform = &xform.aead;
  struct rte_cryptodev_sym_session *sess;
  struct rte_mempool *mp = 0;
  u8 key[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  u8 *name = 0;
  clib_error_t *error = 0;

  /* create session pool for the numa node */
  name = format (0, "vcryptodev_s_reset_%u_%u%c", numa, lcore, 0);
  mp = rte_cryptodev_sym_session_pool_create ((char *) name, 2, sess_sz, 0, 0,
					      numa);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto error_exit;
    }
  vec_free (name);

  xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
  aead_xform->aad_length = 8;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = 0;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key;
  aead_xform->key.length = 16;

  sess = rte_cryptodev_sym_session_create (mp);
  if (!sess)
    {
      error = clib_error_return (0, "failed to create session");
      goto error_exit;
    }

  if (rte_cryptodev_sym_session_init (ptd->cryptodev_id, sess, &xform, mp) < 0)
    {
      error = clib_error_return (0, "failed to create session private");
      goto error_exit;
    }

  ptd->reset_sess.crypto_sess = sess;

  return 0;

error_exit:
  if (mp)
    rte_mempool_free (mp);
  vec_free (name);

  return error;
}
clib_error_t *
dpdk_cryptodev_init (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  u32 sess_sz, dp_sz;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;

  cmt->iova_mode = rte_eal_iova_mode ();

  vec_validate (cmt->per_numa_data, vm->numa_node);

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  cryptodev_get_max_sz (&sess_sz, &dp_sz);

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      numa = vlib_mains[i]->numa_node;

      ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS *
					 CRYPTODEV_MAX_AAD_SIZE,
					 CLIB_CACHE_LINE_BYTES,
					 numa);
      if (ptd->aad_buf == 0)
	{
	  error = clib_error_return (0, "Failed to alloc aad buf");
	  goto err_handling;
	}

      ptd->aad_phy_addr = rte_malloc_virt2iova (ptd->aad_buf);

      ptd->ctx = rte_zmalloc_socket (0, dp_sz, CLIB_CACHE_LINE_BYTES, numa);
      if (!ptd->ctx)
	{
	  error = clib_error_return (0, "Failed to alloc raw dp ctx");
	  goto err_handling;
	}

      name = format (0, "cache_frame_ring_%u%u%c", numa, i, 0);
      ptd->cached_frame = rte_ring_create ((char *) name,
					   CRYPTODEV_DEQ_CACHE_SZ, numa,
					   RING_F_SC_DEQ | RING_F_SP_ENQ);
      if (ptd->cached_frame == 0)
	{
	  error = clib_error_return (0, "Failed to alloc frame ring");
	  goto err_handling;
	}
      vec_free (name);

      vec_validate (cmt->per_numa_data, numa);
      numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

      if (!numa_data->sess_pool)
	{
	  /* create session pool for the numa node */
	  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
	  mp = rte_cryptodev_sym_session_pool_create (
	    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
	  if (!mp)
	    {
	      error =
		clib_error_return (0, "Not enough memory for mp %s", name);
	      goto err_handling;
	    }
	  vec_free (name);

	  numa_data->sess_pool = mp;

	  /* create session private pool for the numa node */
	  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
	  mp =
	    rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
				0, 0, NULL, NULL, NULL, NULL, numa, 0);
	  if (!mp)
	    {
	      error =
		clib_error_return (0, "Not enough memory for mp %s", name);
	      vec_free (name);
	      goto err_handling;
	    }
	  vec_free (name);

	  numa_data->sess_priv_pool = mp;
	}

      error = create_reset_sess (ptd, i, numa, sess_sz);
      if (error)
	goto err_handling;

      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
	cryptodev_enqueue_gcm_aad_##f##_enc, cryptodev_frame_dequeue);        \
      vnet_crypto_register_async_handler (                                    \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
	cryptodev_enqueue_gcm_aad_##f##_dec, cryptodev_frame_dequeue);        \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

/* clang-format off */
#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
	cryptodev_enqueue_linked_alg_enc, cryptodev_frame_dequeue);           \
      vnet_crypto_register_async_handler (                                    \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
	cryptodev_enqueue_linked_alg_dec, cryptodev_frame_dequeue);           \
    }
  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
  /* clang-format on */

  /* this engine is only enabled when cryptodev device(s) are present in
   * startup.conf. Assume it is wanted to be used, turn on async mode here.
   */
  vnet_crypto_request_async_mode (1);
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);

  return error;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */