/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>
#undef always_inline
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_MAX_INFLIGHT	(CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_AAD_MASK	(CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_DEQ_CACHE_SZ	32
#define CRYPTODEV_NB_SESSION	10240
#define CRYPTODEV_MAX_AAD_SIZE	16
#define CRYPTODEV_MAX_N_SGL	8 /**< maximum number of segments */
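
/*
 * Note: CRYPTODEV_NB_CRYPTO_OPS must stay a power of two so that
 * CRYPTODEV_AAD_MASK can wrap the per-thread AAD buffer index with a
 * single AND.  Each in-flight op owns one 16-byte AAD slot; a sketch of
 * the indexing done by the GCM enqueue path below:
 *
 *   u32 off = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
 *   u8 *slot = cet->aad_buf + off;   // 16B slot, reused after wrap
 */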
/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
#define foreach_vnet_aead_crypto_conversion \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)
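
/*
 * Illustration only: each consumer redefines `_' before expanding the
 * table above.  For the first row, the capability check further below
 * effectively instantiates:
 *
 *   cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 *   cap_idx.algo.aead = RTE_CRYPTO_AEAD_AES_GCM;  // IV 12, tag 16, AAD 8
 */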
/**
 * crypto (alg, cryptodev_alg), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg	\
  _ (AES_128_CBC, AES_CBC, SHA1, 12)		\
  _ (AES_192_CBC, AES_CBC, SHA1, 12)		\
  _ (AES_256_CBC, AES_CBC, SHA1, 12)		\
  _ (AES_128_CBC, AES_CBC, SHA224, 14)		\
  _ (AES_192_CBC, AES_CBC, SHA224, 14)		\
  _ (AES_256_CBC, AES_CBC, SHA224, 14)		\
  _ (AES_128_CBC, AES_CBC, SHA256, 16)		\
  _ (AES_192_CBC, AES_CBC, SHA256, 16)		\
  _ (AES_256_CBC, AES_CBC, SHA256, 16)		\
  _ (AES_128_CBC, AES_CBC, SHA384, 24)		\
  _ (AES_192_CBC, AES_CBC, SHA384, 24)		\
  _ (AES_256_CBC, AES_CBC, SHA384, 24)		\
  _ (AES_128_CBC, AES_CBC, SHA512, 32)		\
  _ (AES_192_CBC, AES_CBC, SHA512, 32)		\
  _ (AES_256_CBC, AES_CBC, SHA512, 32)
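
/*
 * The digest sizes above are the truncated HMAC lengths used by IPsec
 * (e.g. 12 bytes = HMAC-SHA1-96, 16 bytes = HMAC-SHA-256-128,
 * 32 bytes = HMAC-SHA-512-256), not the full hash output sizes.
 */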
typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES,
} cryptodev_op_type_t;
typedef struct
{
  union rte_cryptodev_session_ctx **keys;
} cryptodev_key_t;

typedef struct
{
  u32 dev_id;
  u32 q_id;
  char *desc;
} cryptodev_inst_t;
typedef struct
{
  struct rte_mempool *sess_pool;
  struct rte_mempool *sess_priv_pool;
} cryptodev_numa_data_t;
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  vlib_buffer_t *b[VNET_CRYPTO_FRAME_SIZE];
  struct rte_crypto_raw_dp_ctx *ctx;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_ring *cached_frame;
  u8 *aad_buf;
  u64 aad_phy_addr;
  u16 aad_index;
  u16 cryptodev_id;
  u16 cryptodev_q;
  u16 inflight;
  union rte_cryptodev_session_ctx reset_sess; /* session data for reset ctx */
} cryptodev_engine_thread_t;
typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;
static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
		    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = 0;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d) \
  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:\
    cipher_algo = RTE_CRYPTO_CIPHER_##b; \
    auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
    digest_len = d; \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = 0;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
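
/*
 * A minimal usage sketch (assuming a linked AES-128-CBC + HMAC-SHA1 key):
 *
 *   struct rte_crypto_sym_xform xforms[2] = { { 0 } };
 *   if (prepare_linked_xform (xforms, CRYPTODEV_OP_TYPE_ENCRYPT, key))
 *     return -1;
 *
 * For encrypt, xforms[0] is the cipher transform chained to the auth
 * transform in xforms[1]; for decrypt the order is reversed so the
 * device verifies the digest before deciphering.
 */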
static_always_inline void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}
static_always_inline int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
			  u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
  u32 numa_node = vm->numa_node;
  int ret = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
  sess_pool = numa_data->sess_pool;
  sess_priv_pool = numa_data->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
			      aad_len);
  if (ret)
    return 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);

      /* if the session is already configured for the driver type, avoid
	 configuring it again to increase the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[cdev->driver_id].data &&
	  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[cdev->driver_id].data)
	continue;

      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc, sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec, sess_priv_pool);
      if (ret < 0)
	goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

  return 0;

clear_key:
  cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
  cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
  return ret;
}
static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f) \
  if (alg == VNET_CRYPTO_ALG_##a) \
    return 0;

  foreach_vnet_aead_crypto_conversion
#undef _
  return -1;
}
static_always_inline void
cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      vec_foreach_index (i, cmt->per_numa_data)
	{
	  if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess)
	    {
	      cryptodev_session_del (
		ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess);
	      cryptodev_session_del (
		ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess);

	      CLIB_MEMORY_STORE_BARRIER ();
	      ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess = 0;
	      ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess = 0;
	    }
	}
      return;
    }

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key))
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

static void
cryptodev_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
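
/*
 * The key handler always provisions sessions with an 8-byte AAD (ESP
 * without extended sequence numbers).  If a frame later arrives with a
 * different AAD length (e.g. 12 bytes when ESN is on), the GCM enqueue
 * path below tears the session down and lazily re-creates it with the
 * new AAD length.
 */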
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t * f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
  f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
}
static_always_inline int
cryptodev_frame_build_sgl (vlib_main_t * vm, enum rte_iova_mode iova_mode,
			   struct rte_crypto_vec *data_vec,
			   u16 * n_seg, vlib_buffer_t * b, u32 size)
{
  struct rte_crypto_vec *vec = data_vec + 1;
  if (vlib_buffer_chain_linearize (vm, b) > CRYPTODEV_MAX_N_SGL)
    return -1;

  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
    {
      u32 len;
      b = vlib_get_buffer (vm, b->next_buffer);
      len = clib_min (b->current_length, size);
      vec->base = (void *) vlib_buffer_get_current (b);
      if (iova_mode == RTE_IOVA_VA)
	vec->iova = pointer_to_uword (vec->base);
      else
	vec->iova = vlib_buffer_get_current_pa (vm, b);

      vec->len = len;
      size -= len;
      vec++;
      *n_seg += 1;
    }

  if (size)
    return -1;

  return 0;
}
static_always_inline u64
compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t * fe, i16 * min_ofs,
			u32 * max_end)
{
  union rte_crypto_sym_ofs ofs;
  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
  u32 integ_end = fe->integ_start_offset + fe->crypto_total_length +
    fe->integ_length_adj;

  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
  *max_end = clib_max (crypto_end, integ_end);

  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
  ofs.ofs.cipher.tail = *max_end - crypto_end;
  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
  ofs.ofs.auth.tail = *max_end - integ_end;

  return ofs.raw;
}
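
/*
 * Worked example (hypothetical ESP-like element): crypto_start_offset =
 * 16, crypto_total_length = 64, integ_start_offset = 0,
 * integ_length_adj = 16.  Then:
 *
 *   crypto_end = 16 + 64 = 80;   integ_end = 0 + 64 + 16 = 80
 *   *min_ofs = 0;                *max_end = 80
 *   ofs.ofs.cipher.head = 16;    ofs.ofs.cipher.tail = 0
 *   ofs.ofs.auth.head   = 0;     ofs.ofs.auth.tail   = 0
 *
 * i.e. one contiguous [0, 80) region is handed to the PMD, with the
 * cipher skipping the 16-byte authenticated-only prefix.
 */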
static_always_inline void
cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
{
  rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q,
				      cet->ctx, RTE_CRYPTO_OP_WITH_SESSION,
				      cet->reset_sess, 0 /* is_update */);
}
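
/*
 * The raw data-path API needs a valid session to (re)configure a
 * context, even when the goal is only to return it to a known state
 * after an error; hence the per-thread dummy reset_sess created at
 * init time (see create_reset_sess () below).
 */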
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
				     vnet_crypto_async_frame_t * frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  struct rte_crypto_vec *vec;
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
  vlib_buffer_t **b;
  u32 n_elts;
  u32 last_key_index = ~0;
  i16 min_ofs;
  u32 max_end;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  vec = cet->vec;
  b = cet->b;
  fe = frame->elts;

  while (n_elts)
    {
      union rte_crypto_sym_ofs cofs;
      u16 n_seg = 1;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	  vlib_prefetch_buffer_header (b[2], LOAD);
	}

      if (PREDICT_FALSE (last_key_index != fe->key_index))
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

	  if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
			     0))
	    {
	      status = cryptodev_session_create (vm, fe->key_index, 0);
	      if (PREDICT_FALSE (status < 0))
		goto error_exit;
	    }

	  status = rte_cryptodev_configure_raw_dp_ctx (
	    cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
	    RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
	    1 /* is_update */);
	  if (PREDICT_FALSE (status < 0))
	    goto error_exit;

	  last_key_index = fe->key_index;
	}

      cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

      vec->len = max_end - min_ofs;
      if (cmt->iova_mode == RTE_IOVA_VA)
	{
	  vec[0].base = (void *) (b[0]->data + min_ofs);
	  vec[0].iova = pointer_to_uword (b[0]->data) + min_ofs;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = pointer_to_uword (fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = pointer_to_uword (fe->tag);
	}
      else
	{
	  vec[0].base = (void *) (b[0]->data + min_ofs);
	  vec[0].iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = vlib_physmem_get_pa (vm, fe->digest);
	}

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	{
	  vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs;
	  if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
					 max_end - min_ofs - vec->len) < 0)
	    goto error_exit;
	}

      status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
					  &digest_vec, 0, (void *) frame);
      if (PREDICT_FALSE (status < 0))
	goto error_exit;

      b++;
      fe++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    {
      cryptodev_reset_ctx (cet);
      return -1;
    }

  cet->inflight += frame->n_elts;
  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
				   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}
static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
			     vnet_crypto_async_frame_t * frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  u32 n_elts;
  union rte_crypto_sym_ofs cofs;
  struct rte_crypto_vec *vec;
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
  u32 last_key_index = ~0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  vec = cet->vec;
  fe = frame->elts;
  b = cet->b;
  cofs.raw = 0;

  while (n_elts)
    {
      u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
      u16 n_seg = 1;

      if (n_elts > 1)
	{
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	}

      if (PREDICT_FALSE (last_key_index != fe->key_index))
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

	  if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
			     0))
	    {
	      status = cryptodev_session_create (vm, fe->key_index, aad_len);
	      if (PREDICT_FALSE (status < 0))
		goto error_exit;
	    }

	  if (PREDICT_FALSE ((u8) key->keys[vm->numa_node][op_type]
			       .crypto_sess->opaque_data != aad_len))
	    {
	      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
				      fe->key_index, aad_len);
	      status = cryptodev_session_create (vm, fe->key_index, aad_len);
	      if (PREDICT_FALSE (status < 0))
		goto error_exit;
	    }

	  status = rte_cryptodev_configure_raw_dp_ctx (
	    cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
	    RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
	    1 /* is_update */);
	  if (PREDICT_FALSE (status < 0))
	    goto error_exit;

	  last_key_index = fe->key_index;
	}

      if (cmt->iova_mode == RTE_IOVA_VA)
	{
	  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
	  vec[0].iova = pointer_to_uword (vec[0].base);
	  vec[0].len = fe->crypto_total_length;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = pointer_to_uword (fe->iv);
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = pointer_to_uword (fe->tag);
	  aad_vec.va = (void *) (cet->aad_buf + aad_offset);
	  aad_vec.iova = cet->aad_phy_addr + aad_offset;
	}
      else
	{
	  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
	  vec[0].iova =
	    vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
	  vec[0].len = fe->crypto_total_length;
	  iv_vec.va = (void *) fe->iv;
	  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
	  aad_vec.va = (void *) (cet->aad_buf + aad_offset);
	  aad_vec.iova = cet->aad_phy_addr + aad_offset;
	  digest_vec.va = (void *) fe->tag;
	  digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
	}

      if (aad_len == 8)
	*(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
      else
	{
	  /* aad_len == 12 */
	  *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
	  *(u32 *) (cet->aad_buf + aad_offset + 8) = *(u32 *) (fe->aad + 8);
	}

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	{
	  vec[0].len = b[0]->current_data + b[0]->current_length -
	    fe->crypto_start_offset;
	  if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg,
					 b[0],
					 fe->crypto_total_length -
					 vec[0].len) < 0)
	    goto error_exit;
	}

      status =
	rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
				   &digest_vec, &aad_vec, (void *) frame);
      if (PREDICT_FALSE (status < 0))
	goto error_exit;

      fe++;
      b++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    goto error_exit;

  cet->inflight += frame->n_elts;

  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
				   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}
static u32
cryptodev_get_frame_n_elts (void *frame)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
  return f->n_elts;
}

static void
cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;

  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
    VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
}
#define GET_RING_OBJ(r, pos, f) do { \
	vnet_crypto_async_frame_t **ring = (void *)&r[1]; \
	f = ring[(r->cons.head + pos) & r->mask]; \
} while (0)
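
/*
 * GET_RING_OBJ peeks at the pos-th cached frame without consuming it.
 * It relies on the rte_ring layout placing the object table directly
 * after the ring header (&r[1]), which is internal to DPDK; a frame is
 * only truly dequeued once all of its elements have been collected.
 */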
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
			 u32 * enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame, *frame_ret = 0;
  u32 n_deq, n_success;
  u32 n_cached_frame = rte_ring_count (cet->cached_frame), n_room_left;
  u8 no_job_to_deq = 0;
  u16 inflight = cet->inflight;
  int dequeue_status;

  n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;

  if (n_cached_frame)
    {
      u32 i;
      for (i = 0; i < n_cached_frame; i++)
	{
	  vnet_crypto_async_frame_t *f;
	  void *f_ret;
	  enum rte_crypto_op_status op_status;
	  u8 n_left, err, j;

	  GET_RING_OBJ (cet->cached_frame, i, f);

	  if (i < n_cached_frame - 2)
	    {
	      vnet_crypto_async_frame_t *f1, *f2;
	      GET_RING_OBJ (cet->cached_frame, i + 1, f1);
	      GET_RING_OBJ (cet->cached_frame, i + 2, f2);
	      CLIB_PREFETCH (f1, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (f2, CLIB_CACHE_LINE_BYTES, LOAD);
	    }

	  n_left = f->state & 0x7f;
	  err = f->state & 0x80;

	  for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
	    {
	      int ret;
	      f_ret = rte_cryptodev_raw_dequeue (cet->ctx, &ret, &op_status);

	      if (!f_ret)
		break;

	      switch (op_status)
		{
		case RTE_CRYPTO_OP_STATUS_SUCCESS:
		  f->elts[j].status = VNET_CRYPTO_OP_STATUS_COMPLETED;
		  break;
		default:
		  f->elts[j].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
		  err |= 1 << 7;
		}

	      inflight--;
	    }

	  if (j == f->n_elts)
	    {
	      if (i == 0)
		{
		  frame_ret = f;
		  f->state = err ? VNET_CRYPTO_FRAME_STATE_ELT_ERROR :
		    VNET_CRYPTO_FRAME_STATE_SUCCESS;
		}
	      else
		{
		  f->state = f->n_elts - j;
		  f->state |= err;
		}
	      if (inflight)
		continue;
	    }

	  /* at this point f is not completely dequeued and no more jobs
	   * can be dequeued
	   */
	  f->state = f->n_elts - j;
	  f->state |= err;
	  no_job_to_deq = 1;
	  break;
	}

      if (frame_ret)
	{
	  rte_ring_sc_dequeue (cet->cached_frame, (void **) &frame_ret);
	  n_room_left++;
	}
    }

  /* no point to dequeue further */
  if (!inflight || no_job_to_deq || !n_room_left)
    goto end_deq;

  n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
					   cryptodev_get_frame_n_elts,
					   cryptodev_post_dequeue,
					   (void **) &frame, 0, &n_success,
					   &dequeue_status);
  if (!n_deq)
    goto end_deq;

  inflight -= n_deq;
  no_job_to_deq = n_deq < frame->n_elts;
  /* we have to cache the frame */
  if (frame_ret || n_cached_frame || no_job_to_deq)
    {
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }
  else
    {
      frame->state = n_success == frame->n_elts ?
	VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
      frame_ret = frame;
    }

  /* see if we can dequeue more */
  while (inflight && n_room_left && !no_job_to_deq)
    {
      n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
					       cryptodev_get_frame_n_elts,
					       cryptodev_post_dequeue,
					       (void **) &frame, 0,
					       &n_success, &dequeue_status);
      if (!n_deq)
	break;
      inflight -= n_deq;
      no_job_to_deq = n_deq < frame->n_elts;
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }

end_deq:
  if (inflight < cet->inflight)
    {
      rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
      cet->inflight = inflight;
    }

  if (frame_ret)
    {
      *nb_elts_processed = frame_ret->n_elts;
      *enqueue_thread_idx = frame_ret->enqueue_thread_index;
    }

  return frame_ret;
}
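
/*
 * While a frame sits in cached_frame its `state' field is borrowed as
 * scratch space: bits 0-6 hold the number of elements still to be
 * dequeued (n_elts is at most VNET_CRYPTO_FRAME_SIZE, so it fits) and
 * bit 7 is a sticky per-frame error flag, matching the `& 0x7f' /
 * `& 0x80' decoding above.  The field is rewritten to a proper
 * VNET_CRYPTO_FRAME_STATE_* value before the frame is handed back.
 */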
static_always_inline int
cryptodev_enqueue_gcm_aad_8_enc (vlib_main_t * vm,
				 vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_enc (vlib_main_t * vm,
				  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_8_dec (vlib_main_t * vm,
				 vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_dec (vlib_main_t * vm,
				  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t * vm,
				  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t * vm,
				  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_DECRYPT);
}
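
/*
 * These thin wrappers exist because vnet async crypto handlers have a
 * fixed (vm, frame) signature; the op type and AAD length are baked in
 * at registration time (see dpdk_cryptodev_init () below).
 */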
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;
/**
 * assign a cryptodev resource to a worker.
 * @param cet: the worker thread data
 * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 * @param op: the assignment method.
 * @return: 0 if successful, negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no inflight op is in the
   * queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      cryptodev_reset_ctx (cet);
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
	  == 1)
	return -EBUSY;
      vec_foreach_index (idx, cmt->cryptodev_inst)
	{
	  cinst = cmt->cryptodev_inst + idx;
	  if (cinst->dev_id == cet->cryptodev_id &&
	      cinst->q_id == cet->cryptodev_q)
	    break;
	}
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
	return -EINVAL;
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      cryptodev_reset_ctx (cet);
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
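
/*
 * Typical call flow (sketch): at plugin init every worker grabs a free
 * queue pair with
 *
 *   cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
 *
 * and an operator may later move a worker onto a specific instance via
 * CRYPTODEV_RESOURCE_ASSIGN_UPDATE through the CLI defined below.
 */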
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
	continue;

      if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
	{
	  s = format (s, "%u (%v)\n", thread_index,
		      vlib_worker_threads[thread_index].name);
	  break;
	}
    }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}
static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			      vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			     vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
	clib_error_return (0, "cannot assign crypto resource to master thread");
      return error;
    }

  if (thread_index > vec_len (cmt->per_thread_data) ||
      inst_index > vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error = clib_error_return (0, "cryptodev_assign_resource returned %i",
				 ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
	"resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
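
/*
 * Example session (illustrative output; device names depend on the
 * cryptodevs actually probed):
 *
 *   vpp# show cryptodev assignment
 *   No.  Name                     Queue-id  Assigned-to
 *   0    crypto_aesni_mb0         0         1 (vpp_wk_0)
 *   1    crypto_aesni_mb0         1         free
 *   vpp# set cryptodev assignment thread 1 resource 1
 */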
static int
check_cryptodev_alg_support (u32 dev_id)
{
  const struct rte_cryptodev_symmetric_capability *cap;
  struct rte_cryptodev_sym_capability_idx cap_idx;

#define _(a, b, c, d, e, f) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \
  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_##b##_##c; \
  else \
    { \
      if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \
	return -RTE_CRYPTO_##b##_##c; \
    }

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \
  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_CIPHER_##b; \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \
  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_AUTH_##c;

  foreach_cryptodev_link_async_alg
#undef _
  return 0;
}
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  u32 i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
    return -1;

  ret = check_cryptodev_alg_support (cryptodev_id);
  if (ret != 0)
    {
      clib_warning (
	"Cryptodev: device %u does not support required algorithms",
	cryptodev_id);
      return ret;
    }

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
      qp_cfg.mp_session_private = 0;
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
					    info.device->numa_node);
      if (ret)
	{
	  clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
			cryptodev_id, i, ret);
	  break;
	}
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}
static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;

  return 0;
}
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;
  int ret;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      ret = cryptodev_configure (vm, i);
      if (ret)
	continue;
    }

  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough queues, disable the cryptodev engine */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}
static void
cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst;
  u32 max_sess = 0, max_dp = 0;

  vec_foreach (cinst, cmt->cryptodev_inst)
    {
      u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id);
      u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);

      max_sess = clib_max (sess_sz, max_sess);
      max_dp = clib_max (dp_sz, max_dp);
    }

  *max_sess_sz = max_sess;
  *max_dp_sz = max_dp;
}
static void
dpdk_disable_cryptodev_engine (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_engine_thread_t *ptd;

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);

  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);

  vec_foreach (ptd, cmt->per_thread_data)
    {
      if (ptd->aad_buf)
	rte_free (ptd->aad_buf);
      if (ptd->cached_frame)
	rte_ring_free (ptd->cached_frame);
      if (ptd->reset_sess.crypto_sess)
	{
	  struct rte_mempool *mp =
	    rte_mempool_from_obj ((void *) ptd->reset_sess.crypto_sess);

	  rte_mempool_free (mp);
	  ptd->reset_sess.crypto_sess = 0;
	}
    }
}
1382 static clib_error_t *
1383 create_reset_sess (cryptodev_engine_thread_t *ptd, u32 lcore, u32 numa,
1386 struct rte_crypto_sym_xform xform = { 0 };
1387 struct rte_crypto_aead_xform *aead_xform = &xform.aead;
1388 struct rte_cryptodev_sym_session *sess;
1389 struct rte_mempool *mp = 0;
1390 u8 key[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1392 clib_error_t *error = 0;
1394 /* create session pool for the numa node */
1395 name = format (0, "vcryptodev_s_reset_%u_%u", numa, lcore);
1396 mp = rte_cryptodev_sym_session_pool_create ((char *) name, 2, sess_sz, 0, 0,
1400 error = clib_error_return (0, "Not enough memory for mp %s", name);
1405 xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1406 aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
1407 aead_xform->op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
1408 aead_xform->aad_length = 8;
1409 aead_xform->digest_length = 16;
1410 aead_xform->iv.offset = 0;
1411 aead_xform->iv.length = 12;
1412 aead_xform->key.data = key;
1413 aead_xform->key.length = 16;
1415 sess = rte_cryptodev_sym_session_create (mp);
1418 error = clib_error_return (0, "failed to create session");
1422 if (rte_cryptodev_sym_session_init (ptd->cryptodev_id, sess, &xform, mp) < 0)
1424 error = clib_error_return (0, "failed to create session private");
1428 ptd->reset_sess.crypto_sess = sess;
1434 rte_mempool_free (mp);
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  u32 sess_sz, dp_sz;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  vec_validate (cmt->per_numa_data, vm->numa_node);

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  cryptodev_get_max_sz (&sess_sz, &dp_sz);

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      numa = vlib_mains[i]->numa_node;

      ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS *
					 CRYPTODEV_MAX_AAD_SIZE,
					 CLIB_CACHE_LINE_BYTES,
					 numa);
      if (ptd->aad_buf == 0)
	{
	  error = clib_error_return (0, "Failed to alloc aad buf");
	  goto err_handling;
	}

      ptd->aad_phy_addr = rte_malloc_virt2iova (ptd->aad_buf);

      ptd->ctx = rte_zmalloc_socket (0, dp_sz, CLIB_CACHE_LINE_BYTES, numa);
      if (!ptd->ctx)
	{
	  error = clib_error_return (0, "Failed to alloc raw dp ctx");
	  goto err_handling;
	}

      name = format (0, "cache_frame_ring_%u%u", numa, i);
      ptd->cached_frame = rte_ring_create ((char *) name,
					   CRYPTODEV_DEQ_CACHE_SZ, numa,
					   RING_F_SC_DEQ | RING_F_SP_ENQ);
      if (ptd->cached_frame == 0)
	{
	  error = clib_error_return (0, "Failed to alloc frame ring");
	  goto err_handling;
	}
      vec_free (name);

      vec_validate (cmt->per_numa_data, numa);
      numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

      if (!numa_data->sess_pool)
	{
	  /* create session pool for the numa node */
	  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
	  mp = rte_cryptodev_sym_session_pool_create (
	    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
	  if (!mp)
	    {
	      error =
		clib_error_return (0, "Not enough memory for mp %s", name);
	      goto err_handling;
	    }
	  vec_free (name);

	  numa_data->sess_pool = mp;

	  /* create session private pool for the numa node */
	  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
	  mp =
	    rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
				0, 0, NULL, NULL, NULL, NULL, numa, 0);
	  if (!mp)
	    {
	      error =
		clib_error_return (0, "Not enough memory for mp %s", name);
	      vec_free (name);
	      goto err_handling;
	    }
	  vec_free (name);

	  numa_data->sess_priv_pool = mp;
	}

      error = create_reset_sess (ptd, i, numa, sess_sz);
      if (error)
	goto err_handling;

      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
	cryptodev_enqueue_gcm_aad_##f##_enc,\
	cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
	cryptodev_enqueue_gcm_aad_##f##_dec, \
	cryptodev_frame_dequeue);

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \
	cryptodev_enqueue_linked_alg_enc, \
	cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \
	cryptodev_enqueue_linked_alg_dec, \
	cryptodev_frame_dequeue);

  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  /* this engine is only enabled when cryptodev device(s) are presented in
   * startup.conf. Assume the user wants it to be used, so turn on async
   * mode here.
   */
  vnet_crypto_request_async_mode (1);
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);

  return error;
}
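
/*
 * A hypothetical startup.conf fragment that would surface a software
 * cryptodev for this engine to claim (one queue pair per worker):
 *
 *   dpdk {
 *     vdev crypto_aesni_mb0
 *   }
 */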
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */