2 *------------------------------------------------------------------
3 * Copyright (c) 2020 Intel and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/ipsec/ipsec.h>
22 #include <vpp/app/version.h>
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_config.h>
34 #include "cryptodev.h"
/* Redefine always_inline for this translation unit: plain static inline
 * vs. forced inlining.  NOTE(review): in the full file these two defines
 * are normally selected by a CLIB_DEBUG preprocessor conditional (the
 * guard lines are not visible in this excerpt) -- confirm upstream. */
37 #define always_inline static inline
39 #define always_inline static inline __attribute__ ((__always_inline__))
42 cryptodev_main_t cryptodev_main;
/* Populate a DPDK symmetric transform for an AEAD session
 * (AES-GCM or CHACHA20-POLY1305).
 * @param xform    transform to fill; zeroed first
 * @param op_type  CRYPTODEV_OP_TYPE_ENCRYPT or CRYPTODEV_OP_TYPE_DECRYPT
 * @param key      VPP crypto key supplying the raw key bytes
 * (aad_len parameter and the unsupported-alg error return are present in
 *  the full file but not visible in this excerpt.) */
44 static_always_inline int
45 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
46 		    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
49 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
50 memset (xform, 0, sizeof (*xform));
51 xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
/* Map the VPP algorithm enum onto the DPDK AEAD algorithm. */
54 if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
55 key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
56 key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
58 aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
60 else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
62 aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
67 aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
68 RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
69 aead_xform->aad_length = aad_len;
/* 16-byte tag; 12-byte IV read from the fixed per-op offset. */
70 aead_xform->digest_length = 16;
71 aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
72 aead_xform->iv.length = 12;
73 aead_xform->key.data = key->data;
74 aead_xform->key.length = vec_len (key->data);
/* Build a two-element cipher+auth transform chain for a "linked" VPP key
 * (separate crypto and integrity keys, e.g. AES-CBC + SHA-HMAC).
 * Encrypt: cipher first, then auth (generate digest).
 * Decrypt: auth first (verify digest), then cipher.
 * @param xforms  array of at least two transforms, chained via ->next
 * @return 0 on success (error returns not visible in this excerpt) */
79 static_always_inline int
80 prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
81 cryptodev_op_type_t op_type,
82 const vnet_crypto_key_t *key)
84 struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
85 vnet_crypto_key_t *key_cipher, *key_auth;
86 enum rte_crypto_cipher_algorithm cipher_algo = ~0;
87 enum rte_crypto_auth_algorithm auth_algo = ~0;
/* Resolve the two component keys referenced by the linked key. */
90 key_cipher = vnet_crypto_get_key (key->index_crypto);
91 key_auth = vnet_crypto_get_key (key->index_integ);
92 if (!key_cipher || !key_auth)
/* Order of the two transforms depends on direction. */
95 if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
97 xform_cipher = xforms;
98 xform_auth = xforms + 1;
99 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
100 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
104 xform_cipher = xforms + 1;
106 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
107 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
110 xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
111 xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
112 xforms->next = xforms + 1;
/* Translate VPP's linked async alg enum to DPDK cipher/auth enums via
 * the foreach_cryptodev_link_async_alg X-macro table. */
114 switch (key->async_alg)
116 #define _(a, b, c, d, e) \
117 case VNET_CRYPTO_ALG_##a##_##d##_TAG##e: \
118 cipher_algo = RTE_CRYPTO_CIPHER_##b; \
119 auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC; \
123 foreach_cryptodev_link_async_alg
129 xform_cipher->cipher.algo = cipher_algo;
130 xform_cipher->cipher.key.data = key_cipher->data;
131 xform_cipher->cipher.key.length = vec_len (key_cipher->data);
/* 16-byte IV at the fixed per-op offset. */
132 xform_cipher->cipher.iv.length = 16;
133 xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;
135 xform_auth->auth.algo = auth_algo;
136 xform_auth->auth.digest_length = digest_len;
137 xform_auth->auth.key.data = key_auth->data;
138 xform_auth->auth.key.length = vec_len (key_auth->data);
/* Release a cryptodev session on every device.
 * DPDK >= 22.11: rte_cryptodev_sym_session_free() takes the device id and
 * frees per-device data; stop once one device reports success (0).
 * Older DPDK: clear per-device private data on each device, then free the
 * session object once (the #else/#endif lines are not visible here). */
143 static_always_inline void
144 cryptodev_session_del (cryptodev_session_t *sess)
151 n_devs = rte_cryptodev_count ();
153 for (i = 0; i < n_devs; i++)
154 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
155 if (rte_cryptodev_sym_session_free (i, sess) == 0)
158 rte_cryptodev_sym_session_clear (i, sess);
160 rte_cryptodev_sym_session_free (sess);
/* Return non-zero if every probed device supports this cipher algorithm
 * with the given key size, by scanning the pre-computed common capability
 * vector (cmt->supported_caps). */
165 check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
167 cryptodev_main_t *cmt = &cryptodev_main;
168 cryptodev_capability_t *vcap;
171 vec_foreach (vcap, cmt->supported_caps)
173 if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
175 if (vcap->cipher.algo != algo)
/* Match the requested key size against the capability's size list. */
177 vec_foreach (s, vcap->cipher.key_sizes)
/* Return non-zero if the common capability set supports this auth
 * algorithm with the given digest size. */
186 check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
188 cryptodev_main_t *cmt = &cryptodev_main;
189 cryptodev_capability_t *vcap;
192 vec_foreach (vcap, cmt->supported_caps)
194 if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
196 if (vcap->auth.algo != algo)
/* Match the requested digest size against the capability's size list. */
198 vec_foreach (s, vcap->auth.digest_sizes)
199 if (*s == digest_size)
/* Return non-zero if the common capability set supports this AEAD
 * algorithm with the given key, digest and AAD sizes.  All three
 * parameters must match within the same capability entry. */
206 static_always_inline int
207 check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
208 u32 digest_size, u32 aad_size)
210 cryptodev_main_t *cmt = &cryptodev_main;
211 cryptodev_capability_t *vcap;
213 u32 key_match = 0, digest_match = 0, aad_match = 0;
215 vec_foreach (vcap, cmt->supported_caps)
217 if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
219 if (vcap->aead.algo != algo)
221 vec_foreach (s, vcap->aead.digest_sizes)
222 if (*s == digest_size)
227 vec_foreach (s, vcap->aead.key_sizes)
233 vec_foreach (s, vcap->aead.aad_sizes)
/* Supported only when key, digest and AAD size all matched. */
241 if (key_match == 1 && digest_match == 1 && aad_match == 1)
/* Check whether a VPP crypto key's algorithm (linked cipher+auth pair, or
 * AEAD) is supported by all probed cryptodevs.  Returns non-zero when
 * supported.  Uses the same X-macro tables as the xform preparation. */
247 static_always_inline int
248 cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
252 if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
254 switch (key->async_alg)
256 #define _(a, b, c, d, e) \
257 case VNET_CRYPTO_ALG_##a##_##d##_TAG##e: \
258 if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) && \
259 check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e)) \
261 foreach_cryptodev_link_async_alg
/* AEAD keys: b..g carry key/digest/aad sizes from the conversion table. */
268 #define _(a, b, c, d, e, f, g) \
269 if (key->alg == VNET_CRYPTO_ALG_##a) \
271 if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f)) \
274 foreach_vnet_aead_crypto_conversion
277 if (matched < 2) return 0;
/* Key-operation handler: reacts to VPP key add/del/modify events.
 * On DEL/MODIFY, tear down any existing per-NUMA encrypt/decrypt sessions
 * for the key.  On ADD, validate algorithm support and size the per-key
 * session table (actual sessions are created lazily elsewhere). */
283 cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
284 vnet_crypto_key_index_t idx, u32 aad_len)
286 cryptodev_main_t *cmt = &cryptodev_main;
287 vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
288 cryptodev_key_t *ckey = 0;
291 vec_validate (cmt->keys, idx);
292 ckey = vec_elt_at_index (cmt->keys, idx);
294 if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
296 if (idx >= vec_len (cmt->keys))
/* Free both direction sessions on every NUMA node that has them. */
299 vec_foreach_index (i, cmt->per_numa_data)
305 if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
307 cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
308 cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);
/* Barrier so workers never observe a freed session pointer. */
310 CLIB_MEMORY_STORE_BARRIER ();
311 ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
312 ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
320 /* do not create session for unsupported alg */
321 if (cryptodev_check_supported_vnet_alg (key) == 0)
/* Reserve a session slot per NUMA node and per op direction. */
324 vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
325 vec_foreach_index (i, ckey->keys)
326 vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
/* vnet crypto key handler entry point; delegates with the default
 * 8-byte AAD length (ESP without extended sequence numbers). */
330 cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
331 vnet_crypto_key_index_t idx)
333 cryptodev_sess_handler (vm, kop, idx, 8);
/* Allocate the DPDK session mempool(s) for one NUMA node.
 * DPDK >= 22.11 uses a single pool sized with the driver's private session
 * size; older DPDK additionally needs a separate private-data pool sized
 * for every driver.  On failure, frees whatever was allocated and returns
 * a clib error (cleanup label lines are partially missing in this view). */
337 allocate_session_pools (u32 numa_node,
338 cryptodev_session_pool_t *sess_pools_elt, u32 len)
340 cryptodev_main_t *cmt = &cryptodev_main;
342 clib_error_t *error = NULL;
/* Pool names embed numa node and pool index to stay unique. */
344 name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
345 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
346 sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
347 (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
349 sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
350 (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
353 if (!sess_pools_elt->sess_pool)
355 error = clib_error_return (0, "Not enough memory for mp %s", name);
/* Pre-22.11: per-driver private session data lives in its own pool. */
360 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
361 name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
362 sess_pools_elt->sess_priv_pool = rte_mempool_create (
363 (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
364 0, NULL, NULL, NULL, NULL, numa_node, 0);
366 if (!sess_pools_elt->sess_priv_pool)
368 error = clib_error_return (0, "Not enough memory for mp %s", name);
/* Error path: release any pool that was successfully created. */
378 if (sess_pools_elt->sess_pool)
379 rte_mempool_free (sess_pools_elt->sess_pool);
380 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
381 if (sess_pools_elt->sess_priv_pool)
382 rte_mempool_free (sess_pools_elt->sess_priv_pool);
/* Lazily create the encrypt and decrypt DPDK sessions for one key on the
 * calling thread's NUMA node.  Finds (or grows) a session pool with free
 * entries, builds the enc/dec transforms, creates/initializes the two
 * sessions, stamps the AAD length into the session opaque data, and
 * publishes both session pointers behind a store barrier.
 * Serialized by cmt->tlock.  Error-path labels are not visible here. */
390 cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
393 cryptodev_main_t *cmt = &cryptodev_main;
394 cryptodev_numa_data_t *numa_data;
395 cryptodev_inst_t *dev_inst;
396 vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
397 struct rte_mempool *sess_pool;
398 cryptodev_session_pool_t *sess_pools_elt;
399 cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
400 struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
401 struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
402 cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
403 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
404 struct rte_mempool *sess_priv_pool;
405 struct rte_cryptodev_info dev_info;
407 u32 numa_node = vm->numa_node;
412 numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
414 clib_spinlock_lock (&cmt->tlock);
/* Find an existing pool with capacity; allocate on first use. */
415 vec_foreach (sess_pools_elt, numa_data->sess_pools)
417 if (sess_pools_elt->sess_pool == NULL)
419 error = allocate_session_pools (numa_node, sess_pools_elt,
420 vec_len (numa_data->sess_pools) - 1);
/* Need two free entries: one encrypt + one decrypt session. */
427 if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
/* All pools full: append a new pool element and allocate it. */
436 vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
437 error = allocate_session_pools (numa_node, sess_pools_elt,
438 vec_len (numa_data->sess_pools) - 1);
446 sess_pool = sess_pools_elt->sess_pool;
447 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
448 sess_priv_pool = sess_pools_elt->sess_priv_pool;
/* Pre-22.11 API: create session shells first, init per device below. */
450 sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
451 rte_cryptodev_sym_session_create (sess_pool);
453 sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
454 rte_cryptodev_sym_session_create (sess_pool);
/* Build direction-specific transform chains for this key type. */
457 if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
458 ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
461 prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
468 if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
469 prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
471 prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
/* 22.11+: single-call create against the first device (all devices share
 * one driver type by this point -- see cryptodev_configure). */
473 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
474 dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
475 u32 dev_id = dev_inst->dev_id;
476 sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
477 rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
478 sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
479 rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
480 if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
481 !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
/* Stash the AAD length so the datapath can detect AAD-size changes. */
487 rte_cryptodev_sym_session_opaque_data_set (
488 sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
489 rte_cryptodev_sym_session_opaque_data_set (
490 sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
/* Pre-22.11: initialize session private data once per driver type. */
492 vec_foreach (dev_inst, cmt->cryptodev_inst)
494 u32 dev_id = dev_inst->dev_id;
495 rte_cryptodev_info_get (dev_id, &dev_info);
496 u32 driver_id = dev_info.driver_id;
498 /* if the session is already configured for the driver type, avoid
499 configuring it again to increase the session data's refcnt */
500 if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
501 sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
504 ret = rte_cryptodev_sym_session_init (
505 dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
507 ret = rte_cryptodev_sym_session_init (
508 dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
514 sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
515 sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
/* Publish: barrier ensures sessions are fully built before workers see
 * the pointers. */
518 CLIB_MEMORY_STORE_BARRIER ();
519 ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
520 sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
521 ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
522 sessions[CRYPTODEV_OP_TYPE_DECRYPT];
/* Error path: release any partially created sessions. */
527 cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
528 cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
530 clib_spinlock_unlock (&cmt->tlock);
/* How a cryptodev queue-pair resource is assigned to a worker:
 * AUTO picks the first free instance; UPDATE moves a worker to a
 * specific, operator-chosen instance. */
536 CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
537 CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
538 } cryptodev_resource_assign_op_t;
541 * assign a cryptodev resource to a worker.
542 * @param cet: the worker thread data
543 * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
544 * @param op: the assignment method.
545 * @return: 0 if successfully, negative number otherwise.
547 static_always_inline int
548 cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
549 u32 cryptodev_inst_index,
550 cryptodev_resource_assign_op_t op)
552 cryptodev_main_t *cmt = &cryptodev_main;
553 cryptodev_inst_t *cinst = 0;
556 /* assign resource is only allowed when no inflight op is in the queue */
562 case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
/* Fail when every instance is already taken. */
563 if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
564 vec_len (cmt->cryptodev_inst))
567 clib_spinlock_lock (&cmt->tlock);
/* Claim the first free instance and record dev/queue on the worker. */
568 idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
569 clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
570 cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
571 cet->cryptodev_id = cinst->dev_id;
572 cet->cryptodev_q = cinst->q_id;
573 clib_spinlock_unlock (&cmt->tlock);
575 case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
576 /* assigning a used cryptodev resource is not allowed */
577 if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
/* Find the instance currently held by this worker so it can be freed. */
581 vec_foreach_index (idx, cmt->cryptodev_inst)
583 cinst = cmt->cryptodev_inst + idx;
584 if (cinst->dev_id == cet->cryptodev_id &&
585 cinst->q_id == cet->cryptodev_q)
588 /* invalid existing worker resource assignment */
589 if (idx >= vec_len (cmt->cryptodev_inst))
591 clib_spinlock_lock (&cmt->tlock);
/* Swap ownership bits: release old instance, claim the requested one. */
592 clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
593 clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
594 cryptodev_inst_index, 1);
595 cinst = cmt->cryptodev_inst + cryptodev_inst_index;
596 cet->cryptodev_id = cinst->dev_id;
597 cet->cryptodev_q = cinst->q_id;
598 clib_spinlock_unlock (&cmt->tlock);
/* vlib format helper: print one cryptodev instance as
 * "<device-name> <queue-id> <owning thread or 'free'>".
 * va_arg is the instance index into cmt->cryptodev_inst. */
607 format_cryptodev_inst (u8 * s, va_list * args)
609 cryptodev_main_t *cmt = &cryptodev_main;
610 u32 inst = va_arg (*args, u32);
611 cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
612 u32 thread_index = 0;
613 struct rte_cryptodev_info info;
615 rte_cryptodev_info_get (cit->dev_id, &info);
616 s = format (s, "%-25s%-10u", info.device->name, cit->q_id);
/* Look up which worker thread (if any) owns this dev/queue pair. */
618 vec_foreach_index (thread_index, cmt->per_thread_data)
620 cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
/* Skip the main thread when workers exist -- it does no crypto. */
621 if (vlib_num_workers () > 0 && thread_index == 0)
624 if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
626 s = format (s, "%u (%v)\n", thread_index,
627 vlib_worker_threads[thread_index].name)
/* No owner found: mark the instance as free. */
632 if (thread_index == vec_len (cmt->per_thread_data))
633 s = format (s, "%s\n", "free");
/* CLI handler for "show cryptodev assignment": list every probed
 * cryptodev instance and which worker (if any) owns it, then report
 * which datapath API (raw vs. crypto-op) is in use. */
638 static clib_error_t *
639 cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
640 vlib_cli_command_t * cmd)
642 cryptodev_main_t *cmt = &cryptodev_main;
645 vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
647 if (vec_len (cmt->cryptodev_inst) == 0)
649 vlib_cli_output (vm, "(nil)\n");
653 vec_foreach_index (inst, cmt->cryptodev_inst)
654 vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
/* The raw-API branch condition is not visible in this excerpt. */
657 vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
659 vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
/* Register the "show cryptodev assignment" CLI command. */
663 VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
664 .path = "show cryptodev assignment",
665 .short_help = "show cryptodev assignment",
666 .function = cryptodev_show_assignment_fn,
/* CLI handler for "show cryptodev cache status": dump, for every worker,
 * the state of its frame cache ring -- cached / pending / inflight /
 * processed frame counts plus element-level counters.  All ring index
 * arithmetic is modulo CRYPTODEV_CACHE_QUEUE_SIZE (power of two, masked
 * with CRYPTODEV_CACHE_QUEUE_MASK). */
669 static clib_error_t *
670 cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
671 vlib_cli_command_t *cmd)
673 cryptodev_main_t *cmt = &cryptodev_main;
674 u32 thread_index = 0;
676 vec_foreach_index (thread_index, cmt->per_thread_data)
678 cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
679 cryptodev_cache_ring_t *ring = &cet->cache_ring;
680 u16 head = ring->head;
681 u16 tail = ring->tail;
/* Total frames currently held in the ring (head - tail, wrapped). */
682 u16 n_cached = (CRYPTODEV_CACHE_QUEUE_SIZE - tail + head) &
683 CRYPTODEV_CACHE_QUEUE_MASK;
685 u16 enq_head = ring->enq_head;
686 u16 deq_tail = ring->deq_tail;
687 u16 n_frames_inflight =
688 (enq_head == deq_tail) ?
690 ((CRYPTODEV_CACHE_QUEUE_SIZE + enq_head - deq_tail) &
691 CRYPTODEV_CACHE_QUEUE_MASK);
692 /* even if some elements of dequeued frame are still pending for deq
693 * we consider the frame as processed */
694 u16 n_frames_processed =
695 ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ?
697 ((CRYPTODEV_CACHE_QUEUE_SIZE - tail + deq_tail) &
698 CRYPTODEV_CACHE_QUEUE_MASK) +
700 /* even if some elements of enqueued frame are still pending for enq
701 * we consider the frame as enqueued */
702 u16 n_frames_pending =
703 (head == enq_head) ? 0 :
704 ((CRYPTODEV_CACHE_QUEUE_SIZE - enq_head + head) &
705 CRYPTODEV_CACHE_QUEUE_MASK) -
/* Elements of the current enqueue/dequeue frames still outstanding. */
709 (ring->frames[enq_head].n_elts - ring->frames[enq_head].enq_elts_head);
711 (ring->frames[deq_tail].n_elts - ring->frames[deq_tail].deq_elts_tail);
715 for (i = 0; i < CRYPTODEV_CACHE_QUEUE_SIZE; i++)
716 elts_total += ring->frames[i].n_elts;
/* Skip the main thread when workers exist -- it has no ring. */
718 if (vlib_num_workers () > 0 && thread_index == 0)
721 vlib_cli_output (vm, "\n\n");
722 vlib_cli_output (vm, "Frames cached in the ring: %u", n_cached);
723 vlib_cli_output (vm, "Frames cached but not processed: %u",
725 vlib_cli_output (vm, "Frames inflight: %u", n_frames_inflight);
726 vlib_cli_output (vm, "Frames processed: %u", n_frames_processed);
727 vlib_cli_output (vm, "Elements total: %u", elts_total);
728 vlib_cli_output (vm, "Elements inflight: %u", cet->inflight);
729 vlib_cli_output (vm, "Head index: %u", head);
730 vlib_cli_output (vm, "Tail index: %u", tail);
731 vlib_cli_output (vm, "Current frame index beeing enqueued: %u",
733 vlib_cli_output (vm, "Current frame index being dequeued: %u", deq_tail);
735 "Elements in current frame to be enqueued: %u, waiting "
736 "to be enqueued: %u",
737 ring->frames[enq_head].n_elts, elts_to_enq);
739 "Elements in current frame to be dequeued: %u, waiting "
740 "to be dequeued: %u",
741 ring->frames[deq_tail].n_elts, elts_to_deq);
742 vlib_cli_output (vm, "\n\n");
/* Register the "show cryptodev cache status" CLI command. */
747 VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
748 .path = "show cryptodev cache status",
749 .short_help = "show status of all cryptodev cache rings",
750 .function = cryptodev_show_cache_rings_fn,
/* CLI handler for "set cryptodev assignment thread <t> resource <r>":
 * parse and validate the thread and resource indices, then re-assign the
 * worker to the requested cryptodev instance via
 * cryptodev_assign_resource(..., ASSIGN_UPDATE). */
753 static clib_error_t *
754 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
755 vlib_cli_command_t * cmd)
757 cryptodev_main_t *cmt = &cryptodev_main;
758 cryptodev_engine_thread_t *cet;
759 unformat_input_t _line_input, *line_input = &_line_input;
760 u32 thread_index, inst_index;
761 u32 thread_present = 0, inst_present = 0;
762 clib_error_t *error = 0;
765 /* Get a line of input. */
766 if (!unformat_user (input, unformat_line_input, line_input))
769 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
771 if (unformat (line_input, "thread %u", &thread_index))
773 else if (unformat (line_input, "resource %u", &inst_index))
777 error = clib_error_return (0, "unknown input `%U'",
778 format_unformat_error, line_input);
/* Both arguments are mandatory. */
783 if (!thread_present || !inst_present)
785 error = clib_error_return (0, "mandatory argument(s) missing");
/* Master thread does no crypto work when workers exist. */
789 if (thread_index == 0 && vlib_num_workers () > 0)
792 clib_error_return (0, "assign crypto resource for master thread");
796 if (thread_index > vec_len (cmt->per_thread_data) ||
797 inst_index > vec_len (cmt->cryptodev_inst))
799 error = clib_error_return (0, "wrong thread id or resource id");
803 cet = cmt->per_thread_data + thread_index;
804 ret = cryptodev_assign_resource (cet, inst_index,
805 CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
809 clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
/* Register the "set cryptodev assignment" CLI command. */
816 VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
817 .path = "set cryptodev assignment",
818 .short_help = "set cryptodev assignment thread <thread_index> "
819 "resource <inst_index>",
820 .function = cryptodev_set_assignment_fn,
/* Sum the maximum queue-pair count over all cryptodev devices.
 * NOTE(review): the numa argument is unused in the visible lines --
 * confirm against the full file whether filtering was intended. */
824 cryptodev_count_queue (u32 numa)
826 struct rte_cryptodev_info info;
827 u32 n_cryptodev = rte_cryptodev_count ();
830 for (i = 0; i < n_cryptodev; i++)
832 rte_cryptodev_info_get (i, &info);
833 q_count += info.max_nb_queue_pairs;
/* Configure and start one cryptodev: reject heterogeneous drivers
 * (DPDK >= 22.11), require symmetric-crypto support, set up all queue
 * pairs, start the device, then register one cryptodev_inst_t per queue
 * pair for later assignment to workers. */
840 cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
842 struct rte_cryptodev_config cfg;
843 struct rte_cryptodev_info info;
844 cryptodev_main_t *cmt = &cryptodev_main;
848 rte_cryptodev_info_get (cryptodev_id, &info);
850 /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto devices
851 anymore. Only devices that have the same driver type as the first
852 initialized device can be initialized.
854 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
855 if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
859 if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
862 cfg.socket_id = info.device->numa_node;
863 cfg.nb_queue_pairs = info.max_nb_queue_pairs;
865 rte_cryptodev_configure (cryptodev_id, &cfg);
/* Set up every queue pair on the device's own NUMA node. */
867 for (i = 0; i < info.max_nb_queue_pairs; i++)
869 struct rte_cryptodev_qp_conf qp_cfg;
871 qp_cfg.mp_session = 0;
872 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
873 qp_cfg.mp_session_private = 0;
875 qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
877 ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
878 info.device->numa_node);
881 clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
882 cryptodev_id, i, ret);
/* Bail out if any queue pair failed to configure. */
887 if (i != info.max_nb_queue_pairs)
890 /* start the device */
891 rte_cryptodev_start (cryptodev_id);
/* First configured device pins the driver type and session size. */
893 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
894 if (cmt->drivers_cnt == 0)
896 cmt->drivers_cnt = 1;
897 cmt->driver_id = info.driver_id;
898 cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
/* One assignable instance per queue pair, labelled "<dev>_q<i>". */
902 for (i = 0; i < info.max_nb_queue_pairs; i++)
904 cryptodev_inst_t *cdev_inst;
905 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
906 const char *dev_name = rte_dev_name (info.device);
908 const char *dev_name = info.device->name;
910 vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
911 cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
912 cdev_inst->dev_id = cryptodev_id;
915 snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u",
916 info.device->name, i);
/* qsort-style comparator ordering cryptodev instances by queue id,
 * used to sort cmt->cryptodev_inst after probing. */
923 cryptodev_cmp (void *v1, void *v2)
925 cryptodev_inst_t *a1 = v1;
926 cryptodev_inst_t *a2 = v2;
928 if (a1->q_id > a2->q_id)
930 if (a1->q_id < a2->q_id)
/* Return non-zero if param_value appears in the vector of supported
 * parameter values (key/digest/aad sizes). */
936 cryptodev_supports_param_value (u32 *params, u32 param_value)
939 vec_foreach (value, params)
941 if (*value == param_value)
/* Check a (xform type, algorithm) capability index against the common
 * capability vector, matching key/digest/aad sizes as relevant for the
 * xform type.  Returns non-zero when supported by all devices. */
948 cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
949 u32 key_size, u32 digest_size, u32 aad_size)
951 cryptodev_main_t *cmt = &cryptodev_main;
952 cryptodev_capability_t *cap;
953 vec_foreach (cap, cmt->supported_caps)
956 if (cap->xform_type != idx->type)
/* AUTH: only the digest size matters. */
959 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
960 cap->auth.algo == idx->algo.auth &&
961 cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
/* CIPHER: only the key size matters. */
964 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
965 cap->cipher.algo == idx->algo.cipher &&
966 cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
/* AEAD: key, digest and aad sizes must all match. */
969 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
970 cap->aead.algo == idx->algo.aead &&
971 cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
972 cryptodev_supports_param_value (cap->aead.digest_sizes,
974 cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
/* Intersect a vector of candidate parameter sizes with a device
 * capability range [param_size_min, param_size_max] stepped by
 * `increment`: every vector entry not reachable within the range is
 * deleted in place. */
981 remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
982 u32 param_size_max, u32 increment)
987 while (i < vec_len (*param_sizes))
990 for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
991 cap_param_size += increment)
993 if ((*param_sizes)[i] == cap_param_size)
1002 /* no such param_size in cap so delete this size in temp_cap params */
1003 vec_delete (*param_sizes, 1, i);
/* Remove capability entry temp_cap_id from the vector, first freeing the
 * per-type size vectors it owns (auth digest sizes, cipher key sizes, or
 * all three AEAD size vectors). */
1010 cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
1012 cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];
1014 switch (temp_cap.xform_type)
1016 case RTE_CRYPTO_SYM_XFORM_AUTH:
1017 vec_free (temp_cap.auth.digest_sizes);
1019 case RTE_CRYPTO_SYM_XFORM_CIPHER:
1020 vec_free (temp_cap.cipher.key_sizes);
1022 case RTE_CRYPTO_SYM_XFORM_AEAD:
1023 vec_free (temp_cap.aead.key_sizes);
1024 vec_free (temp_cap.aead.aad_sizes);
1025 vec_free (temp_cap.aead.digest_sizes);
1030 vec_delete (*temp_caps, 1, temp_cap_id);
/* Narrow one tentative common capability against a device's capability
 * array: for the matching algorithm, drop parameter sizes the device
 * does not support.  Returns non-zero while the capability still has at
 * least one size left in every relevant dimension (i.e. the device
 * supports it at all). */
1034 cryptodev_remove_unsupported_param_sizes (
1035 cryptodev_capability_t *temp_cap,
1036 const struct rte_cryptodev_capabilities *dev_caps)
1039 const struct rte_cryptodev_capabilities *cap = &dev_caps[0];
/* dev_caps is terminated by an UNDEFINED op entry. */
1041 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
1043 if (cap->sym.xform_type == temp_cap->xform_type)
1044 switch (cap->sym.xform_type)
1046 case RTE_CRYPTO_SYM_XFORM_CIPHER:
1047 if (cap->sym.cipher.algo == temp_cap->cipher.algo)
1049 remove_unsupported_param_size (
1050 &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
1051 cap->sym.cipher.key_size.max,
1052 cap->sym.cipher.key_size.increment);
1053 if (vec_len (temp_cap->cipher.key_sizes) > 0)
1057 case RTE_CRYPTO_SYM_XFORM_AUTH:
1058 if (cap->sym.auth.algo == temp_cap->auth.algo)
1060 remove_unsupported_param_size (
1061 &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
1062 cap->sym.auth.digest_size.max,
1063 cap->sym.auth.digest_size.increment);
1064 if (vec_len (temp_cap->auth.digest_sizes) > 0)
1068 case RTE_CRYPTO_SYM_XFORM_AEAD:
1069 if (cap->sym.aead.algo == temp_cap->aead.algo)
/* AEAD needs all three dimensions narrowed and non-empty. */
1071 remove_unsupported_param_size (
1072 &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
1073 cap->sym.aead.key_size.max,
1074 cap->sym.aead.key_size.increment);
1075 remove_unsupported_param_size (
1076 &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
1077 cap->sym.aead.aad_size.max,
1078 cap->sym.aead.aad_size.increment);
1079 remove_unsupported_param_size (
1080 &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
1081 cap->sym.aead.digest_size.max,
1082 cap->sym.aead.digest_size.increment);
1083 if (vec_len (temp_cap->aead.key_sizes) > 0 &&
1084 vec_len (temp_cap->aead.aad_sizes) > 0 &&
1085 vec_len (temp_cap->aead.digest_sizes) > 0)
/* Compute the capability intersection of all probed cryptodevs.
 * Phase 1: seed cmt->supported_caps from the first device, expanding each
 * DPDK (min, max, increment) size range into explicit size vectors.
 * Phase 2: for every other device, narrow each entry and drop entries no
 * device-wide support remains for.  The result backs all the
 * check_*_support() helpers above. */
1101 cryptodev_get_common_capabilities ()
1103 cryptodev_main_t *cmt = &cryptodev_main;
1104 cryptodev_inst_t *dev_inst;
1105 struct rte_cryptodev_info dev_info;
1106 u32 previous_dev_id, dev_id;
1109 cryptodev_capability_t tmp_cap;
1110 const struct rte_cryptodev_capabilities *cap;
1111 const struct rte_cryptodev_capabilities *dev_caps;
1113 clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
1114 if (vec_len (cmt->cryptodev_inst) == 0)
1116 dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
1117 rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
1118 cap = &dev_info.capabilities[0];
1120 /*init capabilities vector*/
1121 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
1123 if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1129 tmp_cap.xform_type = cap->sym.xform_type;
1130 switch (cap->sym.xform_type)
1132 case RTE_CRYPTO_SYM_XFORM_CIPHER:
1133 tmp_cap.cipher.key_sizes = 0;
1134 tmp_cap.cipher.algo = cap->sym.cipher.algo;
/* Expand (min,max,increment) into explicit sizes; increment == 0
 * means a single fixed size, so break after the first append. */
1135 for (param = cap->sym.cipher.key_size.min;
1136 param <= cap->sym.cipher.key_size.max;
1137 param += cap->sym.cipher.key_size.increment)
1139 vec_add1 (tmp_cap.cipher.key_sizes, param);
1140 if (cap->sym.cipher.key_size.increment == 0)
1144 case RTE_CRYPTO_SYM_XFORM_AUTH:
1145 tmp_cap.auth.algo = cap->sym.auth.algo;
1146 tmp_cap.auth.digest_sizes = 0;
1147 for (param = cap->sym.auth.digest_size.min;
1148 param <= cap->sym.auth.digest_size.max;
1149 param += cap->sym.auth.digest_size.increment)
1151 vec_add1 (tmp_cap.auth.digest_sizes, param);
1152 if (cap->sym.auth.digest_size.increment == 0)
1156 case RTE_CRYPTO_SYM_XFORM_AEAD:
1157 tmp_cap.aead.key_sizes = 0;
1158 tmp_cap.aead.aad_sizes = 0;
1159 tmp_cap.aead.digest_sizes = 0;
1160 tmp_cap.aead.algo = cap->sym.aead.algo;
1161 for (param = cap->sym.aead.key_size.min;
1162 param <= cap->sym.aead.key_size.max;
1163 param += cap->sym.aead.key_size.increment)
1165 vec_add1 (tmp_cap.aead.key_sizes, param);
1166 if (cap->sym.aead.key_size.increment == 0)
1169 for (param = cap->sym.aead.aad_size.min;
1170 param <= cap->sym.aead.aad_size.max;
1171 param += cap->sym.aead.aad_size.increment)
1173 vec_add1 (tmp_cap.aead.aad_sizes, param);
1174 if (cap->sym.aead.aad_size.increment == 0)
1177 for (param = cap->sym.aead.digest_size.min;
1178 param <= cap->sym.aead.digest_size.max;
1179 param += cap->sym.aead.digest_size.increment)
1181 vec_add1 (tmp_cap.aead.digest_sizes, param);
1182 if (cap->sym.aead.digest_size.increment == 0)
1190 vec_add1 (cmt->supported_caps, tmp_cap);
/* Phase 2: intersect each seeded capability with every other device. */
1194 while (cap_id < vec_len (cmt->supported_caps))
1196 u32 cap_is_supported = 1;
1197 previous_dev_id = cmt->cryptodev_inst->dev_id;
1199 vec_foreach (dev_inst, cmt->cryptodev_inst)
1201 dev_id = dev_inst->dev_id;
/* Skip repeated instances of the same device (one per queue pair). */
1202 if (previous_dev_id != dev_id)
1204 previous_dev_id = dev_id;
1205 rte_cryptodev_info_get (dev_id, &dev_info);
1206 dev_caps = &dev_info.capabilities[0];
1207 cap_is_supported = cryptodev_remove_unsupported_param_sizes (
1208 &cmt->supported_caps[cap_id], dev_caps);
1209 if (!cap_is_supported)
1211 cryptodev_delete_cap (&cmt->supported_caps, cap_id);
1212 /*no need to check other devices as this one doesn't support
/* Only advance when the entry survived; deletion shifts the vector. */
1218 if (cap_is_supported)
/* Probe and configure all cryptodevs.  Fails early when the total queue
 * count cannot cover every worker; afterwards computes the common
 * capability set and sorts instances by queue id.  Returns negative on
 * failure (exact return lines not visible in this excerpt). */
1224 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
1226 cryptodev_main_t *cmt = &cryptodev_main;
1227 u32 n_queues = cryptodev_count_queue (vm->numa_node);
1230 if (n_queues < n_workers)
1233 for (i = 0; i < rte_cryptodev_count (); i++)
1234 cryptodev_configure (vm, i);
1236 if (vec_len (cmt->cryptodev_inst) == 0)
1238 cryptodev_get_common_capabilities ();
1239 vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);
1241 /* if there is not enough device stop cryptodev */
1242 if (vec_len (cmt->cryptodev_inst) < n_workers)
1248 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
/* Pre-22.11 helper: append driver_id to *unique_drivers only when not
 * already present, so the caller can count distinct driver types. */
1250 is_drv_unique (u32 driver_id, u32 **unique_drivers)
1255 vec_foreach (unique_elt, *unique_drivers)
1257 if (*unique_elt == driver_id)
1265 vec_add1 (*unique_drivers, driver_id);
1270 dpdk_cryptodev_init (vlib_main_t * vm)
1272 cryptodev_main_t *cmt = &cryptodev_main;
1273 vlib_thread_main_t *tm = vlib_get_thread_main ();
1274 cryptodev_engine_thread_t *cet;
1275 cryptodev_numa_data_t *numa_data;
1278 u32 skip_master = vlib_num_workers () > 0;
1279 u32 n_workers = tm->n_vlib_mains - skip_master;
1282 clib_error_t *error;
1284 cmt->iova_mode = rte_eal_iova_mode ();
1286 clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
1292 vec_validate (cmt->per_numa_data, nodes);
1293 vec_foreach (numa_data, cmt->per_numa_data)
1295 vec_validate (numa_data->sess_pools, 0);
1298 /* probe all cryptodev devices and get queue info */
1299 if (cryptodev_probe (vm, n_workers) < 0)
1302 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
1303 struct rte_cryptodev_info dev_info;
1304 cryptodev_inst_t *dev_inst;
1305 u32 *unique_drivers = 0;
1306 vec_foreach (dev_inst, cmt->cryptodev_inst)
1308 u32 dev_id = dev_inst->dev_id;
1309 rte_cryptodev_info_get (dev_id, &dev_info);
1310 u32 driver_id = dev_info.driver_id;
1311 is_drv_unique (driver_id, &unique_drivers);
1314 rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
1315 cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
1318 cmt->drivers_cnt = vec_len (unique_drivers);
1319 vec_free (unique_drivers);
1322 clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
1323 clib_spinlock_init (&cmt->tlock);
1325 vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1,
1326 CLIB_CACHE_LINE_BYTES);
1327 for (i = skip_master; i < tm->n_vlib_mains; i++)
1329 cet = cmt->per_thread_data + i;
1331 if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
1334 error = clib_error_return (0, "Failed to configure cryptodev");
1339 /* register handler */
1340 eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
1341 "DPDK Cryptodev Engine");
1343 vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
1345 if (cryptodev_register_raw_hdl)
1346 error = cryptodev_register_raw_hdl (vm, eidx);
1348 error = cryptodev_register_cop_hdl (vm, eidx);
1353 /* this engine is only enabled when cryptodev device(s) are presented in
1354 * startup.conf. Assume it is wanted to be used, turn on async mode here.
1356 ipsec_set_async_mode (1);