2 *------------------------------------------------------------------
3 * Copyright (c) 2020 Intel and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/ipsec/ipsec.h>
22 #include <vpp/app/version.h>
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_config.h>
34 #include "cryptodev.h"
37 #define always_inline static inline
/* NOTE(review): `always_inline` is defined twice here; the conditional
 * guards selecting between the plain and __always_inline__ variants
 * (presumably #if CLIB_DEBUG / #else / #endif) appear to be missing
 * from this view — confirm against the full file. */
39 #define always_inline static inline __attribute__ ((__always_inline__))
/* Global engine state: devices/queues, per-numa session pools,
 * per-thread resource assignment, key/session table. */
42 cryptodev_main_t cryptodev_main;
/* Populate a DPDK symmetric xform with AEAD parameters taken from a
 * vnet crypto key (AES-GCM or CHACHA20-POLY1305).
 * NOTE(review): interior lines are missing from this view (braces,
 * the unsupported-alg fallthrough, and the return statements). */
44 static_always_inline int
45 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
46 cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
49 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
50 memset (xform, 0, sizeof (*xform));
51 xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
/* Map the vnet algorithm to the DPDK AEAD algorithm enum. */
54 if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
55 key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
56 key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
58 aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
60 else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
62 aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
/* Direction (encrypt/decrypt) is chosen by the caller's op_type. */
67 aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
68 RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
69 aead_xform->aad_length = aad_len;
/* Fixed 16-byte tag and 12-byte IV; IV lives at CRYPTODEV_IV_OFFSET
 * inside the crypto op. */
70 aead_xform->digest_length = 16;
71 aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
72 aead_xform->iv.length = 12;
73 aead_xform->key.data = key->data;
74 aead_xform->key.length = vec_len (key->data);
/* Build a two-element cipher+auth xform chain for a "linked" vnet key
 * (separate crypto and integrity keys). Encrypt chains cipher->auth,
 * decrypt chains auth->cipher.
 * NOTE(review): error-return lines and some braces are missing from
 * this view; digest_len's origin is not visible — confirm. */
79 static_always_inline int
80 prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
81 cryptodev_op_type_t op_type,
82 const vnet_crypto_key_t *key)
84 struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
85 vnet_crypto_key_t *key_cipher, *key_auth;
86 enum rte_crypto_cipher_algorithm cipher_algo = ~0;
87 enum rte_crypto_auth_algorithm auth_algo = ~0;
/* Resolve the two component keys referenced by the linked key. */
90 key_cipher = vnet_crypto_get_key (key->index_crypto);
91 key_auth = vnet_crypto_get_key (key->index_integ);
92 if (!key_cipher || !key_auth)
/* Order of the two xforms depends on direction. */
95 if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
97 xform_cipher = xforms;
98 xform_auth = xforms + 1;
99 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
100 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
104 xform_cipher = xforms + 1;
106 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
107 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
110 xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
111 xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
112 xforms->next = xforms + 1;
/* Translate the combined vnet async alg into DPDK cipher/auth enums
 * via the x-macro table. */
114 switch (key->async_alg)
116 #define _(a, b, c, d, e) \
117 case VNET_CRYPTO_ALG_##a##_##d##_TAG##e: \
118 cipher_algo = RTE_CRYPTO_CIPHER_##b; \
119 auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC; \
123 foreach_cryptodev_link_async_alg
129 xform_cipher->cipher.algo = cipher_algo;
130 xform_cipher->cipher.key.data = key_cipher->data;
131 xform_cipher->cipher.key.length = vec_len (key_cipher->data);
/* 16-byte IV at the shared CRYPTODEV_IV_OFFSET in the crypto op. */
132 xform_cipher->cipher.iv.length = 16;
133 xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;
135 xform_auth->auth.algo = auth_algo;
136 xform_auth->auth.digest_length = digest_len;
137 xform_auth->auth.key.data = key_auth->data;
138 xform_auth->auth.key.length = vec_len (key_auth->data);
/* Free a cryptodev session, handling both DPDK API generations:
 * >= 22.11 frees per-device; older releases clear per-device state
 * then free the session object once.
 * NOTE(review): null-check of sess, loop braces and #else/#endif
 * lines are missing from this view. */
143 static_always_inline void
144 cryptodev_session_del (cryptodev_session_t *sess)
151 n_devs = rte_cryptodev_count ();
153 for (i = 0; i < n_devs; i++)
154 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
155 if (rte_cryptodev_sym_session_free (i, sess) == 0)
158 rte_cryptodev_sym_session_clear (i, sess);
160 rte_cryptodev_sym_session_free (sess);
/* Return whether the common capability set supports the given DPDK
 * cipher algorithm with the given key size (scans cached caps).
 * NOTE(review): the match/return lines are missing from this view. */
165 check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
167 cryptodev_main_t *cmt = &cryptodev_main;
168 cryptodev_capability_t *vcap;
171 vec_foreach (vcap, cmt->supported_caps)
173 if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
175 if (vcap->cipher.algo != algo)
177 vec_foreach (s, vcap->cipher.key_sizes)
/* Return whether the common capability set supports the given DPDK
 * auth algorithm with the given digest size.
 * NOTE(review): the success/failure return lines are missing from
 * this view. */
186 check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
188 cryptodev_main_t *cmt = &cryptodev_main;
189 cryptodev_capability_t *vcap;
192 vec_foreach (vcap, cmt->supported_caps)
194 if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
196 if (vcap->auth.algo != algo)
198 vec_foreach (s, vcap->auth.digest_sizes)
199 if (*s == digest_size)
/* Return whether the common capability set supports the given AEAD
 * algorithm with all three of: key size, digest size and AAD size.
 * A capability matches only when all three parameter lists match. */
206 static_always_inline int
207 check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
208 u32 digest_size, u32 aad_size)
210 cryptodev_main_t *cmt = &cryptodev_main;
211 cryptodev_capability_t *vcap;
213 u32 key_match = 0, digest_match = 0, aad_match = 0;
215 vec_foreach (vcap, cmt->supported_caps)
217 if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
219 if (vcap->aead.algo != algo)
/* Each size list is searched independently; all three must hit. */
221 vec_foreach (s, vcap->aead.digest_sizes)
222 if (*s == digest_size)
227 vec_foreach (s, vcap->aead.key_sizes)
233 vec_foreach (s, vcap->aead.aad_sizes)
241 if (key_match == 1 && digest_match == 1 && aad_match == 1)
/* Check that a vnet crypto key's algorithm(s) are supported by every
 * configured cryptodev: linked keys check cipher+auth, plain keys
 * check the AEAD conversion table. Returns 0 when unsupported.
 * NOTE(review): the `matched` accounting lines and several returns
 * are missing from this view — semantics of `matched < 2` not fully
 * visible; confirm against the full file. */
247 static_always_inline int
248 cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
252 if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
254 switch (key->async_alg)
256 #define _(a, b, c, d, e) \
257 case VNET_CRYPTO_ALG_##a##_##d##_TAG##e: \
258 if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) && \
259 check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e)) \
261 foreach_cryptodev_link_async_alg
268 #define _(a, b, c, d, e, f, g) \
269 if (key->alg == VNET_CRYPTO_ALG_##a) \
271 if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f)) \
274 foreach_vnet_aead_crypto_conversion
277 if (matched < 2) return 0;
/* vnet key-operation handler: on DEL/MODIFY tear down the per-numa
 * encrypt/decrypt sessions for the key; otherwise validate the key's
 * algorithm support and pre-size the per-numa session table (actual
 * session creation is deferred — see cryptodev_session_create).
 * NOTE(review): many interior lines (braces, early returns, ADD path)
 * are missing from this view. */
283 cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
284 vnet_crypto_key_index_t idx, u32 aad_len)
286 cryptodev_main_t *cmt = &cryptodev_main;
287 vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
288 cryptodev_key_t *ckey = 0;
291 vec_validate (cmt->keys, idx);
292 ckey = vec_elt_at_index (cmt->keys, idx);
294 if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
296 if (idx >= vec_len (cmt->keys))
/* Drop sessions on every numa node that has them. */
299 vec_foreach_index (i, cmt->per_numa_data)
305 if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
307 cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
308 cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);
/* Publish the cleared pointers only after the frees above. */
310 CLIB_MEMORY_STORE_BARRIER ();
311 ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
312 ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
320 /* do not create session for unsupported alg */
321 if (cryptodev_check_supported_vnet_alg (key) == 0)
/* Reserve one encrypt+decrypt session slot pair per numa node. */
324 vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
325 vec_foreach_index (i, ckey->keys)
326 vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
/* Key handler registered with the vnet crypto layer; forwards to
 * cryptodev_sess_handler with a default AAD length of 8 bytes. */
330 cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
331 vnet_crypto_key_index_t idx)
333 cryptodev_sess_handler (vm, kop, idx, 8);
/* Allocate the DPDK session mempool (and, pre-22.11, the separate
 * session-private-data pool) for one numa node; `len` only
 * disambiguates the pool name. On failure, frees whatever was
 * allocated and returns a clib error.
 * NOTE(review): the success return, error-path labels and some
 * braces are missing from this view. */
337 allocate_session_pools (u32 numa_node,
338 cryptodev_session_pool_t *sess_pools_elt, u32 len)
340 cryptodev_main_t *cmt = &cryptodev_main;
342 clib_error_t *error = NULL;
344 name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
345 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
/* >= 22.11: element size must carry the private session size. */
346 sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
347 (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
349 sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
350 (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
353 if (!sess_pools_elt->sess_pool)
355 error = clib_error_return (0, "Not enough memory for mp %s", name);
360 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
/* Pre-22.11: private data lives in a second pool, sized for every
 * driver type present. */
361 name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
362 sess_pools_elt->sess_priv_pool = rte_mempool_create (
363 (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
364 0, NULL, NULL, NULL, NULL, numa_node, 0);
366 if (!sess_pools_elt->sess_priv_pool)
368 error = clib_error_return (0, "Not enough memory for mp %s", name);
/* Error cleanup: release any pool that was created. */
378 if (sess_pools_elt->sess_pool)
379 rte_mempool_free (sess_pools_elt->sess_pool);
380 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
381 if (sess_pools_elt->sess_priv_pool)
382 rte_mempool_free (sess_pools_elt->sess_priv_pool);
/* Lazily create the encrypt and decrypt DPDK sessions for key `idx`
 * on the calling thread's numa node, growing the per-numa session
 * pools on demand. Serialized by cmt->tlock. The AAD length is stored
 * in the session opaque data so the data path can detect mismatches.
 * NOTE(review): numerous interior lines (braces, error gotos, the
 * final return, pre-22.11 session_init tail arguments) are missing
 * from this view. */
390 cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
393 cryptodev_main_t *cmt = &cryptodev_main;
394 cryptodev_numa_data_t *numa_data;
395 cryptodev_inst_t *dev_inst;
396 vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
397 struct rte_mempool *sess_pool;
398 cryptodev_session_pool_t *sess_pools_elt;
399 cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
400 struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
401 struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
402 cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
403 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
404 struct rte_mempool *sess_priv_pool;
405 struct rte_cryptodev_info dev_info;
407 u32 numa_node = vm->numa_node;
412 numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
414 clib_spinlock_lock (&cmt->tlock);
/* Find (or lazily allocate) a pool with room for the two sessions. */
415 vec_foreach (sess_pools_elt, numa_data->sess_pools)
417 if (sess_pools_elt->sess_pool == NULL)
419 error = allocate_session_pools (numa_node, sess_pools_elt,
420 vec_len (numa_data->sess_pools) - 1);
427 if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
/* All existing pools exhausted: append and allocate a fresh one. */
436 vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
437 error = allocate_session_pools (numa_node, sess_pools_elt,
438 vec_len (numa_data->sess_pools) - 1);
446 sess_pool = sess_pools_elt->sess_pool;
447 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
448 sess_priv_pool = sess_pools_elt->sess_priv_pool;
/* Pre-22.11 API: create the session shells first, init per-device
 * below. */
450 sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
451 rte_cryptodev_sym_session_create (sess_pool);
453 sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
454 rte_cryptodev_sym_session_create (sess_pool);
/* Build the xform chains for both directions. */
457 if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
458 ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
461 prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
468 if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
469 prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
471 prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
473 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
/* >= 22.11: single-call create against the first device (all devices
 * share one driver type — enforced in cryptodev_configure). */
474 dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
475 u32 dev_id = dev_inst->dev_id;
476 sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
477 rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
478 sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
479 rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
480 if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
481 !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
/* Stash aad_len so the data path can verify it per packet. */
487 rte_cryptodev_sym_session_opaque_data_set (
488 sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
489 rte_cryptodev_sym_session_opaque_data_set (
490 sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
/* Pre-22.11: initialize the sessions on every device/driver. */
492 vec_foreach (dev_inst, cmt->cryptodev_inst)
494 u32 dev_id = dev_inst->dev_id;
495 rte_cryptodev_info_get (dev_id, &dev_info);
496 u32 driver_id = dev_info.driver_id;
498 /* if the session is already configured for the driver type, avoid
499 configuring it again to increase the session data's refcnt */
500 if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
501 sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
504 ret = rte_cryptodev_sym_session_init (
505 dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
507 ret = rte_cryptodev_sym_session_init (
508 dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
514 sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
515 sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
/* Publish the finished sessions only after full initialization. */
518 CLIB_MEMORY_STORE_BARRIER ();
519 ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
520 sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
521 ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
522 sessions[CRYPTODEV_OP_TYPE_DECRYPT];
/* Error path: release any sessions created before failing. */
527 cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
528 cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
530 clib_spinlock_unlock (&cmt->tlock);
/* How a cryptodev queue-pair is bound to a worker: AUTO picks the
 * first free instance; UPDATE moves a worker to a caller-chosen one. */
536 CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
537 CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
538 } cryptodev_resource_assign_op_t;
541 * assign a cryptodev resource to a worker.
542 * @param cet: the worker thread data
543 * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
544 * @param op: the assignment method.
545 * @return: 0 if successfully, negative number otherwise.
/* Bind a cryptodev (device, queue) pair to a worker thread, either
 * automatically (first free instance) or to an explicit index.
 * Bitmap + assignment updates are serialized by cmt->tlock.
 * NOTE(review): return statements, break statements and the inflight
 * check body are missing from this view. */
547 static_always_inline int
548 cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
549 u32 cryptodev_inst_index,
550 cryptodev_resource_assign_op_t op)
552 cryptodev_main_t *cmt = &cryptodev_main;
553 cryptodev_inst_t *cinst = 0;
556 /* assign resource is only allowed when no inflight op is in the queue */
562 case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
/* Fail when every instance is already taken. */
563 if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
564 vec_len (cmt->cryptodev_inst))
567 clib_spinlock_lock (&cmt->tlock);
568 idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
569 clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
570 cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
571 cet->cryptodev_id = cinst->dev_id;
572 cet->cryptodev_q = cinst->q_id;
573 clib_spinlock_unlock (&cmt->tlock);
575 case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
576 /* assigning a used cryptodev resource is not allowed */
577 if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
/* Locate the worker's current assignment so it can be released. */
581 vec_foreach_index (idx, cmt->cryptodev_inst)
583 cinst = cmt->cryptodev_inst + idx;
584 if (cinst->dev_id == cet->cryptodev_id &&
585 cinst->q_id == cet->cryptodev_q)
588 /* invalid existing worker resource assignment */
589 if (idx == vec_len (cmt->cryptodev_inst))
591 clib_spinlock_lock (&cmt->tlock);
/* Swap: clear the old instance bit, set the new one, rebind. */
592 clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
593 clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
594 cryptodev_inst_index, 1);
595 cinst = cmt->cryptodev_inst + cryptodev_inst_index;
596 cet->cryptodev_id = cinst->dev_id;
597 cet->cryptodev_q = cinst->q_id;
598 clib_spinlock_unlock (&cmt->tlock);
/* clib format function: render one cryptodev instance as
 * "<device-name> <queue-id> <assigned-thread | free>". */
607 format_cryptodev_inst (u8 * s, va_list * args)
609 cryptodev_main_t *cmt = &cryptodev_main;
610 u32 inst = va_arg (*args, u32);
611 cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
612 u32 thread_index = 0;
613 struct rte_cryptodev_info info;
615 rte_cryptodev_info_get (cit->dev_id, &info);
616 s = format (s, "%-25s%-10u", info.device->name, cit->q_id);
/* Search workers for the one bound to this (device, queue) pair;
 * the main thread (index 0) is skipped when workers exist. */
618 vec_foreach_index (thread_index, cmt->per_thread_data)
620 cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
621 if (vlib_num_workers () > 0 && thread_index == 0)
624 if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
626 s = format (s, "%u (%v)\n", thread_index,
627 vlib_worker_threads[thread_index].name);
/* No worker matched: the instance is unassigned. */
632 if (thread_index == vec_len (cmt->per_thread_data))
633 s = format (s, "%s\n", "free");
/* CLI handler for "show cryptodev assignment": print a table of all
 * cryptodev instances and which worker (if any) owns each, plus which
 * data-path API variant is active. */
638 static clib_error_t *
639 cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
640 vlib_cli_command_t * cmd)
642 cryptodev_main_t *cmt = &cryptodev_main;
645 vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
647 if (vec_len (cmt->cryptodev_inst) == 0)
649 vlib_cli_output (vm, "(nil)\n");
653 vec_foreach_index (inst, cmt->cryptodev_inst)
654 vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
/* NOTE(review): the condition selecting RAW vs crypto-op API output
 * is missing from this view. */
657 vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
659 vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
/* Registers the "show cryptodev assignment" CLI command. */
663 VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
664 .path = "show cryptodev assignment",
665 .short_help = "show cryptodev assignment",
666 .function = cryptodev_show_assignment_fn,
/* CLI handler for "set cryptodev assignment thread X resource Y":
 * parse and validate the pair, then rebind the worker via
 * cryptodev_assign_resource (UPDATE mode).
 * NOTE(review): several braces, `done:` style cleanup and the final
 * return are missing from this view; the `>` bounds checks (rather
 * than `>=`) are as in the original — confirm upstream. */
669 static clib_error_t *
670 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
671 vlib_cli_command_t * cmd)
673 cryptodev_main_t *cmt = &cryptodev_main;
674 cryptodev_engine_thread_t *cet;
675 unformat_input_t _line_input, *line_input = &_line_input;
676 u32 thread_index, inst_index;
677 u32 thread_present = 0, inst_present = 0;
678 clib_error_t *error = 0;
681 /* Get a line of input. */
682 if (!unformat_user (input, unformat_line_input, line_input))
685 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
687 if (unformat (line_input, "thread %u", &thread_index))
689 else if (unformat (line_input, "resource %u", &inst_index))
693 error = clib_error_return (0, "unknown input `%U'",
694 format_unformat_error, line_input);
699 if (!thread_present || !inst_present)
701 error = clib_error_return (0, "mandatory argument(s) missing");
/* The main thread cannot own a crypto resource when workers exist. */
705 if (thread_index == 0 && vlib_num_workers () > 0)
708 clib_error_return (0, "assign crypto resource for master thread");
712 if (thread_index > vec_len (cmt->per_thread_data) ||
713 inst_index > vec_len (cmt->cryptodev_inst))
715 error = clib_error_return (0, "wrong thread id or resource id");
719 cet = cmt->per_thread_data + thread_index;
720 ret = cryptodev_assign_resource (cet, inst_index,
721 CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
725 clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
/* Registers the "set cryptodev assignment" CLI command. */
732 VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
733 .path = "set cryptodev assignment",
734 .short_help = "set cryptodev assignment thread <thread_index> "
735 "resource <inst_index>",
736 .function = cryptodev_set_assignment_fn,
/* Sum max_nb_queue_pairs across all cryptodevs.
 * NOTE(review): the `numa` parameter is unused in the visible lines
 * and the return is missing from this view. */
740 cryptodev_count_queue (u32 numa)
742 struct rte_cryptodev_info info;
743 u32 n_cryptodev = rte_cryptodev_count ();
746 for (i = 0; i < n_cryptodev; i++)
748 rte_cryptodev_info_get (i, &info);
749 q_count += info.max_nb_queue_pairs;
/* Configure and start one cryptodev: set up every queue pair, record
 * the driver type (>= 22.11 allows only one driver type across all
 * devices), and register a cryptodev_inst_t per queue pair.
 * NOTE(review): early returns, braces and the q_id assignment are
 * missing from this view. */
756 cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
758 struct rte_cryptodev_config cfg;
759 struct rte_cryptodev_info info;
760 cryptodev_main_t *cmt = &cryptodev_main;
764 rte_cryptodev_info_get (cryptodev_id, &info);
766 /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto devices
767 anymore. Only devices that have the same driver type as the first
768 initialized device can be initialized.
770 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
771 if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
/* Only symmetric-crypto capable devices are usable. */
775 if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
778 cfg.socket_id = info.device->numa_node;
779 cfg.nb_queue_pairs = info.max_nb_queue_pairs;
781 rte_cryptodev_configure (cryptodev_id, &cfg);
783 for (i = 0; i < info.max_nb_queue_pairs; i++)
785 struct rte_cryptodev_qp_conf qp_cfg;
787 qp_cfg.mp_session = 0;
788 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
789 qp_cfg.mp_session_private = 0;
791 qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
793 ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
794 info.device->numa_node);
797 clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
798 cryptodev_id, i, ret);
/* Abort this device if any queue pair failed to set up. */
803 if (i != info.max_nb_queue_pairs)
806 /* start the device */
807 rte_cryptodev_start (cryptodev_id);
809 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
/* First successfully configured device pins the driver type and the
 * private session size used for pool sizing. */
810 if (cmt->drivers_cnt == 0)
812 cmt->drivers_cnt = 1;
813 cmt->driver_id = info.driver_id;
814 cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
818 for (i = 0; i < info.max_nb_queue_pairs; i++)
820 cryptodev_inst_t *cdev_inst;
821 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
822 const char *dev_name = rte_dev_name (info.device);
824 const char *dev_name = info.device->name;
826 vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
827 cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
828 cdev_inst->dev_id = cryptodev_id;
831 snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u",
832 info.device->name, i);
/* Sort comparator for cryptodev instances, ordering by queue id.
 * NOTE(review): the return values (and the equal case) are missing
 * from this view. */
839 cryptodev_cmp (void *v1, void *v2)
841 cryptodev_inst_t *a1 = v1;
842 cryptodev_inst_t *a2 = v2;
844 if (a1->q_id > a2->q_id)
846 if (a1->q_id < a2->q_id)
/* Linear search: does the vector `params` contain `param_value`?
 * NOTE(review): the return lines are missing from this view. */
852 cryptodev_supports_param_value (u32 *params, u32 param_value)
855 vec_foreach (value, params)
857 if (*value == param_value)
/* Check the cached common capability set for a specific (xform type,
 * algorithm, sizes) combination; cipher checks key size, auth checks
 * digest size, AEAD checks key+digest+AAD sizes.
 * NOTE(review): the success/failure return lines are missing from
 * this view. */
864 cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
865 u32 key_size, u32 digest_size, u32 aad_size)
867 cryptodev_main_t *cmt = &cryptodev_main;
868 cryptodev_capability_t *cap;
869 vec_foreach (cap, cmt->supported_caps)
872 if (cap->xform_type != idx->type)
875 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
876 cap->auth.algo == idx->algo.auth &&
877 cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
880 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
881 cap->cipher.algo == idx->algo.cipher &&
882 cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
885 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
886 cap->aead.algo == idx->algo.aead &&
887 cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
888 cryptodev_supports_param_value (cap->aead.digest_sizes,
890 cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
/* Intersect: walk *param_sizes and delete every entry that is not
 * representable by the device capability range [min, max] stepped by
 * `increment`.
 * NOTE(review): the loop-advance / found handling lines are missing
 * from this view. */
897 remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
898 u32 param_size_max, u32 increment)
903 while (i < vec_len (*param_sizes))
906 for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
907 cap_param_size += increment)
909 if ((*param_sizes)[i] == cap_param_size)
918 /* no such param_size in cap so delete this size in temp_cap params */
919 vec_delete (*param_sizes, 1, i);
/* Remove capability entry `temp_cap_id` from the vector, freeing the
 * size vectors owned by that entry first (by xform type). */
926 cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
928 cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];
930 switch (temp_cap.xform_type)
932 case RTE_CRYPTO_SYM_XFORM_AUTH:
933 vec_free (temp_cap.auth.digest_sizes);
935 case RTE_CRYPTO_SYM_XFORM_CIPHER:
936 vec_free (temp_cap.cipher.key_sizes);
938 case RTE_CRYPTO_SYM_XFORM_AEAD:
939 vec_free (temp_cap.aead.key_sizes);
940 vec_free (temp_cap.aead.aad_sizes);
941 vec_free (temp_cap.aead.digest_sizes);
946 vec_delete (*temp_caps, 1, temp_cap_id);
/* For one cached capability, intersect its parameter-size vectors
 * with one device's advertised capability list; a capability survives
 * only if every required size vector stays non-empty.
 * NOTE(review): cap++ advance, break/return lines and the final
 * return are missing from this view. */
950 cryptodev_remove_unsupported_param_sizes (
951 cryptodev_capability_t *temp_cap,
952 const struct rte_cryptodev_capabilities *dev_caps)
955 const struct rte_cryptodev_capabilities *cap = &dev_caps[0];
/* The device capability array is terminated by OP_TYPE_UNDEFINED. */
957 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
959 if (cap->sym.xform_type == temp_cap->xform_type)
960 switch (cap->sym.xform_type)
962 case RTE_CRYPTO_SYM_XFORM_CIPHER:
963 if (cap->sym.cipher.algo == temp_cap->cipher.algo)
965 remove_unsupported_param_size (
966 &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
967 cap->sym.cipher.key_size.max,
968 cap->sym.cipher.key_size.increment);
969 if (vec_len (temp_cap->cipher.key_sizes) > 0)
973 case RTE_CRYPTO_SYM_XFORM_AUTH:
974 if (cap->sym.auth.algo == temp_cap->auth.algo)
976 remove_unsupported_param_size (
977 &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
978 cap->sym.auth.digest_size.max,
979 cap->sym.auth.digest_size.increment);
980 if (vec_len (temp_cap->auth.digest_sizes) > 0)
984 case RTE_CRYPTO_SYM_XFORM_AEAD:
985 if (cap->sym.aead.algo == temp_cap->aead.algo)
/* AEAD must retain key, AAD and digest sizes simultaneously. */
987 remove_unsupported_param_size (
988 &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
989 cap->sym.aead.key_size.max,
990 cap->sym.aead.key_size.increment);
991 remove_unsupported_param_size (
992 &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
993 cap->sym.aead.aad_size.max,
994 cap->sym.aead.aad_size.increment);
995 remove_unsupported_param_size (
996 &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
997 cap->sym.aead.digest_size.max,
998 cap->sym.aead.digest_size.increment);
999 if (vec_len (temp_cap->aead.key_sizes) > 0 &&
1000 vec_len (temp_cap->aead.aad_sizes) > 0 &&
1001 vec_len (temp_cap->aead.digest_sizes) > 0)
/* Compute cmt->supported_caps as the intersection of all configured
 * devices' capabilities: seed the vector from the first device
 * (expanding each min/max/increment range into explicit size lists),
 * then prune against every other device, deleting capabilities that
 * lose all sizes.
 * NOTE(review): loop increments (cap++/cap_id++), braces and returns
 * are missing from this view. */
1017 cryptodev_get_common_capabilities ()
1019 cryptodev_main_t *cmt = &cryptodev_main;
1020 cryptodev_inst_t *dev_inst;
1021 struct rte_cryptodev_info dev_info;
1022 u32 previous_dev_id, dev_id;
1025 cryptodev_capability_t tmp_cap;
1026 const struct rte_cryptodev_capabilities *cap;
1027 const struct rte_cryptodev_capabilities *dev_caps;
1029 clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
1030 if (vec_len (cmt->cryptodev_inst) == 0)
1032 dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
1033 rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
1034 cap = &dev_info.capabilities[0];
1036 /*init capabilities vector*/
1037 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
1039 if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1045 tmp_cap.xform_type = cap->sym.xform_type;
1046 switch (cap->sym.xform_type)
1048 case RTE_CRYPTO_SYM_XFORM_CIPHER:
1049 tmp_cap.cipher.key_sizes = 0;
1050 tmp_cap.cipher.algo = cap->sym.cipher.algo;
/* Expand the [min, max, increment] range into a flat list; an
 * increment of 0 means a single fixed size. */
1051 for (param = cap->sym.cipher.key_size.min;
1052 param <= cap->sym.cipher.key_size.max;
1053 param += cap->sym.cipher.key_size.increment)
1055 vec_add1 (tmp_cap.cipher.key_sizes, param);
1056 if (cap->sym.cipher.key_size.increment == 0)
1060 case RTE_CRYPTO_SYM_XFORM_AUTH:
1061 tmp_cap.auth.algo = cap->sym.auth.algo;
1062 tmp_cap.auth.digest_sizes = 0;
1063 for (param = cap->sym.auth.digest_size.min;
1064 param <= cap->sym.auth.digest_size.max;
1065 param += cap->sym.auth.digest_size.increment)
1067 vec_add1 (tmp_cap.auth.digest_sizes, param);
1068 if (cap->sym.auth.digest_size.increment == 0)
1072 case RTE_CRYPTO_SYM_XFORM_AEAD:
1073 tmp_cap.aead.key_sizes = 0;
1074 tmp_cap.aead.aad_sizes = 0;
1075 tmp_cap.aead.digest_sizes = 0;
1076 tmp_cap.aead.algo = cap->sym.aead.algo;
1077 for (param = cap->sym.aead.key_size.min;
1078 param <= cap->sym.aead.key_size.max;
1079 param += cap->sym.aead.key_size.increment)
1081 vec_add1 (tmp_cap.aead.key_sizes, param);
1082 if (cap->sym.aead.key_size.increment == 0)
1085 for (param = cap->sym.aead.aad_size.min;
1086 param <= cap->sym.aead.aad_size.max;
1087 param += cap->sym.aead.aad_size.increment)
1089 vec_add1 (tmp_cap.aead.aad_sizes, param);
1090 if (cap->sym.aead.aad_size.increment == 0)
1093 for (param = cap->sym.aead.digest_size.min;
1094 param <= cap->sym.aead.digest_size.max;
1095 param += cap->sym.aead.digest_size.increment)
1097 vec_add1 (tmp_cap.aead.digest_sizes, param);
1098 if (cap->sym.aead.digest_size.increment == 0)
1106 vec_add1 (cmt->supported_caps, tmp_cap);
/* Prune: test each seeded capability against every other device;
 * skip duplicates of the same dev_id (multiple queue instances). */
1110 while (cap_id < vec_len (cmt->supported_caps))
1112 u32 cap_is_supported = 1;
1113 previous_dev_id = cmt->cryptodev_inst->dev_id;
1115 vec_foreach (dev_inst, cmt->cryptodev_inst)
1117 dev_id = dev_inst->dev_id;
1118 if (previous_dev_id != dev_id)
1120 previous_dev_id = dev_id;
1121 rte_cryptodev_info_get (dev_id, &dev_info);
1122 dev_caps = &dev_info.capabilities[0];
1123 cap_is_supported = cryptodev_remove_unsupported_param_sizes (
1124 &cmt->supported_caps[cap_id], dev_caps);
1125 if (!cap_is_supported)
1127 cryptodev_delete_cap (&cmt->supported_caps, cap_id);
1128 /*no need to check other devices as this one doesn't support
1134 if (cap_is_supported)
/* Probe and configure all cryptodevs, build the common capability
 * set, and sort instances by queue id. Fails (negative return path,
 * not visible here) when fewer usable queues/instances exist than
 * worker threads.
 * NOTE(review): the return statements are missing from this view. */
1140 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
1142 cryptodev_main_t *cmt = &cryptodev_main;
1143 u32 n_queues = cryptodev_count_queue (vm->numa_node);
1146 if (n_queues < n_workers)
1149 for (i = 0; i < rte_cryptodev_count (); i++)
1150 cryptodev_configure (vm, i);
1152 if (vec_len (cmt->cryptodev_inst) == 0)
1154 cryptodev_get_common_capabilities ();
1155 vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);
1157 /* if there is not enough device stop cryptodev */
1158 if (vec_len (cmt->cryptodev_inst) < n_workers)
/* Pre-22.11 helper: append driver_id to *unique_drivers if not
 * already present (used to count distinct driver types).
 * NOTE(review): the early-return on match is missing from this view. */
1164 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
1166 is_drv_unique (u32 driver_id, u32 **unique_drivers)
1171 vec_foreach (unique_elt, *unique_drivers)
1173 if (*unique_elt == driver_id)
1181 vec_add1 (*unique_drivers, driver_id);
/* Plugin entry point: set up per-numa and per-thread state, probe and
 * configure all cryptodevs, assign a (device, queue) pair to every
 * worker, register the engine + key handler with vnet crypto, install
 * the RAW or crypto-op data-path handlers, and enable IPsec async
 * mode.
 * NOTE(review): this function is truncated at the end of the visible
 * chunk (error cleanup and final return are not shown); several
 * interior lines are also missing. */
1186 dpdk_cryptodev_init (vlib_main_t * vm)
1188 cryptodev_main_t *cmt = &cryptodev_main;
1189 vlib_thread_main_t *tm = vlib_get_thread_main ();
1190 cryptodev_engine_thread_t *cet;
1191 cryptodev_numa_data_t *numa_data;
1194 u32 skip_master = vlib_num_workers () > 0;
1195 u32 n_workers = tm->n_vlib_mains - skip_master;
1198 clib_error_t *error;
1200 cmt->iova_mode = rte_eal_iova_mode ();
/* Size per-numa data from the highest populated cpu socket. */
1202 clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
1208 vec_validate (cmt->per_numa_data, nodes);
1209 vec_foreach (numa_data, cmt->per_numa_data)
1211 vec_validate (numa_data->sess_pools, 0);
1214 /* probe all cryptodev devices and get queue info */
1215 if (cryptodev_probe (vm, n_workers) < 0)
1218 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
/* Pre-22.11: count distinct driver types and take the largest
 * private session size across devices for pool sizing. */
1219 struct rte_cryptodev_info dev_info;
1220 cryptodev_inst_t *dev_inst;
1221 u32 *unique_drivers = 0;
1222 vec_foreach (dev_inst, cmt->cryptodev_inst)
1224 u32 dev_id = dev_inst->dev_id;
1225 rte_cryptodev_info_get (dev_id, &dev_info);
1226 u32 driver_id = dev_info.driver_id;
1227 is_drv_unique (driver_id, &unique_drivers);
1230 rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
1231 cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
1234 cmt->drivers_cnt = vec_len (unique_drivers);
1235 vec_free (unique_drivers);
1238 clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
1239 clib_spinlock_init (&cmt->tlock);
1241 vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1,
1242 CLIB_CACHE_LINE_BYTES);
/* Auto-assign one cryptodev instance per worker thread. */
1243 for (i = skip_master; i < tm->n_vlib_mains; i++)
1245 cet = cmt->per_thread_data + i;
1247 if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
1250 error = clib_error_return (0, "Failed to configure cryptodev");
1255 /* register handler */
1256 eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
1257 "DPDK Cryptodev Engine");
1259 vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
/* Prefer the RAW data-path API when the devices support it. */
1261 if (cryptodev_register_raw_hdl)
1262 error = cryptodev_register_raw_hdl (vm, eidx);
1264 error = cryptodev_register_cop_hdl (vm, eidx);
1269 /* this engine is only enabled when cryptodev device(s) are presented in
1270 * startup.conf. Assume it is wanted to be used, turn on async mode here.
1272 ipsec_set_async_mode (1);