/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_config.h>

#include "cryptodev.h"
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
cryptodev_main_t cryptodev_main;
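/* Translate a VPP AEAD key (AES-GCM or chacha20-poly1305) into a DPDK AEAD
 * transform. The digest (tag) length is fixed at 16 bytes and the 12-byte IV
 * is picked up at CRYPTODEV_IV_OFFSET within the crypto op. Returns 0 on
 * success, -1 for an unsupported algorithm. */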
static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
                    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
                    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;

  if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
    {
      aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
    }
  else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
    {
      aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
    }
  else
    return -1;

  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

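/* Translate a linked (cipher + integ) VPP key into a chained pair of DPDK
 * transforms. For encryption the cipher transform is placed first; for
 * decryption the auth transform is placed first so that the digest is
 * verified before decryption. Returns 0 on success, -1 for an unsupported
 * algorithm or missing component keys. */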
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
                      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}

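/* Free a session. With DPDK >= 22.11 a session belongs to a single device
 * and the first successful rte_cryptodev_sym_session_free () is enough;
 * older DPDK requires the per-driver private data to be cleared on every
 * device before the session itself is freed. */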
static_always_inline void
cryptodev_session_del (cryptodev_session_t *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();
  for (i = 0; i < n_devs; i++)
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
    if (rte_cryptodev_sym_session_free (i, sess) == 0)
      break;
#else
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
#endif
}

static_always_inline int
check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
        continue;
      if (vcap->cipher.algo != algo)
        continue;
      vec_foreach (s, vcap->cipher.key_sizes)
        if (*s == key_size)
          return 1;
    }

  return 0;
}

static_always_inline int
check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
        continue;
      if (vcap->auth.algo != algo)
        continue;
      vec_foreach (s, vcap->auth.digest_sizes)
        if (*s == digest_size)
          return 1;
    }

  return 0;
}

static_always_inline int
check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
                    u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;
  u32 key_match = 0, digest_match = 0, aad_match = 0;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
        continue;
      if (vcap->aead.algo != algo)
        continue;
      vec_foreach (s, vcap->aead.digest_sizes)
        if (*s == digest_size)
          {
            digest_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.key_sizes)
        if (*s == key_size)
          {
            key_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.aad_sizes)
        if (*s == aad_size)
          {
            aad_match = 1;
            break;
          }
    }

  if (key_match == 1 && digest_match == 1 && aad_match == 1)
    return 1;

  return 0;
}

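/* Return 1 if every probed cryptodev supports the algorithm and parameter
 * sizes implied by this vnet key. The AEAD conversion table lists each
 * algorithm with more than one AAD size, so a fully supported AEAD key must
 * match more than once (hence the matched < 2 test below). */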
static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
        default:
          return 0;
        }
    }

#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

  if (matched < 2)
    return 0;

  return 1;
}

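/* Common add/del/modify key handler. Sessions themselves are created lazily
 * by the datapath; on add this only validates the algorithm and sizes the
 * per-numa session pointer arrays, on delete/modify it tears down any
 * sessions that were created for the key. */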
static_always_inline void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
        return;

      vec_foreach_index (i, cmt->per_numa_data)
        {
          if (!ckey->keys || !ckey->keys[i])
            continue;
          if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
            {
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

              CLIB_MEMORY_STORE_BARRIER ();
              ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
              ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
            }
        }
      return;
    }

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}

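/* Allocate the DPDK session mempool(s) for one numa node. DPDK >= 22.11
 * needs a single pool whose element size covers the driver's private
 * session data; older DPDK uses a separate private-data pool shared by all
 * drivers. */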
static_always_inline clib_error_t *
allocate_session_pools (u32 numa_node,
                        cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
#else
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
#endif
  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);
  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);
#endif

clear_mempools:
  if (error)
    {
      vec_free (name);
      if (sess_pools_elt->sess_pool)
        rte_mempool_free (sess_pools_elt->sess_pool);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      if (sess_pools_elt->sess_priv_pool)
        rte_mempool_free (sess_pools_elt->sess_priv_pool);
#endif
    }
  return error;
}

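/* Create the encrypt and decrypt sessions of a key for the calling thread's
 * numa node, growing the session pool list whenever the current pool cannot
 * supply two more sessions. The AAD length is stored in the session opaque
 * data so the datapath can notice when a frame's AAD length no longer
 * matches the session. */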
static_always_inline clib_error_t *
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                          u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
  struct rte_cryptodev_info dev_info;
#endif
  u32 numa_node = vm->numa_node;
  clib_error_t *error = 0;
  int ret = 0;
  u8 found = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
        {
          error = allocate_session_pools (numa_node, sess_pools_elt,
                                          vec_len (numa_data->sess_pools) - 1);
          if (error)
            goto clear_key;
        }

      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
        {
          found = 1;
          break;
        }
    }

  if (!found)
    {
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
                                      vec_len (numa_data->sess_pools) - 1);
      if (error)
        goto clear_key;
    }

  sess_pool = sess_pools_elt->sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
#endif

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);

  if (ret)
    {
      error = clib_error_return (0, "Failed to prepare xform");
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  u32 dev_id = dev_inst->dev_id;
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
      !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      error = clib_error_return (0, "Failed to create session");
      goto clear_key;
    }

  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
#else
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;

      /* if the session is already configured for the driver type, avoid
         configuring it again to increase the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
          sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
        continue;

      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
        sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
        sess_priv_pool);
      if (ret < 0)
        {
          error = clib_error_return (0, "Failed to init session");
          goto clear_key;
        }
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
#endif

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (error != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return error;
}

typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 *  assign a cryptodev resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 *  @param op: the assignment method.
 *  @return: 0 if successful, negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assign resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
        {
          cinst = cmt->cryptodev_inst + idx;
          if (cinst->dev_id == cet->cryptodev_id &&
              cinst->q_id == cet->cryptodev_q)
            break;
        }
      /* invalid existing worker resource assignment */
      if (idx >= vec_len (cmt->cryptodev_inst))
        return -EINVAL;

      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}

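/* Format one cryptodev instance as "name / queue-id / assigned worker" for
 * the "show cryptodev assignment" CLI; prints "free" when no worker owns
 * the queue pair. */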
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
        continue;

      if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
        {
          s = format (s, "%u (%v)\n", thread_index,
                      vlib_worker_threads[thread_index].name);
          break;
        }
    }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                              vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
                   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  if (cmt->is_raw_api)
    vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
  else
    vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
  .path = "show cryptodev assignment",
  .short_help = "show cryptodev assignment",
  .function = cryptodev_show_assignment_fn,
};

static clib_error_t *
cryptodev_show_sw_rings_fn (vlib_main_t *vm, unformat_input_t *input,
                            vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 thread_index = 0;

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
        continue;

      vlib_cli_output (vm, "\n\n");
      vlib_cli_output (vm, "Frames total: %d", cet->frames_on_ring);
      vlib_cli_output (vm, "Frames pending in a ring: %d",
                       cet->frames_on_ring - cet->enqueued_not_dequeueq -
                         cet->deqeued_not_returned);
      vlib_cli_output (vm, "Frames enqueued but not dequeued: %d",
                       cet->enqueued_not_dequeueq);
      vlib_cli_output (vm, "Frames dequeued but not returned: %d",
                       cet->deqeued_not_returned);
      vlib_cli_output (vm, "inflight: %d", cet->inflight);
      vlib_cli_output (vm, "Head: %d", cet->frame_ring.head);
      vlib_cli_output (vm, "Tail: %d", cet->frame_ring.tail);
      vlib_cli_output (vm, "\n\n");
    }

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
  .path = "show cryptodev sw-ring status",
  .short_help = "show status of all cryptodev software rings",
  .function = cryptodev_show_sw_rings_fn,
};

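/* CLI handler to pin a worker thread to a specific cryptodev queue pair,
 * e.g. "set cryptodev assignment thread 1 resource 2". The request is
 * refused for the master thread, out-of-range indices and resources that
 * are already in use. */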
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                             vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
        thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
        inst_present = 1;
      else
        {
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, line_input);
          return error;
        }
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
        clib_error_return (0, "cannot assign crypto resource to master thread");
      return error;
    }

  if (thread_index > vec_len (cmt->per_thread_data) ||
      inst_index > vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
                                   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error =
        clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
  .path = "set cryptodev assignment",
  .short_help = "set cryptodev assignment thread <thread_index> "
                "resource <inst_index>",
  .function = cryptodev_set_assignment_fn,
};

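/* Count the queue pairs offered by all cryptodevs; this bounds the number
 * of workers the engine can serve, since each worker needs a queue pair of
 * its own. */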
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  int i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

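/* Configure and start one cryptodev: set up every queue pair with
 * CRYPTODEV_NB_CRYPTO_OPS descriptors and record one cryptodev_inst_t per
 * queue pair. Devices without symmetric crypto support are skipped, as are
 * (from DPDK 22.11 on) devices whose driver type differs from the first
 * initialized device. */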
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto devices
     anymore. Only devices that have the same driver type as the first
     initialized device can be initialized.
   */
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
    return -1;
#endif

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      qp_cfg.mp_session_private = 0;
#endif
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
                                            info.device->numa_node);
      if (ret)
        {
          clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
                        cryptodev_id, i, ret);
          break;
        }
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 0)
    {
      cmt->drivers_cnt = 1;
      cmt->driver_id = info.driver_id;
      cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
    }
#endif

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
      const char *dev_name = rte_dev_name (info.device);
#else
      const char *dev_name = info.device->name;
#endif
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u",
                dev_name, i);
    }

  return 0;
}

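/* Sort helper: order instances by queue id so that queue 0 of every device
 * is handed out before any queue 1, spreading workers across devices. */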
static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;
  return 0;
}

static_always_inline u32
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;

  vec_foreach (value, params)
    {
      if (*value == param_value)
        return 1;
    }
  return 0;
}

int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                             u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;

  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
        continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
          cap->auth.algo == idx->algo.auth &&
          cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
          cap->cipher.algo == idx->algo.cipher &&
          cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
          cap->aead.algo == idx->algo.aead &&
          cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
          cryptodev_supports_param_value (cap->aead.digest_sizes,
                                          digest_size) &&
          cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
        return 1;
    }
  return 0;
}

static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* no such param_size in cap so delete this size in temp_cap params */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}

static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}

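/* Intersect one capability of the common set with a device's capability
 * array, trimming key/digest/AAD sizes the device cannot handle. Returns 1
 * if the capability survives with at least one size in every list, 0 if it
 * must be dropped entirely. */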
static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
        switch (cap->sym.xform_type)
          {
          case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (cap->sym.cipher.algo == temp_cap->cipher.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
                  cap->sym.cipher.key_size.max,
                  cap->sym.cipher.key_size.increment);
                if (vec_len (temp_cap->cipher.key_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (cap->sym.auth.algo == temp_cap->auth.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
                  cap->sym.auth.digest_size.max,
                  cap->sym.auth.digest_size.increment);
                if (vec_len (temp_cap->auth.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AEAD:
            if (cap->sym.aead.algo == temp_cap->aead.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
                  cap->sym.aead.key_size.max,
                  cap->sym.aead.key_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
                  cap->sym.aead.aad_size.max,
                  cap->sym.aead.aad_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
                  cap->sym.aead.digest_size.max,
                  cap->sym.aead.digest_size.increment);
                if (vec_len (temp_cap->aead.key_sizes) > 0 &&
                    vec_len (temp_cap->aead.aad_sizes) > 0 &&
                    vec_len (temp_cap->aead.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          default:
            break;
          }
      cap++;
    }

  return cap_found;
}

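/* Build the capability set common to all probed devices in two passes:
 * seed cmt->supported_caps from the first device's capability array, then
 * intersect it with every other device, deleting any capability a device
 * cannot satisfy. */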
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /*init capabilities vector*/
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
        {
          cap++;
          continue;
        }

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
        {
        case RTE_CRYPTO_SYM_XFORM_CIPHER:
          tmp_cap.cipher.key_sizes = 0;
          tmp_cap.cipher.algo = cap->sym.cipher.algo;
          for (param = cap->sym.cipher.key_size.min;
               param <= cap->sym.cipher.key_size.max;
               param += cap->sym.cipher.key_size.increment)
            {
              vec_add1 (tmp_cap.cipher.key_sizes, param);
              if (cap->sym.cipher.key_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AUTH:
          tmp_cap.auth.algo = cap->sym.auth.algo;
          tmp_cap.auth.digest_sizes = 0;
          for (param = cap->sym.auth.digest_size.min;
               param <= cap->sym.auth.digest_size.max;
               param += cap->sym.auth.digest_size.increment)
            {
              vec_add1 (tmp_cap.auth.digest_sizes, param);
              if (cap->sym.auth.digest_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AEAD:
          tmp_cap.aead.key_sizes = 0;
          tmp_cap.aead.aad_sizes = 0;
          tmp_cap.aead.digest_sizes = 0;
          tmp_cap.aead.algo = cap->sym.aead.algo;
          for (param = cap->sym.aead.key_size.min;
               param <= cap->sym.aead.key_size.max;
               param += cap->sym.aead.key_size.increment)
            {
              vec_add1 (tmp_cap.aead.key_sizes, param);
              if (cap->sym.aead.key_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.aad_size.min;
               param <= cap->sym.aead.aad_size.max;
               param += cap->sym.aead.aad_size.increment)
            {
              vec_add1 (tmp_cap.aead.aad_sizes, param);
              if (cap->sym.aead.aad_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.digest_size.min;
               param <= cap->sym.aead.digest_size.max;
               param += cap->sym.aead.digest_size.increment)
            {
              vec_add1 (tmp_cap.aead.digest_sizes, param);
              if (cap->sym.aead.digest_size.increment == 0)
                break;
            }
          break;
        default:
          break;
        }

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
        {
          dev_id = dev_inst->dev_id;
          if (previous_dev_id != dev_id)
            {
              previous_dev_id = dev_id;
              rte_cryptodev_info_get (dev_id, &dev_info);
              dev_caps = &dev_info.capabilities[0];
              cap_is_supported = cryptodev_remove_unsupported_param_sizes (
                &cmt->supported_caps[cap_id], dev_caps);
              if (!cap_is_supported)
                {
                  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
                  /*no need to check other devices as this one doesn't support
                    this temp_cap*/
                  break;
                }
            }
        }
      if (cap_is_supported)
        cap_id++;
    }
}

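/* Probe all cryptodevs and build the instance list. Fails when fewer queue
 * pairs than workers are available, so that every worker can own a queue
 * pair exclusively. */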
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  if (vec_len (cmt->cryptodev_inst) == 0)
    return -1;
  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough devices, stop cryptodev */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
static void
is_drv_unique (u32 driver_id, u32 **unique_drivers)
{
  u32 *unique_elt;
  u8 found = 0;

  vec_foreach (unique_elt, *unique_drivers)
    {
      if (*unique_elt == driver_id)
        {
          found = 1;
          break;
        }
    }

  if (!found)
    vec_add1 (*unique_drivers, driver_id);
}
#endif

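/* Engine entry point, called from the DPDK plugin initialization. Sizes the
 * per-numa and per-thread state, probes and configures the devices, assigns
 * one queue pair per worker and registers the engine with the vnet crypto
 * infrastructure. The engine only comes up when cryptodev-capable devices
 * are configured in startup.conf (e.g. a crypto "vdev" entry in the dpdk
 * stanza). */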
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  u32 node;
  u8 nodes = 0;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
        nodes = node;
    }

  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    return 0;

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_cryptodev_info dev_info;
  cryptodev_inst_t *dev_inst;
  u32 *unique_drivers = 0;
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
        rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);
#endif

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
          0)
        {
          error = clib_error_return (0, "Failed to configure cryptodev");
          goto err_handling;
        }
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
                                      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are present in
   * startup.conf. Assume it is meant to be used and turn on async mode here.
   */
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  return error;
}