/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

cryptodev_main_t cryptodev_main;

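/* Fill a DPDK symmetric xform describing an AEAD (AES-GCM) operation from a
 * vnet crypto key. Only AES-128/192/256-GCM keys are accepted; the 16-byte
 * digest and 12-byte IV match this engine's IPsec ESP usage, and the IV is
 * read from CRYPTODEV_IV_OFFSET within the crypto op. */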
static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
		    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

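/* Fill a two-element xform chain for a linked cipher+auth key (e.g. AES-CBC
 * with HMAC-SHA). For encryption the cipher xform comes first and the auth
 * xform generates the digest; for decryption the order is reversed and the
 * digest is verified instead. */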
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}

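/* Tear down a DPDK session: clear the per-device private data on every
 * cryptodev (dropping the reference taken by each session init), then
 * return the session handle to its mempool. */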
static_always_inline void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}

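/* The three check_*_support helpers below answer one question each: does the
 * capability set common to all probed cryptodevs (cmt->supported_caps)
 * include the given algorithm with the given key/digest/aad size? */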
static int
check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
	continue;
      if (vcap->cipher.algo != algo)
	continue;
      vec_foreach (s, vcap->cipher.key_sizes)
	if (*s == key_size)
	  return 1;
    }

  return 0;
}

static int
check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
	continue;
      if (vcap->auth.algo != algo)
	continue;
      vec_foreach (s, vcap->auth.digest_sizes)
	if (*s == digest_size)
	  return 1;
    }

  return 0;
}

static_always_inline int
check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
		    u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;
  u32 key_match = 0, digest_match = 0, aad_match = 0;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
	continue;
      if (vcap->aead.algo != algo)
	continue;
      vec_foreach (s, vcap->aead.digest_sizes)
	if (*s == digest_size)
	  {
	    digest_match = 1;
	    break;
	  }
      vec_foreach (s, vcap->aead.key_sizes)
	if (*s == key_size)
	  {
	    key_match = 1;
	    break;
	  }
      vec_foreach (s, vcap->aead.aad_sizes)
	if (*s == aad_size)
	  {
	    aad_match = 1;
	    break;
	  }
    }

  if (key_match == 1 && digest_match == 1 && aad_match == 1)
    return 1;

  return 0;
}

237 cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
241 if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
243 switch (key->async_alg)
245 #define _(a, b, c, d, e) \
246 case VNET_CRYPTO_ALG_##a##_##d##_TAG##e: \
247 if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) && \
248 check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e)) \
250 foreach_cryptodev_link_async_alg
257 #define _(a, b, c, d, e, f, g) \
258 if (key->alg == VNET_CRYPTO_ALG_##a) \
260 if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f)) \
263 foreach_vnet_aead_crypto_conversion
266 if (matched < 2) return 0;
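/* Key add/del/modify handler. On DEL/MODIFY the per-NUMA DPDK sessions are
 * destroyed; on ADD only the per-NUMA session slots are reserved here -- the
 * sessions themselves are expected to be created lazily (via
 * cryptodev_session_create) when the key is first used by the data path. */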
static void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      vec_foreach_index (i, cmt->per_numa_data)
	{
	  if (!ckey->keys || !ckey->keys[i])
	    continue;
	  if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
	    {
	      cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
	      cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

	      CLIB_MEMORY_STORE_BARRIER ();
	      ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
	      ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
	    }
	}
      return;
    }

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}

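/* Allocate the two mempools a NUMA node needs for DPDK sessions: one for
 * session handles, one for the per-driver session private data (sized for
 * the largest private data size across the probed drivers). */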
static clib_error_t *
allocate_session_pools (u32 numa_node,
			cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcryptodev_sess_pool_%u_%u%c", numa_node, len, 0);
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);

  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

  name = format (0, "cryptodev_sess_pool_%u_%u%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);

  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }

  vec_free (name);
  return 0;

clear_mempools:
  vec_free (name);
  if (sess_pools_elt->sess_pool)
    rte_mempool_free (sess_pools_elt->sess_pool);
  if (sess_pools_elt->sess_priv_pool)
    rte_mempool_free (sess_pools_elt->sess_priv_pool);
  return error;
}

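/* Create the encrypt and decrypt DPDK sessions for key 'idx' on the calling
 * thread's NUMA node. Picks (or grows) a session pool with at least two free
 * handles, initializes the sessions once per unique driver, stamps the AAD
 * length into opaque_data so the data path can detect a mismatch, and only
 * then publishes the sessions to ckey->keys behind a store barrier. */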
clib_error_t *
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
			  u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
  u32 numa_node = vm->numa_node;
  clib_error_t *error = 0;
  int ret;
  u8 found = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
	{
	  error = allocate_session_pools (numa_node, sess_pools_elt,
					  vec_len (numa_data->sess_pools) - 1);
	  if (error)
	    goto clear_key;
	  found = 1;
	  break;
	}

      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
	{
	  found = 1;
	  break;
	}
    }

  if (found == 0)
    {
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
				      vec_len (numa_data->sess_pools) - 1);
      if (error)
	goto clear_key;
    }

  sess_pool = sess_pools_elt->sess_pool;
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);

  if (ret)
    {
      error = clib_error_return (0, "Failed to prepare xform");
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);
      u32 driver_id = cdev->driver_id;

      /* if the session is already configured for the driver type, avoid
	 configuring it again to increase the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
	  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
	continue;

      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
	sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
	sess_priv_pool);
      if (ret < 0)
	{
	  error = clib_error_return (0, "Failed to init session");
	  goto clear_key;
	}
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (error != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return error;
}

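/* A worker gets a cryptodev queue either automatically at init time
 * (ASSIGN_AUTO) or explicitly via the CLI (ASSIGN_UPDATE). */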
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 *  assign a cryptodev resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 *  @param op: the assignment method.
 *  @return: 0 on success, negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
	  == 1)
	return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
	{
	  cinst = cmt->cryptodev_inst + idx;
	  if (cinst->dev_id == cet->cryptodev_id &&
	      cinst->q_id == cet->cryptodev_q)
	    break;
	}
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
	return -EINVAL;

      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}

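/* Format one cryptodev instance as "name queue-id assigned-worker"; used by
 * the "show cryptodev assignment" CLI below. */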
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
	continue;

      if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
	{
	  s = format (s, "%u (%v)\n", thread_index,
		      vlib_worker_threads[thread_index].name);
	  break;
	}
    }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			      vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  if (cmt->is_raw_api)
    vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
  else
    vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
  .path = "show cryptodev assignment",
  .short_help = "show cryptodev assignment",
  .function = cryptodev_show_assignment_fn,
};

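/* CLI handler for re-binding a worker to a cryptodev queue, e.g.
 *   vpp# set cryptodev assignment thread 2 resource 1
 * (indices here are illustrative). The target resource must be free and the
 * worker must have no inflight ops (see cryptodev_assign_resource). */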
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			     vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
	clib_error_return (0, "assign crypto resource for master thread");
      return error;
    }

  if (thread_index > vec_len (cmt->per_thread_data) ||
      inst_index > vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error =
	clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
  .path = "set cryptodev assignment",
  .short_help = "set cryptodev assignment thread <thread_index> "
		"resource <inst_index>",
  .function = cryptodev_set_assignment_fn,
};

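/* Sum up the queue pairs of all probed cryptodevs. Note the numa argument is
 * currently unused, so queues on remote nodes are counted too. */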
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  u32 i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

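/* Configure and start one cryptodev: set up every queue pair with
 * CRYPTODEV_NB_CRYPTO_OPS descriptors and register each (device, queue)
 * tuple as a cryptodev_inst_t that can be bound to a worker. Returns -1 if
 * the device lacks symmetric crypto support or a queue pair setup fails. */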
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
      qp_cfg.mp_session_private = 0;
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
					    info.device->numa_node);
      if (ret)
	{
	  clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
			cryptodev_id, i, ret);
	  break;
	}
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9, "%s_q%u",
		info.device->name, i);
    }

  return 0;
}

static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;

  return 0;
}

static int
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;

  vec_foreach (value, params)
    {
      if (*value == param_value)
	return 1;
    }

  return 0;
}

static int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
			     u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;

  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
	continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	  cap->auth.algo == idx->algo.auth &&
	  cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
	return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	  cap->cipher.algo == idx->algo.cipher &&
	  cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
	return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
	  cap->aead.algo == idx->algo.aead &&
	  cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
	  cryptodev_supports_param_value (cap->aead.digest_sizes,
					  digest_size) &&
	  cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
	return 1;
    }

  return 0;
}

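/* Drop every size in *param_sizes that does not fall on the device's
 * advertised (min, max, increment) grid; an increment of 0 means only
 * 'min' itself is valid. */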
static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
			       u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;

      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
	   cap_param_size += increment)
	{
	  if ((*param_sizes)[i] == cap_param_size)
	    {
	      found_param = 1;
	      break;
	    }
	  if (increment == 0)
	    break;
	}

      if (!found_param)
	/* no such param_size in cap so delete this size in temp_cap params */
	vec_delete (*param_sizes, 1, i);
      else
	i++;
    }
}

static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}

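/* Intersect one candidate capability with a device's capability list:
 * prune the key/digest/aad sizes the device does not support and return 1
 * if the capability survives with a non-empty size set, 0 otherwise. */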
static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
	switch (cap->sym.xform_type)
	  {
	  case RTE_CRYPTO_SYM_XFORM_CIPHER:
	    if (cap->sym.cipher.algo == temp_cap->cipher.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
		  cap->sym.cipher.key_size.max,
		  cap->sym.cipher.key_size.increment);
		if (vec_len (temp_cap->cipher.key_sizes) > 0)
		  cap_found = 1;
	      }
	    break;
	  case RTE_CRYPTO_SYM_XFORM_AUTH:
	    if (cap->sym.auth.algo == temp_cap->auth.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
		  cap->sym.auth.digest_size.max,
		  cap->sym.auth.digest_size.increment);
		if (vec_len (temp_cap->auth.digest_sizes) > 0)
		  cap_found = 1;
	      }
	    break;
	  case RTE_CRYPTO_SYM_XFORM_AEAD:
	    if (cap->sym.aead.algo == temp_cap->aead.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
		  cap->sym.aead.key_size.max,
		  cap->sym.aead.key_size.increment);
		remove_unsupported_param_size (
		  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
		  cap->sym.aead.aad_size.max,
		  cap->sym.aead.aad_size.increment);
		remove_unsupported_param_size (
		  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
		  cap->sym.aead.digest_size.max,
		  cap->sym.aead.digest_size.increment);
		if (vec_len (temp_cap->aead.key_sizes) > 0 &&
		    vec_len (temp_cap->aead.aad_sizes) > 0 &&
		    vec_len (temp_cap->aead.digest_sizes) > 0)
		  cap_found = 1;
	      }
	    break;
	  default:
	    break;
	  }
      if (cap_found)
	break;
      cap++;
    }

  return cap_found;
}

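/* Build cmt->supported_caps as the capability set common to every probed
 * cryptodev. The vector is seeded from the first device, expanding each
 * (min, max, increment) range into an explicit size list, then every other
 * device prunes it. For example, if device A advertises AES-GCM key sizes
 * {16, 24, 32} and device B only {16, 32}, the common capability keeps
 * {16, 32}; a capability whose size list empties out is deleted. */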
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /*init capabilities vector*/
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
	{
	  cap++;
	  continue;
	}

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
	{
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
	  tmp_cap.cipher.key_sizes = 0;
	  tmp_cap.cipher.algo = cap->sym.cipher.algo;
	  for (param = cap->sym.cipher.key_size.min;
	       param <= cap->sym.cipher.key_size.max;
	       param += cap->sym.cipher.key_size.increment)
	    {
	      vec_add1 (tmp_cap.cipher.key_sizes, param);
	      if (cap->sym.cipher.key_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AUTH:
	  tmp_cap.auth.algo = cap->sym.auth.algo;
	  tmp_cap.auth.digest_sizes = 0;
	  for (param = cap->sym.auth.digest_size.min;
	       param <= cap->sym.auth.digest_size.max;
	       param += cap->sym.auth.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.auth.digest_sizes, param);
	      if (cap->sym.auth.digest_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AEAD:
	  tmp_cap.aead.key_sizes = 0;
	  tmp_cap.aead.aad_sizes = 0;
	  tmp_cap.aead.digest_sizes = 0;
	  tmp_cap.aead.algo = cap->sym.aead.algo;
	  for (param = cap->sym.aead.key_size.min;
	       param <= cap->sym.aead.key_size.max;
	       param += cap->sym.aead.key_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.key_sizes, param);
	      if (cap->sym.aead.key_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.aad_size.min;
	       param <= cap->sym.aead.aad_size.max;
	       param += cap->sym.aead.aad_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.aad_sizes, param);
	      if (cap->sym.aead.aad_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.digest_size.min;
	       param <= cap->sym.aead.digest_size.max;
	       param += cap->sym.aead.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.digest_sizes, param);
	      if (cap->sym.aead.digest_size.increment == 0)
		break;
	    }
	  break;
	default:
	  break;
	}

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  /* prune the vector against every other device's capabilities */
  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
	{
	  dev_id = dev_inst->dev_id;
	  if (previous_dev_id != dev_id)
	    {
	      previous_dev_id = dev_id;
	      rte_cryptodev_info_get (dev_id, &dev_info);
	      dev_caps = &dev_info.capabilities[0];
	      cap_is_supported = cryptodev_remove_unsupported_param_sizes (
		&cmt->supported_caps[cap_id], dev_caps);
	      if (!cap_is_supported)
		{
		  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
		  /*no need to check other devices as this one doesn't support
		   * this temp_cap*/
		  break;
		}
	    }
	}
      if (cap_is_supported)
	cap_id++;
    }
}

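/* Probe and configure all cryptodevs. Fails (-1) when the devices cannot
 * provide at least one queue pair per worker thread. */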
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  if (vec_len (cmt->cryptodev_inst) == 0)
    return -1;

  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough devices, stop cryptodev */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}

static void
is_drv_unique (u32 driver_id, u32 **unique_drivers)
{
  u32 *unique_elt;
  u8 found = 0;

  vec_foreach (unique_elt, *unique_drivers)
    {
      if (*unique_elt == driver_id)
	{
	  found = 1;
	  break;
	}
    }

  if (!found)
    vec_add1 (*unique_drivers, driver_id);
}

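/* Plugin entry point, called from the DPDK plugin init path. Sets up
 * per-NUMA session pool placeholders, probes the devices, auto-assigns one
 * (device, queue) pair to each worker, then registers this engine (priority
 * 100) and its key handler with the vnet crypto framework. */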
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  u32 node;
  u8 nodes = 0;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  u32 *unique_drivers = 0;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
	nodes = node;
    }

  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Not enough cryptodev resources");
      goto err_handling;
    }

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);
      u32 driver_id = cdev->driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
	rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
	  0)
	{
	  error = clib_error_return (0, "Failed to configure cryptodev");
	  goto err_handling;
	}
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are presented in
   * startup.conf. Assume it is wanted to be used, turn on async mode here.
   */
  vnet_crypto_request_async_mode (1);
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  return error;
}