/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

cryptodev_main_t cryptodev_main;

static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
		    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
    aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
    aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
  else
    return -1;

  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

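/*
 * Illustrative note (not taken verbatim from the original sources): for an
 * ESP SA using VNET_CRYPTO_ALG_AES_128_GCM the xform built above carries a
 * 16-byte key, a 12-byte IV fetched from CRYPTODEV_IV_OFFSET inside the
 * crypto op, a 16-byte digest (ICV) and an aad_len of 8 bytes (12 with ESN),
 * which matches the default aad_len passed by cryptodev_key_handler ().
 */
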
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}

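/*
 * Note: for encryption the chain built above is cipher -> auth
 * (encrypt-then-MAC as used by ESP), while for decryption the auth xform is
 * placed first so the ICV is verified before the payload is deciphered.
 */
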
static_always_inline void
cryptodev_session_del (cryptodev_session_t *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
    if (rte_cryptodev_sym_session_free (i, sess) == 0)
      break;
#else
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
#endif
}

static_always_inline int
check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
	continue;
      if (vcap->cipher.algo != algo)
	continue;
      vec_foreach (s, vcap->cipher.key_sizes)
	if (*s == key_size)
	  return 1;
    }

  return 0;
}

static_always_inline int
check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
	continue;
      if (vcap->auth.algo != algo)
	continue;
      vec_foreach (s, vcap->auth.digest_sizes)
	if (*s == digest_size)
	  return 1;
    }

  return 0;
}

static_always_inline int
check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
		    u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;
  u32 key_match = 0, digest_match = 0, aad_match = 0;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
	continue;
      if (vcap->aead.algo != algo)
	continue;
      vec_foreach (s, vcap->aead.digest_sizes)
	if (*s == digest_size)
	  {
	    digest_match = 1;
	    break;
	  }
      vec_foreach (s, vcap->aead.key_sizes)
	if (*s == key_size)
	  {
	    key_match = 1;
	    break;
	  }
      vec_foreach (s, vcap->aead.aad_sizes)
	if (*s == aad_size)
	  {
	    aad_match = 1;
	    break;
	  }
    }

  if (key_match == 1 && digest_match == 1 && aad_match == 1)
    return 1;

  return 0;
}

static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
	{
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
	check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
	  foreach_cryptodev_link_async_alg
#undef _
	default:
	  return 0;
	}
    }

#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
	matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

  if (matched < 2)
    return 0;

  return 1;
}

void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      vec_foreach_index (i, cmt->per_numa_data)
	{
	  if (!ckey->keys || !ckey->keys[i])
	    continue;
	  if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
	    {
	      cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
	      cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

	      CLIB_MEMORY_STORE_BARRIER ();
	      ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
	      ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
	    }
	}
      return;
    }

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

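/*
 * Key add/modify above only validates algorithm support and sizes the
 * per-numa session pointer array; the actual DPDK sessions are created
 * lazily by cryptodev_session_create () the first time the key is used on a
 * given numa node.
 */
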
void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}

static clib_error_t *
allocate_session_pools (u32 numa_node,
			cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
#else
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
#endif
  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);
  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);
#endif

  return 0;

clear_mempools:
  vec_free (name);
  if (sess_pools_elt->sess_pool)
    rte_mempool_free (sess_pools_elt->sess_pool);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  if (sess_pools_elt->sess_priv_pool)
    rte_mempool_free (sess_pools_elt->sess_priv_pool);
#endif
  return error;
}

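/*
 * With DPDK >= 22.11 a single mempool holds the session header and the
 * driver private data (element size cmt->sess_sz); on older DPDK the private
 * data lives in a separate sess_priv_pool sized for every detected driver.
 */
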
static_always_inline clib_error_t *
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
			  u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
  struct rte_cryptodev_info dev_info;
#endif
  u32 numa_node = vm->numa_node;
  clib_error_t *error = 0;
  int ret = 0;
  u8 found = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
	{
	  error = allocate_session_pools (numa_node, sess_pools_elt,
					  vec_len (numa_data->sess_pools) - 1);
	  if (error)
	    goto clear_key;
	}

      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
	{
	  found = 1;
	  break;
	}
    }

  if (!found)
    {
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
				      vec_len (numa_data->sess_pools) - 1);
      if (error)
	goto clear_key;
    }

  sess_pool = sess_pools_elt->sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
#endif

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  if (ret)
    {
      error = clib_error_return (0, "Failed to prepare encrypt xform");
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  u32 dev_id = dev_inst->dev_id;
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
      !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      error = clib_error_return (0, "Failed to create cryptodev sessions");
      goto clear_key;
    }
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
#else
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;

      /* if the session is already configured for the driver type, avoid
	 configuring it again to increase the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
	  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
	continue;

      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
	sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
	sess_priv_pool);
      if (ret < 0)
	{
	  error = clib_error_return (0, "Failed to init cryptodev sessions");
	  goto clear_key;
	}
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
#endif

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (error != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return error;
}

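/*
 * Sessions are created in encrypt/decrypt pairs and published per numa node
 * under cmt->tlock; the store barrier ensures workers only observe the
 * pointers after both sessions are fully initialized.
 */
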
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 *  assign a cryptodev resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 *  @param op: the assignment method.
 *  @return: 0 on success, a negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
	  == 1)
	return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
	{
	  cinst = cmt->cryptodev_inst + idx;
	  if (cinst->dev_id == cet->cryptodev_id &&
	      cinst->q_id == cet->cryptodev_q)
	    break;
	}
      /* invalid existing worker resource assignment */
      if (idx >= vec_len (cmt->cryptodev_inst))
	return -EINVAL;

      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }

  return 0;
}

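/*
 * Illustrative use (mirroring this file): dpdk_cryptodev_init () hands each
 * worker a free device/queue pair with
 *   cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
 * and the "set cryptodev assignment" CLI below re-pins a worker with
 * CRYPTODEV_RESOURCE_ASSIGN_UPDATE.
 */
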
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
	continue;

      if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
	{
	  s = format (s, "%u (%v)\n", thread_index,
		      vlib_worker_threads[thread_index].name);
	  break;
	}
    }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			      vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  if (cmt->is_raw_api)
    vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
  else
    vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
  .path = "show cryptodev assignment",
  .short_help = "show cryptodev assignment",
  .function = cryptodev_show_assignment_fn,
};

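/*
 * Example output (illustrative only, device names depend on the platform):
 *   No.  Name                     Queue-id  Assigned-to
 *   0    0000:3d:01.0_qat_sym     0         1 (vpp_wk_0)
 *   1    0000:3d:01.0_qat_sym     1         free
 *   Cryptodev Data Path API used: RAW Data Path API
 */
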
static clib_error_t *
cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
			       vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 thread_index = 0;
  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      cryptodev_cache_ring_t *ring = &cet->cache_ring;
      u16 head = ring->head;
      u16 tail = ring->tail;
      u16 n_cached = ((head == tail) && (ring->frames[head].f == 0)) ?
		       0 :
		       ((head == tail) && (ring->frames[head].f != 0)) ?
		       (CRYPTODEV_CACHE_QUEUE_MASK + 1) :
		       (head > tail) ?
		       (head - tail) :
		       (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);

      u16 enq_head = ring->enq_head;
      u16 deq_tail = ring->deq_tail;
      u16 n_frames_inflight =
	((enq_head == deq_tail) && (ring->frames[enq_head].f == 0)) ?
	  0 :
	  ((enq_head == deq_tail) && (ring->frames[enq_head].f != 0)) ?
	  CRYPTODEV_CACHE_QUEUE_MASK + 1 :
	  (enq_head > deq_tail) ?
	  (enq_head - deq_tail) :
	  (CRYPTODEV_CACHE_QUEUE_MASK - deq_tail + enq_head);

      u16 n_frames_processed =
	((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ? 0 :
	((tail == deq_tail) && (ring->frames[deq_tail].f != 0)) ? 1 :
	(deq_tail > tail) ? (deq_tail - tail + 1) :
			    (CRYPTODEV_CACHE_QUEUE_MASK - tail + deq_tail - 1);

      if (vlib_num_workers () > 0 && thread_index == 0)
	continue;

      vlib_cli_output (vm, "\n\n");
      vlib_cli_output (vm, "Frames total: %u", n_cached);
      vlib_cli_output (vm, "Frames pending in the ring: %u",
		       n_cached - n_frames_inflight - n_frames_processed);
      vlib_cli_output (vm, "Frames inflight: %u", n_frames_inflight);
      vlib_cli_output (vm, "Frames dequeued but not returned: %u",
		       n_frames_processed);
      vlib_cli_output (vm, "Elements inflight: %u", cet->inflight);
      vlib_cli_output (vm, "Head: %u", head);
      vlib_cli_output (vm, "Tail: %u", tail);
      vlib_cli_output (vm, "\n\n");
    }

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
  .path = "show cryptodev cache status",
  .short_help = "show status of all cryptodev cache rings",
  .function = cryptodev_show_cache_rings_fn,
};

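/*
 * Worked example of the ring accounting above (illustrative, assuming
 * CRYPTODEV_CACHE_QUEUE_MASK == 63): head = 5, tail = 60 means the indices
 * wrapped, so n_cached = 63 - 60 + 5 = 8 frames; when head == tail the frame
 * pointer stored in the slot distinguishes an empty ring (0 frames) from a
 * completely full one (mask + 1 frames).
 */
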
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			     vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
	clib_error_return (0, "assign crypto resource for master thread");
      return error;
    }

  if (thread_index > vec_len (cmt->per_thread_data) ||
      inst_index > vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error =
	clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
  .path = "set cryptodev assignment",
  .short_help = "set cryptodev assignment thread <thread_index> "
		"resource <inst_index>",
  .function = cryptodev_set_assignment_fn,
};

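/*
 * Example (illustrative):
 *   vpp# set cryptodev assignment thread 1 resource 2
 * pins worker thread 1 to cryptodev instance 2 as listed by
 * "show cryptodev assignment".
 */
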
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  u32 i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto devices
     anymore. Only devices that have the same driver type as the first
     initialized device can be initialized.
   */
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
    return -1;
#endif

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      qp_cfg.mp_session_private = 0;
#endif
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
					    info.device->numa_node);
      if (ret)
	{
	  clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
			cryptodev_id, i, ret);
	  break;
	}
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 0)
    {
      cmt->drivers_cnt = 1;
      cmt->driver_id = info.driver_id;
      cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
    }
#endif

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
      const char *dev_name = rte_dev_name (info.device);
#else
      const char *dev_name = info.device->name;
#endif
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u",
		dev_name, i);
    }

  return 0;
}

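/*
 * Each queue pair of a successfully configured device becomes its own
 * cryptodev_inst_t, so one physical device can serve several workers, one
 * queue pair per worker.
 */
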
static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;

  return 0;
}

static int
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;

  vec_foreach (value, params)
    {
      if (*value == param_value)
	return 1;
    }

  return 0;
}

static int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
			     u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;

  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
	continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	  cap->auth.algo == idx->algo.auth &&
	  cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
	return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	  cap->cipher.algo == idx->algo.cipher &&
	  cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
	return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
	  cap->aead.algo == idx->algo.aead &&
	  cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
	  cryptodev_supports_param_value (cap->aead.digest_sizes,
					  digest_size) &&
	  cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
	return 1;
    }

  return 0;
}

static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
			       u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;

      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
	   cap_param_size += increment)
	{
	  if ((*param_sizes)[i] == cap_param_size)
	    {
	      found_param = 1;
	      break;
	    }
	  if (increment == 0)
	    break;
	}
      if (!found_param)
	/* no such param_size in cap so delete this size in temp_cap params */
	vec_delete (*param_sizes, 1, i);
      else
	i++;
    }
}

static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}

static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_is_supported = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
	switch (cap->sym.xform_type)
	  {
	  case RTE_CRYPTO_SYM_XFORM_CIPHER:
	    if (cap->sym.cipher.algo == temp_cap->cipher.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
		  cap->sym.cipher.key_size.max,
		  cap->sym.cipher.key_size.increment);
		if (vec_len (temp_cap->cipher.key_sizes) > 0)
		  cap_is_supported = 1;
	      }
	    break;
	  case RTE_CRYPTO_SYM_XFORM_AUTH:
	    if (cap->sym.auth.algo == temp_cap->auth.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
		  cap->sym.auth.digest_size.max,
		  cap->sym.auth.digest_size.increment);
		if (vec_len (temp_cap->auth.digest_sizes) > 0)
		  cap_is_supported = 1;
	      }
	    break;
	  case RTE_CRYPTO_SYM_XFORM_AEAD:
	    if (cap->sym.aead.algo == temp_cap->aead.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
		  cap->sym.aead.key_size.max,
		  cap->sym.aead.key_size.increment);
		remove_unsupported_param_size (
		  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
		  cap->sym.aead.aad_size.max,
		  cap->sym.aead.aad_size.increment);
		remove_unsupported_param_size (
		  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
		  cap->sym.aead.digest_size.max,
		  cap->sym.aead.digest_size.increment);
		if (vec_len (temp_cap->aead.key_sizes) > 0 &&
		    vec_len (temp_cap->aead.aad_sizes) > 0 &&
		    vec_len (temp_cap->aead.digest_sizes) > 0)
		  cap_is_supported = 1;
	      }
	    break;
	  default:
	    break;
	  }
      cap++;
    }

  return cap_is_supported;
}

static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /*init capabilities vector*/
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
	{
	  cap++;
	  continue;
	}

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
	{
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
	  tmp_cap.cipher.key_sizes = 0;
	  tmp_cap.cipher.algo = cap->sym.cipher.algo;
	  for (param = cap->sym.cipher.key_size.min;
	       param <= cap->sym.cipher.key_size.max;
	       param += cap->sym.cipher.key_size.increment)
	    {
	      vec_add1 (tmp_cap.cipher.key_sizes, param);
	      if (cap->sym.cipher.key_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AUTH:
	  tmp_cap.auth.algo = cap->sym.auth.algo;
	  tmp_cap.auth.digest_sizes = 0;
	  for (param = cap->sym.auth.digest_size.min;
	       param <= cap->sym.auth.digest_size.max;
	       param += cap->sym.auth.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.auth.digest_sizes, param);
	      if (cap->sym.auth.digest_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AEAD:
	  tmp_cap.aead.key_sizes = 0;
	  tmp_cap.aead.aad_sizes = 0;
	  tmp_cap.aead.digest_sizes = 0;
	  tmp_cap.aead.algo = cap->sym.aead.algo;
	  for (param = cap->sym.aead.key_size.min;
	       param <= cap->sym.aead.key_size.max;
	       param += cap->sym.aead.key_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.key_sizes, param);
	      if (cap->sym.aead.key_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.aad_size.min;
	       param <= cap->sym.aead.aad_size.max;
	       param += cap->sym.aead.aad_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.aad_sizes, param);
	      if (cap->sym.aead.aad_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.digest_size.min;
	       param <= cap->sym.aead.digest_size.max;
	       param += cap->sym.aead.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.digest_sizes, param);
	      if (cap->sym.aead.digest_size.increment == 0)
		break;
	    }
	  break;
	default:
	  break;
	}

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
	{
	  dev_id = dev_inst->dev_id;
	  if (previous_dev_id != dev_id)
	    {
	      previous_dev_id = dev_id;
	      rte_cryptodev_info_get (dev_id, &dev_info);
	      dev_caps = &dev_info.capabilities[0];
	      cap_is_supported = cryptodev_remove_unsupported_param_sizes (
		&cmt->supported_caps[cap_id], dev_caps);
	      if (!cap_is_supported)
		{
		  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
		  /*no need to check other devices as this one doesn't support
		   * this cap*/
		  break;
		}
	    }
	}
      if (cap_is_supported)
	cap_id++;
    }
}

static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  if (vec_len (cmt->cryptodev_inst) == 0)
    return -1;
  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough devices, stop cryptodev */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
static void
is_drv_unique (u32 driver_id, u32 **unique_drivers)
{
  u32 *unique_elt;
  u8 found = 0;

  vec_foreach (unique_elt, *unique_drivers)
    {
      if (*unique_elt == driver_id)
	{
	  found = 1;
	  break;
	}
    }

  if (!found)
    vec_add1 (*unique_drivers, driver_id);
}
#endif

clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  u32 node;
  u8 nodes = 0;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
	nodes = node;
    }

  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    return 0;

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_cryptodev_info dev_info;
  cryptodev_inst_t *dev_inst;
  u32 *unique_drivers = 0;
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
	rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);
#endif

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
	  0)
	{
	  error = clib_error_return (0, "Failed to configure cryptodev");
	  return error;
	}
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    return error;

  /* this engine is only enabled when cryptodev device(s) are present in
   * startup.conf. Assume it is intended to be used; turn on async mode here.
   */
  ipsec_set_async_mode (1);

  return 0;
}

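/*
 * Illustrative startup.conf fragment (an example only, not the required
 * configuration) that exposes a DPDK crypto device so this engine gets
 * enabled:
 *   dpdk {
 *     vdev crypto_aesni_mb
 *   }
 */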