/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_config.h>

#include "cryptodev.h"
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

cryptodev_main_t cryptodev_main;
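
/* Fill a DPDK symmetric AEAD transform (AES-GCM or CHACHA20-POLY1305) from a
 * vnet crypto key, for either the encrypt or the decrypt direction. */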
static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
                    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
                    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
    aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
    aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
  else
    return -1;

  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}
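
/* Build a chained cipher + auth transform pair (e.g. AES-CBC plus HMAC) for a
 * linked vnet crypto key; the transform order depends on encrypt vs. decrypt. */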
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
                      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
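
/* Free a cryptodev session on every device (DPDK >= 22.11) or clear the
 * per-device session data and then free the session (older DPDK). */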
static_always_inline void
cryptodev_session_del (cryptodev_session_t *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
    if (rte_cryptodev_sym_session_free (i, sess) == 0)
      break;
#else
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
#endif
}
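
/* The following helpers check an algorithm and its parameter sizes against
 * cmt->supported_caps, the capability set common to all configured devices. */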
static_always_inline int
check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
        continue;
      if (vcap->cipher.algo != algo)
        continue;
      vec_foreach (s, vcap->cipher.key_sizes)
        if (*s == key_size)
          return 1;
    }

  return 0;
}

static_always_inline int
check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
        continue;
      if (vcap->auth.algo != algo)
        continue;
      vec_foreach (s, vcap->auth.digest_sizes)
        if (*s == digest_size)
          return 1;
    }

  return 0;
}
static_always_inline int
check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
                    u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;
  u32 key_match = 0, digest_match = 0, aad_match = 0;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
        continue;
      if (vcap->aead.algo != algo)
        continue;
      vec_foreach (s, vcap->aead.digest_sizes)
        if (*s == digest_size)
          {
            digest_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.key_sizes)
        if (*s == key_size)
          {
            key_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.aad_sizes)
        if (*s == aad_size)
          {
            aad_match = 1;
            break;
          }
    }

  if (key_match == 1 && digest_match == 1 && aad_match == 1)
    return 1;

  return 0;
}
static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
        default:
          return 0;
        }
    }

#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

  if (matched < 2)
    return 0;

  return 1;
}
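
/* Key add/del/modify handler: destroy the per-numa sessions on delete or
 * modify, and pre-allocate the per-numa session table for supported keys. */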
static void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
        return;

      vec_foreach_index (i, cmt->per_numa_data)
        {
          if (!ckey->keys)
            continue;
          if (!ckey->keys[i])
            continue;
          if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
            {
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

              CLIB_MEMORY_STORE_BARRIER ();
              ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
              ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
            }
        }
      return;
    }

  /* do not create a session for an unsupported algorithm */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}
void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
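
/* Create the DPDK session mempool (and, for DPDK < 22.11, the session private
 * data mempool) for one NUMA node. */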
static clib_error_t *
allocate_session_pools (u32 numa_node,
                        cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
#else
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
#endif

  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);

  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);
#endif

  return 0;

clear_mempools:
  vec_free (name);
  if (sess_pools_elt->sess_pool)
    rte_mempool_free (sess_pools_elt->sess_pool);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  if (sess_pools_elt->sess_priv_pool)
    rte_mempool_free (sess_pools_elt->sess_priv_pool);
#endif
  return error;
}
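
/* Create the encrypt and decrypt DPDK sessions for one key on the calling
 * thread's NUMA node, growing the per-numa session pools on demand. */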
clib_error_t *
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                          u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
  struct rte_cryptodev_info dev_info;
#endif
  u32 numa_node = vm->numa_node;
  clib_error_t *error = 0;
  u8 found = 0;
  int ret;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
        {
          error = allocate_session_pools (numa_node, sess_pools_elt,
                                          vec_len (numa_data->sess_pools) - 1);
          if (error)
            goto clear_key;
        }

      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
        {
          found = 1;
          break;
        }
    }

  if (!found)
    {
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
                                      vec_len (numa_data->sess_pools) - 1);
      if (error)
        goto clear_key;
    }

  sess_pool = sess_pools_elt->sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
#endif

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  if (ret)
    {
      error = clib_error_return (0, "Failed to prepare xforms");
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  u32 dev_id = dev_inst->dev_id;
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
      !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      error = clib_error_return (0, "Failed to create session");
      goto clear_key;
    }
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
#else
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;

      /* if the session is already configured for this driver type, avoid
         configuring it again, which would bump the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
          sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
        continue;

      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
        sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
        sess_priv_pool);
      if (ret < 0)
        {
          error = clib_error_return (0, "Failed to init session");
          goto clear_key;
        }
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
#endif

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (error != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return error;
}
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;
/**
 * Assign a cryptodev resource to a worker.
 * @param cet: the worker thread data
 * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 * @param op: the assignment method.
 * @return: 0 if successful, a negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t *cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  u32 idx;

  /* assigning a resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -1;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -1;

      vec_foreach_index (idx, cmt->cryptodev_inst)
        {
          cinst = cmt->cryptodev_inst + idx;
          if (cinst->dev_id == cet->cryptodev_id &&
              cinst->q_id == cet->cryptodev_q)
            break;
        }
      /* invalid existing worker resource assignment */
      if (idx >= vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -1;
    }

  return 0;
}
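
/*
 * Typical usage (illustrative): at init time each worker grabs a free queue
 * pair with cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
 * the "set cryptodev assignment" CLI below moves a worker to a specific queue
 * pair via CRYPTODEV_RESOURCE_ASSIGN_UPDATE.
 */

/* Format one cryptodev instance (device name, queue id and the worker thread
 * it is assigned to, if any) for the "show cryptodev assignment" CLI. */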
static u8 *
format_cryptodev_inst (u8 *s, va_list *args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
        continue;

      if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
        {
          s = format (s, "%u (%v)\n", thread_index,
                      vlib_worker_threads[thread_index].name);
          break;
        }
    }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}
static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
                              vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
                   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  if (cmt->is_raw_api)
    vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
  else
    vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
  .path = "show cryptodev assignment",
  .short_help = "show cryptodev assignment",
  .function = cryptodev_show_assignment_fn,
};
static clib_error_t *
cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
                               vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 thread_index = 0;
  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      cryptodev_cache_ring_t *ring = &cet->cache_ring;
      u16 head = ring->head;
      u16 tail = ring->tail;
      u16 n_cached = ((head == tail) && (ring->frames[head].f == 0)) ?
                       0 :
                       ((head == tail) && (ring->frames[head].f != 0)) ?
                       (CRYPTODEV_CACHE_QUEUE_MASK + 1) :
                       (head > tail) ?
                       (head - tail) :
                       (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);

      u16 enq_head = ring->enq_head;
      u16 deq_tail = ring->deq_tail;
      u16 n_frames_inflight =
        ((enq_head == deq_tail) && (ring->frames[enq_head].f == 0)) ?
          0 :
          ((enq_head == deq_tail) && (ring->frames[enq_head].f != 0)) ?
          CRYPTODEV_CACHE_QUEUE_MASK + 1 :
          (enq_head > deq_tail) ?
          (enq_head - deq_tail) :
          (CRYPTODEV_CACHE_QUEUE_MASK - deq_tail + enq_head);

      u16 n_frames_processed =
        ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ?
          0 :
          ((tail == deq_tail) && (ring->frames[deq_tail].f != 0)) ?
          (CRYPTODEV_CACHE_QUEUE_MASK + 1) :
          (deq_tail > tail) ? (deq_tail - tail) :
                              (CRYPTODEV_CACHE_QUEUE_MASK - tail + deq_tail);

      if (vlib_num_workers () > 0 && thread_index == 0)
        continue;

      vlib_cli_output (vm, "\n\n");
      vlib_cli_output (vm, "Frames total: %d", n_cached);
      vlib_cli_output (vm, "Frames pending in the ring: %d",
                       n_cached - n_frames_inflight - n_frames_processed);
      vlib_cli_output (vm, "Frames enqueued but not dequeued: %d",
                       n_frames_inflight);
      vlib_cli_output (vm, "Frames dequeued but not returned: %d",
                       n_frames_processed);
      vlib_cli_output (vm, "inflight: %d", cet->inflight);
      vlib_cli_output (vm, "Head: %d", ring->head);
      vlib_cli_output (vm, "Tail: %d", ring->tail);
      vlib_cli_output (vm, "\n\n");
    }

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
  .path = "show cryptodev cache status",
  .short_help = "show status of all cryptodev cache rings",
  .function = cryptodev_show_cache_rings_fn,
};
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
                             vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
        thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
        inst_present = 1;
      else
        {
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, line_input);
          return error;
        }
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
        clib_error_return (0, "assign crypto resource for master thread");
      return error;
    }

  if (thread_index > vec_len (cmt->per_thread_data) ||
      inst_index > vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
                                   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error =
        clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
  .path = "set cryptodev assignment",
  .short_help = "set cryptodev assignment thread <thread_index> "
                "resource <inst_index>",
  .function = cryptodev_set_assignment_fn,
};
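
/* Count the queue pairs available across all cryptodev devices. */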
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  int i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
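
/* Configure and start one cryptodev: set up all of its queue pairs and add
 * one cryptodev_inst_t per queue pair to the global instance list. */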
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  /* Starting from DPDK 22.11, VPP no longer allows heterogeneous crypto
     devices: only devices with the same driver type as the first initialized
     device can be initialized.
   */
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
    return -1;
#endif

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      qp_cfg.mp_session_private = 0;
#endif
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
                                            info.device->numa_node);
      if (ret)
        {
          clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
                        cryptodev_id, i, ret);
          break;
        }
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 0)
    {
      cmt->drivers_cnt = 1;
      cmt->driver_id = info.driver_id;
      cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
    }
#endif

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
      const char *dev_name = rte_dev_name (info.device);
#else
      const char *dev_name = info.device->name;
#endif
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u",
                info.device->name, i);
    }

  return 0;
}
static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;
  return 0;
}

static int
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;

  vec_foreach (value, params)
    {
      if (*value == param_value)
        return 1;
    }
  return 0;
}
static_always_inline int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                             u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;

  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
        continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
          cap->auth.algo == idx->algo.auth &&
          cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
          cap->cipher.algo == idx->algo.cipher &&
          cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
          cap->aead.algo == idx->algo.aead &&
          cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
          cryptodev_supports_param_value (cap->aead.digest_sizes,
                                          digest_size) &&
          cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
        return 1;
    }
  return 0;
}
static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* no such param_size in cap, so delete this size in temp_cap params */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}
static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}
static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
        switch (cap->sym.xform_type)
          {
          case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (cap->sym.cipher.algo == temp_cap->cipher.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
                  cap->sym.cipher.key_size.max,
                  cap->sym.cipher.key_size.increment);
                if (vec_len (temp_cap->cipher.key_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (cap->sym.auth.algo == temp_cap->auth.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
                  cap->sym.auth.digest_size.max,
                  cap->sym.auth.digest_size.increment);
                if (vec_len (temp_cap->auth.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AEAD:
            if (cap->sym.aead.algo == temp_cap->aead.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
                  cap->sym.aead.key_size.max,
                  cap->sym.aead.key_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
                  cap->sym.aead.aad_size.max,
                  cap->sym.aead.aad_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
                  cap->sym.aead.digest_size.max,
                  cap->sym.aead.digest_size.increment);
                if (vec_len (temp_cap->aead.key_sizes) > 0 &&
                    vec_len (temp_cap->aead.aad_sizes) > 0 &&
                    vec_len (temp_cap->aead.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          default:
            break;
          }

      cap++;
    }

  return cap_found;
}
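
/* Build cmt->supported_caps as the intersection of the capabilities reported
 * by all probed cryptodevs, starting from the first device's capability list
 * and pruning algorithms and sizes the other devices do not support. */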
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /* init capabilities vector */
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
        {
          cap++;
          continue;
        }

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
        {
        case RTE_CRYPTO_SYM_XFORM_CIPHER:
          tmp_cap.cipher.key_sizes = 0;
          tmp_cap.cipher.algo = cap->sym.cipher.algo;
          for (param = cap->sym.cipher.key_size.min;
               param <= cap->sym.cipher.key_size.max;
               param += cap->sym.cipher.key_size.increment)
            {
              vec_add1 (tmp_cap.cipher.key_sizes, param);
              if (cap->sym.cipher.key_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AUTH:
          tmp_cap.auth.algo = cap->sym.auth.algo;
          tmp_cap.auth.digest_sizes = 0;
          for (param = cap->sym.auth.digest_size.min;
               param <= cap->sym.auth.digest_size.max;
               param += cap->sym.auth.digest_size.increment)
            {
              vec_add1 (tmp_cap.auth.digest_sizes, param);
              if (cap->sym.auth.digest_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AEAD:
          tmp_cap.aead.key_sizes = 0;
          tmp_cap.aead.aad_sizes = 0;
          tmp_cap.aead.digest_sizes = 0;
          tmp_cap.aead.algo = cap->sym.aead.algo;
          for (param = cap->sym.aead.key_size.min;
               param <= cap->sym.aead.key_size.max;
               param += cap->sym.aead.key_size.increment)
            {
              vec_add1 (tmp_cap.aead.key_sizes, param);
              if (cap->sym.aead.key_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.aad_size.min;
               param <= cap->sym.aead.aad_size.max;
               param += cap->sym.aead.aad_size.increment)
            {
              vec_add1 (tmp_cap.aead.aad_sizes, param);
              if (cap->sym.aead.aad_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.digest_size.min;
               param <= cap->sym.aead.digest_size.max;
               param += cap->sym.aead.digest_size.increment)
            {
              vec_add1 (tmp_cap.aead.digest_sizes, param);
              if (cap->sym.aead.digest_size.increment == 0)
                break;
            }
          break;
        default:
          break;
        }

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
        {
          dev_id = dev_inst->dev_id;
          if (previous_dev_id != dev_id)
            {
              previous_dev_id = dev_id;
              rte_cryptodev_info_get (dev_id, &dev_info);
              dev_caps = &dev_info.capabilities[0];
              cap_is_supported = cryptodev_remove_unsupported_param_sizes (
                &cmt->supported_caps[cap_id], dev_caps);
              if (!cap_is_supported)
                {
                  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
                  /* no need to check other devices as this one doesn't
                   * support this capability */
                  break;
                }
            }
        }
      if (cap_is_supported)
        cap_id++;
    }
}
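
/* Probe and configure all cryptodevs; fail if the available queue pairs
 * cannot cover all worker threads. */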
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  if (vec_len (cmt->cryptodev_inst) == 0)
    return -1;
  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough devices, stop cryptodev */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
static void
is_drv_unique (u32 driver_id, u32 **unique_drivers)
{
  u32 *unique_elt;
  u8 found = 0;

  vec_foreach (unique_elt, *unique_drivers)
    {
      if (*unique_elt == driver_id)
        {
          found = 1;
          break;
        }
    }

  if (!found)
    vec_add1 (*unique_drivers, driver_id);
}
#endif
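
/* Initialize the cryptodev engine: allocate per-numa and per-thread state,
 * probe the cryptodevs, bind each worker to a queue pair, register the crypto
 * engine handlers and turn on asynchronous crypto mode. */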
clib_error_t *
dpdk_cryptodev_init (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  u32 node;
  u8 nodes = 0;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
        nodes = node;
    }

  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    return 0;

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_cryptodev_info dev_info;
  cryptodev_inst_t *dev_inst;
  u32 *unique_drivers = 0;
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
        rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);
#endif

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
          0)
        {
          error = clib_error_return (0, "Failed to configure cryptodev");
          return error;
        }
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
                                      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    return error;

  /* this engine is only enabled when cryptodev device(s) are present in
   * startup.conf; assume it is meant to be used and turn on async mode here.
   */
  ipsec_set_async_mode (1);