X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fdpdk%2Fcryptodev%2Fcryptodev.c;h=43c2c879aab8650f87f228dda42e727fdcb08557;hb=7cc17f6df9b3f4b45aaac16ba0aa098d6cd58794;hp=f51a5a527dc1c721e020ca8fabf8906b750ab2fc;hpb=ef69b5103438ec099cf080b54a88a5333b1c055b;p=vpp.git diff --git a/src/plugins/dpdk/cryptodev/cryptodev.c b/src/plugins/dpdk/cryptodev/cryptodev.c index f51a5a527dc..43c2c879aab 100644 --- a/src/plugins/dpdk/cryptodev/cryptodev.c +++ b/src/plugins/dpdk/cryptodev/cryptodev.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include @@ -29,141 +29,41 @@ #include #include #include -#include #include +#include "cryptodev.h" + #if CLIB_DEBUG > 0 #define always_inline static inline #else #define always_inline static inline __attribute__ ((__always_inline__)) #endif -#define CRYPTODEV_NB_CRYPTO_OPS 1024 -#define CRYPTODEV_NB_SESSION 10240 -#define CRYPTODEV_DEF_DRIVE crypto_aesni_mb - -#define CRYPTODEV_IV_OFFSET (offsetof (cryptodev_op_t, iv)) -#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad)) - -/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */ -#define foreach_vnet_aead_crypto_conversion \ - _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8) \ - _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \ - _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8) \ - _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \ - _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8) \ - _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12) - -/** - * crypto (alg, cryptodev_alg), hash (alg, digest-size) - **/ -#define foreach_cryptodev_link_async_alg \ - _ (AES_128_CBC, AES_CBC, SHA1, 12) \ - _ (AES_192_CBC, AES_CBC, SHA1, 12) \ - _ (AES_256_CBC, AES_CBC, SHA1, 12) \ - _ (AES_128_CBC, AES_CBC, SHA224, 14) \ - _ (AES_192_CBC, AES_CBC, SHA224, 14) \ - _ (AES_256_CBC, AES_CBC, SHA224, 14) \ - _ (AES_128_CBC, AES_CBC, SHA256, 16) \ - _ (AES_192_CBC, AES_CBC, SHA256, 16) \ - _ (AES_256_CBC, AES_CBC, SHA256, 16) \ - _ (AES_128_CBC, AES_CBC, SHA384, 24) \ - _ (AES_192_CBC, AES_CBC, SHA384, 24) \ - _ (AES_256_CBC, AES_CBC, SHA384, 24) \ - _ (AES_128_CBC, AES_CBC, SHA512, 32) \ - _ (AES_192_CBC, AES_CBC, SHA512, 32) \ - _ (AES_256_CBC, AES_CBC, SHA512, 32) - -#define foreach_vnet_crypto_status_conversion \ - _(SUCCESS, COMPLETED) \ - _(NOT_PROCESSED, WORK_IN_PROGRESS) \ - _(AUTH_FAILED, FAIL_BAD_HMAC) \ - _(INVALID_SESSION, FAIL_ENGINE_ERR) \ - _(INVALID_ARGS, FAIL_ENGINE_ERR) \ - _(ERROR, FAIL_ENGINE_ERR) - -static const vnet_crypto_op_status_t cryptodev_status_conversion[] = { -#define _(a, b) VNET_CRYPTO_OP_STATUS_##b, - foreach_vnet_crypto_status_conversion -#undef _ -}; - -typedef struct -{ - CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); - struct rte_crypto_op op; - struct rte_crypto_sym_op sop; - u8 iv[16]; - u8 aad[16]; - vnet_crypto_async_frame_t *frame; - u32 n_elts; -} cryptodev_op_t; - -typedef enum -{ - CRYPTODEV_OP_TYPE_ENCRYPT = 0, - CRYPTODEV_OP_TYPE_DECRYPT, - CRYPTODEV_N_OP_TYPES, -} cryptodev_op_type_t; - -typedef struct -{ - struct rte_cryptodev_sym_session *keys[CRYPTODEV_N_OP_TYPES]; -} cryptodev_key_t; - -typedef struct -{ - u32 dev_id; - u32 q_id; - char *desc; -} cryptodev_inst_t; - -typedef struct -{ - struct rte_mempool *cop_pool; - struct rte_mempool *sess_pool; - struct rte_mempool *sess_priv_pool; -} cryptodev_numa_data_t; - -typedef struct -{ - CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); - u16 cryptodev_id; - u16 cryptodev_q; - u32 inflight; - cryptodev_op_t **cops; - struct rte_ring *ring; -} cryptodev_engine_thread_t; - -typedef struct -{ - cryptodev_numa_data_t 
*per_numa_data; - cryptodev_key_t *keys; - cryptodev_engine_thread_t *per_thread_data; - enum rte_iova_mode iova_mode; - cryptodev_inst_t *cryptodev_inst; - clib_bitmap_t *active_cdev_inst_mask; - clib_spinlock_t tlock; -} cryptodev_main_t; - cryptodev_main_t cryptodev_main; -static int +static_always_inline int prepare_aead_xform (struct rte_crypto_sym_xform *xform, - cryptodev_op_type_t op_type, - const vnet_crypto_key_t * key, u32 aad_len) + cryptodev_op_type_t op_type, const vnet_crypto_key_t *key, + u32 aad_len) { struct rte_crypto_aead_xform *aead_xform = &xform->aead; memset (xform, 0, sizeof (*xform)); xform->type = RTE_CRYPTO_SYM_XFORM_AEAD; xform->next = 0; - if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM && - key->alg != VNET_CRYPTO_ALG_AES_192_GCM && - key->alg != VNET_CRYPTO_ALG_AES_256_GCM) + if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM || + key->alg == VNET_CRYPTO_ALG_AES_192_GCM || + key->alg == VNET_CRYPTO_ALG_AES_256_GCM) + { + aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM; + } + else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305) + { + aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305; + } + else return -1; - aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM; aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ? RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT; aead_xform->aad_length = aad_len; @@ -176,10 +76,10 @@ prepare_aead_xform (struct rte_crypto_sym_xform *xform, return 0; } -static int +static_always_inline int prepare_linked_xform (struct rte_crypto_sym_xform *xforms, cryptodev_op_type_t op_type, - const vnet_crypto_key_t * key) + const vnet_crypto_key_t *key) { struct rte_crypto_sym_xform *xform_cipher, *xform_auth; vnet_crypto_key_t *key_cipher, *key_auth; @@ -213,11 +113,11 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms, switch (key->async_alg) { -#define _(a, b, c, d) \ - case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:\ - cipher_algo = RTE_CRYPTO_CIPHER_##b; \ - auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \ - digest_len = d; \ +#define _(a, b, c, d, e) \ + case VNET_CRYPTO_ALG_##a##_##d##_TAG##e: \ + cipher_algo = RTE_CRYPTO_CIPHER_##b; \ + auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC; \ + digest_len = e; \ break; foreach_cryptodev_link_async_alg @@ -240,584 +140,395 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms, return 0; } -static int -cryptodev_session_create (vnet_crypto_key_t * const key, - struct rte_mempool *sess_priv_pool, - cryptodev_key_t * session_pair, u32 aad_len) +static_always_inline void +cryptodev_session_del (cryptodev_session_t *sess) { - struct rte_crypto_sym_xform xforms_enc[2] = { {0} }; - struct rte_crypto_sym_xform xforms_dec[2] = { {0} }; - cryptodev_main_t *cmt = &cryptodev_main; - cryptodev_inst_t *dev_inst; - struct rte_cryptodev *cdev; - int ret; - uint8_t dev_id = 0; + u32 n_devs, i; - if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) - ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key); - else - ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, - aad_len); - if (ret) - return 0; + if (sess == NULL) + return; - if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) - prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key); - else - prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len); + n_devs = rte_cryptodev_count (); - vec_foreach (dev_inst, cmt->cryptodev_inst) - { - dev_id = dev_inst->dev_id; - cdev = rte_cryptodev_pmd_get_dev (dev_id); + for (i = 0; i < n_devs; i++) +#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) + if (rte_cryptodev_sym_session_free (i, 
sess) == 0) + break; +#else + rte_cryptodev_sym_session_clear (i, sess); - /* if the session is already configured for the driver type, avoid - configuring it again to increase the session data's refcnt */ - if (session_pair->keys[0]->sess_data[cdev->driver_id].data && - session_pair->keys[1]->sess_data[cdev->driver_id].data) - continue; + rte_cryptodev_sym_session_free (sess); +#endif +} - ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0], - xforms_enc, sess_priv_pool); - ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1], - xforms_dec, sess_priv_pool); - if (ret < 0) - return ret; - } - session_pair->keys[0]->opaque_data = aad_len; - session_pair->keys[1]->opaque_data = aad_len; +static int +check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size) +{ + cryptodev_main_t *cmt = &cryptodev_main; + cryptodev_capability_t *vcap; + u32 *s; + + vec_foreach (vcap, cmt->supported_caps) + { + if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) + continue; + if (vcap->cipher.algo != algo) + continue; + vec_foreach (s, vcap->cipher.key_sizes) + if (*s == key_size) + return 1; + } return 0; } -static void -cryptodev_session_del (struct rte_cryptodev_sym_session *sess) +static int +check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size) { - u32 n_devs, i; + cryptodev_main_t *cmt = &cryptodev_main; + cryptodev_capability_t *vcap; + u32 *s; - if (sess == NULL) - return; + vec_foreach (vcap, cmt->supported_caps) + { + if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) + continue; + if (vcap->auth.algo != algo) + continue; + vec_foreach (s, vcap->auth.digest_sizes) + if (*s == digest_size) + return 1; + } - n_devs = rte_cryptodev_count (); + return 0; +} - for (i = 0; i < n_devs; i++) - rte_cryptodev_sym_session_clear (i, sess); +static_always_inline int +check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size, + u32 digest_size, u32 aad_size) +{ + cryptodev_main_t *cmt = &cryptodev_main; + cryptodev_capability_t *vcap; + u32 *s; + u32 key_match = 0, digest_match = 0, aad_match = 0; - rte_cryptodev_sym_session_free (sess); + vec_foreach (vcap, cmt->supported_caps) + { + if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD) + continue; + if (vcap->aead.algo != algo) + continue; + vec_foreach (s, vcap->aead.digest_sizes) + if (*s == digest_size) + { + digest_match = 1; + break; + } + vec_foreach (s, vcap->aead.key_sizes) + if (*s == key_size) + { + key_match = 1; + break; + } + vec_foreach (s, vcap->aead.aad_sizes) + if (*s == aad_size) + { + aad_match = 1; + break; + } + } + + if (key_match == 1 && digest_match == 1 && aad_match == 1) + return 1; + + return 0; } -static int -cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key) +static_always_inline int +cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key) { - vnet_crypto_alg_t alg; - if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) - return 0; + u32 matched = 0; - alg = key->alg; - -#define _(a, b, c, d, e, f) \ - if (alg == VNET_CRYPTO_ALG_##a) \ - return 0; + if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) + { + switch (key->async_alg) + { +#define _(a, b, c, d, e) \ + case VNET_CRYPTO_ALG_##a##_##d##_TAG##e: \ + if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) && \ + check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e)) \ + return 1; + foreach_cryptodev_link_async_alg +#undef _ + default : return 0; + } + return 0; + } +#define _(a, b, c, d, e, f, g) \ + if (key->alg == VNET_CRYPTO_ALG_##a) \ + { \ + if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f)) \ + 
matched++; \ + } foreach_vnet_aead_crypto_conversion #undef _ - return -1; + + if (matched < 2) return 0; + + return 1; } -static_always_inline void -cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop, +void +cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop, vnet_crypto_key_index_t idx, u32 aad_len) { cryptodev_main_t *cmt = &cryptodev_main; - cryptodev_numa_data_t *numa_data; vnet_crypto_key_t *key = vnet_crypto_get_key (idx); - struct rte_mempool *sess_pool, *sess_priv_pool; cryptodev_key_t *ckey = 0; - int ret = 0; + u32 i; - if (kop == VNET_CRYPTO_KEY_OP_DEL) - { - if (idx >= vec_len (cmt->keys)) - return; + vec_validate (cmt->keys, idx); + ckey = vec_elt_at_index (cmt->keys, idx); - ckey = pool_elt_at_index (cmt->keys, idx); - cryptodev_session_del (ckey->keys[0]); - cryptodev_session_del (ckey->keys[1]); - ckey->keys[0] = 0; - ckey->keys[1] = 0; - pool_put (cmt->keys, ckey); - return; - } - else if (kop == VNET_CRYPTO_KEY_OP_MODIFY) + if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY) { if (idx >= vec_len (cmt->keys)) return; - ckey = pool_elt_at_index (cmt->keys, idx); + vec_foreach_index (i, cmt->per_numa_data) + { + if (!ckey->keys) + continue; + if (!ckey->keys[i]) + continue; + if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]) + { + cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]); + cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]); - cryptodev_session_del (ckey->keys[0]); - cryptodev_session_del (ckey->keys[1]); - ckey->keys[0] = 0; - ckey->keys[1] = 0; + CLIB_MEMORY_STORE_BARRIER (); + ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0; + ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0; + } + } + return; } - else /* create key */ - pool_get_zero (cmt->keys, ckey); + + /* create key */ /* do not create session for unsupported alg */ - if (cryptodev_check_supported_vnet_alg (key)) + if (cryptodev_check_supported_vnet_alg (key) == 0) return; - numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node); - sess_pool = numa_data->sess_pool; - sess_priv_pool = numa_data->sess_priv_pool; - - ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool); - if (!ckey->keys[0]) - { - ret = -1; - goto clear_key; - } - - ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool); - if (!ckey->keys[1]) - { - ret = -1; - goto clear_key; - } - - ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len); - -clear_key: - if (ret != 0) - { - cryptodev_session_del (ckey->keys[0]); - cryptodev_session_del (ckey->keys[1]); - memset (ckey, 0, sizeof (*ckey)); - pool_put (cmt->keys, ckey); - } + vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1); + vec_foreach_index (i, ckey->keys) + vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1); } /*static*/ void -cryptodev_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop, +cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop, vnet_crypto_key_index_t idx) { cryptodev_sess_handler (vm, kop, idx, 8); } -static_always_inline void -cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t * f, - vnet_crypto_op_status_t s) -{ - u32 n_elts = f->n_elts, i; - - for (i = 0; i < n_elts; i++) - f->elts[i].status = s; -} - -static_always_inline rte_iova_t -cryptodev_get_iova (clib_pmalloc_main_t * pm, enum rte_iova_mode mode, - void *data) -{ - u64 index; - if (mode == RTE_IOVA_VA) - return (rte_iova_t) pointer_to_uword (data); - - index = clib_pmalloc_get_page_index (pm, data); - return pointer_to_uword (data) - pm->lookup_table[index]; -} - 
-static_always_inline void -cryptodev_validate_mbuf_chain (vlib_main_t * vm, struct rte_mbuf *mb, - vlib_buffer_t * b) +clib_error_t * +allocate_session_pools (u32 numa_node, + cryptodev_session_pool_t *sess_pools_elt, u32 len) { - struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */ - /* when input node is not dpdk, mbuf data len is not initialized, for - * single buffer it is not a problem since the data length is written - * into cryptodev operation. For chained buffer a reference data length - * has to be computed through vlib_buffer. - * - * even when input node is dpdk, it is possible chained vlib_buffers - * are updated (either added or removed a buffer) but not not mbuf fields. - * we have to re-link every mbuf in the chain. - */ - u16 data_len = b->current_length + (b->data + b->current_data - - rte_pktmbuf_mtod (mb, u8 *)); + cryptodev_main_t *cmt = &cryptodev_main; + u8 *name; + clib_error_t *error = NULL; - first_mb->nb_segs = 1; - first_mb->pkt_len = first_mb->data_len = data_len; + name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0); +#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) + sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create ( + (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node); +#else + sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create ( + (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node); +#endif - while (b->flags & VLIB_BUFFER_NEXT_PRESENT) + if (!sess_pools_elt->sess_pool) { - b = vlib_get_buffer (vm, b->next_buffer); - mb = rte_mbuf_from_vlib_buffer (b); - if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0)) - rte_pktmbuf_reset (mb); - last_mb->next = mb; - last_mb = mb; - mb->data_len = b->current_length; - mb->pkt_len = b->current_length; - mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data; - first_mb->nb_segs++; - if (PREDICT_FALSE (b->ref_count > 1)) - mb->pool = - dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index]; + error = clib_error_return (0, "Not enough memory for mp %s", name); + goto clear_mempools; } -} - -static_always_inline int -cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm, - vnet_crypto_async_frame_t * frame, - cryptodev_op_type_t op_type) -{ - cryptodev_main_t *cmt = &cryptodev_main; - clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main; - cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node; - cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index; - vnet_crypto_async_frame_elt_t *fe; - cryptodev_op_t **cop; - u32 *bi; - u32 n_enqueue, n_elts; - cryptodev_key_t *key; - u32 last_key_index; - - if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0)) - return -1; - n_elts = frame->n_elts; + vec_free (name); - if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts)) - { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - return -1; - } +#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) + name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0); + sess_pools_elt->sess_priv_pool = rte_mempool_create ( + (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0, + 0, NULL, NULL, NULL, NULL, numa_node, 0); - if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool, - (void **) cet->cops, n_elts) < 0)) + if (!sess_pools_elt->sess_priv_pool) { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - return -1; + error = clib_error_return (0, "Not enough memory for mp %s", name); + goto 
clear_mempools; } + vec_free (name); +#endif - cop = cet->cops; - fe = frame->elts; - bi = frame->buffer_indices; - cop[0]->frame = frame; - cop[0]->n_elts = n_elts; - - key = pool_elt_at_index (cmt->keys, fe->key_index); - last_key_index = fe->key_index; - - while (n_elts) +clear_mempools: + if (error) { - vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]); - struct rte_crypto_sym_op *sop = &cop[0]->sop; - i16 crypto_offset = fe->crypto_start_offset; - i16 integ_offset = fe->integ_start_offset; - u32 offset_diff = crypto_offset - integ_offset; - - if (n_elts > 2) - { - CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE); - CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE); - CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD); - } - if (last_key_index != fe->key_index) - { - key = pool_elt_at_index (cmt->keys, fe->key_index); - last_key_index = fe->key_index; - } - - sop->m_src = rte_mbuf_from_vlib_buffer (b); - sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE; - sop->m_dst = 0; - /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes, - * so we have to manually adjust mbuf data_off here so cryptodev can - * correctly compute the data pointer. The prepend here will be later - * rewritten by tx. */ - if (PREDICT_TRUE (fe->integ_start_offset < 0)) - { - sop->m_src->data_off += fe->integ_start_offset; - integ_offset = 0; - crypto_offset = offset_diff; - } - sop->session = key->keys[op_type]; - sop->cipher.data.offset = crypto_offset; - sop->cipher.data.length = fe->crypto_total_length; - sop->auth.data.offset = integ_offset; - sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj; - sop->auth.digest.data = fe->digest; - sop->auth.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode, - fe->digest); - if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)) - cryptodev_validate_mbuf_chain (vm, sop->m_src, b); - else - /* for input nodes that are not dpdk-input, it is possible the mbuf - * was updated before as one of the chained mbufs. Setting nb_segs - * to 1 here to prevent the cryptodev PMD to access potentially - * invalid m_src->next pointers. 
- */ - sop->m_src->nb_segs = 1; - clib_memcpy_fast (cop[0]->iv, fe->iv, 16); - cop++; - bi++; - fe++; - n_elts--; + vec_free (name); + if (sess_pools_elt->sess_pool) + rte_mempool_free (sess_pools_elt->sess_pool); +#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) + if (sess_pools_elt->sess_priv_pool) + rte_mempool_free (sess_pools_elt->sess_priv_pool); +#endif + return error; } - - n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id, - cet->cryptodev_q, - (struct rte_crypto_op **) - cet->cops, frame->n_elts); - ASSERT (n_enqueue == frame->n_elts); - cet->inflight += n_enqueue; - return 0; } -static_always_inline int -cryptodev_frame_gcm_enqueue (vlib_main_t * vm, - vnet_crypto_async_frame_t * frame, - cryptodev_op_type_t op_type, u8 aad_len) +int +cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx, + u32 aad_len) { cryptodev_main_t *cmt = &cryptodev_main; - clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main; - cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node; - cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index; - vnet_crypto_async_frame_elt_t *fe; - cryptodev_op_t **cop; - u32 *bi; - u32 n_enqueue = 0, n_elts; - cryptodev_key_t *key; - u32 last_key_index; - u8 sess_aad_len; - - if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0)) - return -1; - n_elts = frame->n_elts; - - if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts)) - { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - return -1; - } - - if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool, - (void **) cet->cops, n_elts) < 0)) - { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - return -1; - } - - cop = cet->cops; - fe = frame->elts; - bi = frame->buffer_indices; - cop[0]->frame = frame; - cop[0]->n_elts = n_elts; + cryptodev_numa_data_t *numa_data; + cryptodev_inst_t *dev_inst; + vnet_crypto_key_t *key = vnet_crypto_get_key (idx); + struct rte_mempool *sess_pool; + cryptodev_session_pool_t *sess_pools_elt; + cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx); + struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } }; + struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } }; + cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 }; +#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) + struct rte_mempool *sess_priv_pool; + struct rte_cryptodev_info dev_info; +#endif + u32 numa_node = vm->numa_node; + clib_error_t *error; + int ret = 0; + u8 found = 0; - key = pool_elt_at_index (cmt->keys, fe->key_index); - last_key_index = fe->key_index; - sess_aad_len = (u8) key->keys[op_type]->opaque_data; - if (PREDICT_FALSE (sess_aad_len != aad_len)) - cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY, - fe->key_index, aad_len); + numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node); - while (n_elts) + clib_spinlock_lock (&cmt->tlock); + vec_foreach (sess_pools_elt, numa_data->sess_pools) { - vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]); - struct rte_crypto_sym_op *sop = &cop[0]->sop; - u16 crypto_offset = fe->crypto_start_offset; - - if (n_elts > 2) - { - CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE); - CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE); - CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD); - } - if (last_key_index != fe->key_index) + if (sess_pools_elt->sess_pool == NULL) { - key = pool_elt_at_index (cmt->keys, fe->key_index); - sess_aad_len = (u8) 
key->keys[op_type]->opaque_data; - if (PREDICT_FALSE (sess_aad_len != aad_len)) + error = allocate_session_pools (numa_node, sess_pools_elt, + vec_len (numa_data->sess_pools) - 1); + if (error) { - cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY, - fe->key_index, aad_len); + ret = -1; + goto clear_key; } - last_key_index = fe->key_index; } - - sop->m_src = rte_mbuf_from_vlib_buffer (b); - sop->m_dst = 0; - /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes, - * so we have to manually adjust mbuf data_off here so cryptodev can - * correctly compute the data pointer. The prepend here will be later - * rewritten by tx. */ - if (PREDICT_FALSE (fe->crypto_start_offset < 0)) + if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2) { - rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset); - crypto_offset = 0; + found = 1; + break; } - - sop->session = key->keys[op_type]; - sop->aead.aad.data = cop[0]->aad; - sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET; - sop->aead.data.length = fe->crypto_total_length; - sop->aead.data.offset = crypto_offset; - sop->aead.digest.data = fe->tag; - sop->aead.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode, - fe->tag); - if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)) - cryptodev_validate_mbuf_chain (vm, sop->m_src, b); - else - /* for input nodes that are not dpdk-input, it is possible the mbuf - * was updated before as one of the chained mbufs. Setting nb_segs - * to 1 here to prevent the cryptodev PMD to access potentially - * invalid m_src->next pointers. - */ - sop->m_src->nb_segs = 1; - clib_memcpy_fast (cop[0]->iv, fe->iv, 12); - clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len); - cop++; - bi++; - fe++; - n_elts--; } - n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id, - cet->cryptodev_q, - (struct rte_crypto_op **) - cet->cops, frame->n_elts); - ASSERT (n_enqueue == frame->n_elts); - cet->inflight += n_enqueue; - - return 0; -} - -static_always_inline cryptodev_op_t * -cryptodev_get_ring_head (struct rte_ring * ring) -{ - cryptodev_op_t **r = (void *) &ring[1]; - return r[ring->cons.head & ring->mask]; -} - -static_always_inline vnet_crypto_async_frame_t * -cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed, - u32 * enqueue_thread_idx) -{ - cryptodev_main_t *cmt = &cryptodev_main; - cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node; - cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index; - cryptodev_op_t *cop0, **cop = cet->cops; - vnet_crypto_async_frame_elt_t *fe; - vnet_crypto_async_frame_t *frame; - u32 n_elts, n_completed_ops = rte_ring_count (cet->ring); - u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */ - - if (cet->inflight) + if (found == 0) { - n_elts = clib_min (CRYPTODEV_NB_CRYPTO_OPS - n_completed_ops, - VNET_CRYPTO_FRAME_SIZE); - n_elts = rte_cryptodev_dequeue_burst - (cet->cryptodev_id, cet->cryptodev_q, - (struct rte_crypto_op **) cet->cops, n_elts); - cet->inflight -= n_elts; - n_completed_ops += n_elts; - - rte_ring_sp_enqueue_burst (cet->ring, (void *) cet->cops, n_elts, NULL); + vec_add2 (numa_data->sess_pools, sess_pools_elt, 1); + error = allocate_session_pools (numa_node, sess_pools_elt, + vec_len (numa_data->sess_pools) - 1); + if (error) + { + ret = -1; + goto clear_key; + } } - if (PREDICT_FALSE (n_completed_ops == 0)) - return 0; + sess_pool = sess_pools_elt->sess_pool; +#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) + sess_priv_pool = sess_pools_elt->sess_priv_pool; 
- cop0 = cryptodev_get_ring_head (cet->ring); - /* not a single frame is finished */ - if (PREDICT_FALSE (cop0->n_elts > rte_ring_count (cet->ring))) - return 0; + sessions[CRYPTODEV_OP_TYPE_ENCRYPT] = + rte_cryptodev_sym_session_create (sess_pool); - frame = cop0->frame; - n_elts = cop0->n_elts; - n_elts = rte_ring_sc_dequeue_bulk (cet->ring, (void **) cet->cops, - n_elts, 0); - fe = frame->elts; + sessions[CRYPTODEV_OP_TYPE_DECRYPT] = + rte_cryptodev_sym_session_create (sess_pool); +#endif - while (n_elts > 4) + if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) + ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key); + else + ret = + prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len); + if (ret) { - ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status]; - ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status]; - ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status]; - ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status]; - - cop += 4; - fe += 4; - n_elts -= 4; + ret = -1; + goto clear_key; } - while (n_elts) + if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) + prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key); + else + prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len); + +#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) + dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0); + u32 dev_id = dev_inst->dev_id; + sessions[CRYPTODEV_OP_TYPE_ENCRYPT] = + rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool); + sessions[CRYPTODEV_OP_TYPE_DECRYPT] = + rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool); + if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] || + !sessions[CRYPTODEV_OP_TYPE_DECRYPT]) { - ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status]; - fe++; - cop++; - n_elts--; + ret = -1; + goto clear_key; } - frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ? 
- VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR; - - rte_mempool_put_bulk (numa->cop_pool, (void **) cet->cops, frame->n_elts); - *nb_elts_processed = frame->n_elts; - *enqueue_thread_idx = frame->enqueue_thread_index; - return frame; -} + rte_cryptodev_sym_session_opaque_data_set ( + sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len); + rte_cryptodev_sym_session_opaque_data_set ( + sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len); +#else + vec_foreach (dev_inst, cmt->cryptodev_inst) + { + u32 dev_id = dev_inst->dev_id; + rte_cryptodev_info_get (dev_id, &dev_info); + u32 driver_id = dev_info.driver_id; + + /* if the session is already configured for the driver type, avoid + configuring it again to increase the session data's refcnt */ + if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data && + sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data) + continue; -/* *INDENT-OFF* */ -static_always_inline int -cryptodev_enqueue_gcm_aad_8_enc (vlib_main_t * vm, - vnet_crypto_async_frame_t * frame) -{ - return cryptodev_frame_gcm_enqueue (vm, frame, - CRYPTODEV_OP_TYPE_ENCRYPT, 8); -} -static_always_inline int -cryptodev_enqueue_gcm_aad_12_enc (vlib_main_t * vm, - vnet_crypto_async_frame_t * frame) -{ - return cryptodev_frame_gcm_enqueue (vm, frame, - CRYPTODEV_OP_TYPE_ENCRYPT, 12); -} + ret = rte_cryptodev_sym_session_init ( + dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc, + sess_priv_pool); + ret = rte_cryptodev_sym_session_init ( + dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec, + sess_priv_pool); + if (ret < 0) + goto clear_key; + } -static_always_inline int -cryptodev_enqueue_gcm_aad_8_dec (vlib_main_t * vm, - vnet_crypto_async_frame_t * frame) -{ - return cryptodev_frame_gcm_enqueue (vm, frame, - CRYPTODEV_OP_TYPE_DECRYPT, 8); -} -static_always_inline int -cryptodev_enqueue_gcm_aad_12_dec (vlib_main_t * vm, - vnet_crypto_async_frame_t * frame) -{ - return cryptodev_frame_gcm_enqueue (vm, frame, - CRYPTODEV_OP_TYPE_DECRYPT, 12); -} + sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len; + sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len; +#endif -static_always_inline int -cryptodev_enqueue_linked_alg_enc (vlib_main_t * vm, - vnet_crypto_async_frame_t * frame) -{ - return cryptodev_frame_linked_algs_enqueue (vm, frame, - CRYPTODEV_OP_TYPE_ENCRYPT); -} + CLIB_MEMORY_STORE_BARRIER (); + ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] = + sessions[CRYPTODEV_OP_TYPE_ENCRYPT]; + ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] = + sessions[CRYPTODEV_OP_TYPE_DECRYPT]; -static_always_inline int -cryptodev_enqueue_linked_alg_dec (vlib_main_t * vm, - vnet_crypto_async_frame_t * frame) -{ - return cryptodev_frame_linked_algs_enqueue (vm, frame, - CRYPTODEV_OP_TYPE_DECRYPT); +clear_key: + if (ret != 0) + { + cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]); + cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]); + } + clib_spinlock_unlock (&cmt->tlock); + return ret; } typedef enum @@ -866,15 +577,16 @@ cryptodev_assign_resource (cryptodev_engine_thread_t * cet, if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index) == 1) return -EBUSY; + vec_foreach_index (idx, cmt->cryptodev_inst) - { - cinst = cmt->cryptodev_inst + idx; - if (cinst->dev_id == cet->cryptodev_id && - cinst->q_id == cet->cryptodev_q) - break; - } + { + cinst = cmt->cryptodev_inst + idx; + if (cinst->dev_id == cet->cryptodev_id && + cinst->q_id == cet->cryptodev_q) + break; + } /* invalid existing worker resource assignment */ - if 
(idx == vec_len (cmt->cryptodev_inst)) + if (idx >= vec_len (cmt->cryptodev_inst)) return -EINVAL; clib_spinlock_lock (&cmt->tlock); clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0); @@ -941,6 +653,10 @@ cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input, vec_foreach_index (inst, cmt->cryptodev_inst) vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst); + if (cmt->is_raw_api) + vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API"); + else + vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API"); return 0; } @@ -950,6 +666,90 @@ VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = { .function = cryptodev_show_assignment_fn, }; +static clib_error_t * +cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input, + vlib_cli_command_t *cmd) +{ + cryptodev_main_t *cmt = &cryptodev_main; + u32 thread_index = 0; + u16 i; + vec_foreach_index (thread_index, cmt->per_thread_data) + { + cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index; + cryptodev_cache_ring_t *ring = &cet->cache_ring; + u16 head = ring->head; + u16 tail = ring->tail; + u16 n_cached = (CRYPTODEV_CACHE_QUEUE_SIZE - tail + head) & + CRYPTODEV_CACHE_QUEUE_MASK; + + u16 enq_head = ring->enq_head; + u16 deq_tail = ring->deq_tail; + u16 n_frames_inflight = + (enq_head == deq_tail) ? + 0 : + ((CRYPTODEV_CACHE_QUEUE_SIZE + enq_head - deq_tail) & + CRYPTODEV_CACHE_QUEUE_MASK); + /* even if some elements of dequeued frame are still pending for deq + * we consider the frame as processed */ + u16 n_frames_processed = + ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ? + 0 : + ((CRYPTODEV_CACHE_QUEUE_SIZE - tail + deq_tail) & + CRYPTODEV_CACHE_QUEUE_MASK) + + 1; + /* even if some elements of enqueued frame are still pending for enq + * we consider the frame as enqueued */ + u16 n_frames_pending = + (head == enq_head) ? 
0 : + ((CRYPTODEV_CACHE_QUEUE_SIZE - enq_head + head) & + CRYPTODEV_CACHE_QUEUE_MASK) - + 1; + + u16 elts_to_enq = + (ring->frames[enq_head].n_elts - ring->frames[enq_head].enq_elts_head); + u16 elts_to_deq = + (ring->frames[deq_tail].n_elts - ring->frames[deq_tail].deq_elts_tail); + + u32 elts_total = 0; + + for (i = 0; i < CRYPTODEV_CACHE_QUEUE_SIZE; i++) + elts_total += ring->frames[i].n_elts; + + if (vlib_num_workers () > 0 && thread_index == 0) + continue; + + vlib_cli_output (vm, "\n\n"); + vlib_cli_output (vm, "Frames cached in the ring: %u", n_cached); + vlib_cli_output (vm, "Frames cached but not processed: %u", + n_frames_pending); + vlib_cli_output (vm, "Frames inflight: %u", n_frames_inflight); + vlib_cli_output (vm, "Frames processed: %u", n_frames_processed); + vlib_cli_output (vm, "Elements total: %u", elts_total); + vlib_cli_output (vm, "Elements inflight: %u", cet->inflight); + vlib_cli_output (vm, "Head index: %u", head); + vlib_cli_output (vm, "Tail index: %u", tail); + vlib_cli_output (vm, "Current frame index beeing enqueued: %u", + enq_head); + vlib_cli_output (vm, "Current frame index being dequeued: %u", deq_tail); + vlib_cli_output (vm, + "Elements in current frame to be enqueued: %u, waiting " + "to be enqueued: %u", + ring->frames[enq_head].n_elts, elts_to_enq); + vlib_cli_output (vm, + "Elements in current frame to be dequeued: %u, waiting " + "to be dequeued: %u", + ring->frames[deq_tail].n_elts, elts_to_deq); + vlib_cli_output (vm, "\n\n"); + } + return 0; +} + +VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = { + .path = "show cryptodev cache status", + .short_help = "show status of all cryptodev cache rings", + .function = cryptodev_show_cache_rings_fn, +}; + static clib_error_t * cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) @@ -1005,8 +805,8 @@ cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input, CRYPTODEV_RESOURCE_ASSIGN_UPDATE); if (ret) { - error = clib_error_return (0, "cryptodev_assign_resource returned %i", - ret); + error = + clib_error_return (0, "cryptodev_assign_resource returned %d", ret); return error; } @@ -1020,48 +820,6 @@ VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = { .function = cryptodev_set_assignment_fn, }; -static int -check_cryptodev_alg_support (u32 dev_id) -{ - const struct rte_cryptodev_symmetric_capability *cap; - struct rte_cryptodev_sym_capability_idx cap_idx; - -#define _(a, b, c, d, e, f) \ - cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \ - cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \ - cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \ - if (!cap) \ - return -RTE_CRYPTO_##b##_##c; \ - else \ - { \ - if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \ - return -RTE_CRYPTO_##b##_##c; \ - if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \ - return -RTE_CRYPTO_##b##_##c; \ - if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \ - return -RTE_CRYPTO_##b##_##c; \ - } - - foreach_vnet_aead_crypto_conversion -#undef _ - -#define _(a, b, c, d) \ - cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \ - cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \ - cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \ - if (!cap) \ - return -RTE_CRYPTO_CIPHER_##b; \ - cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \ - cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \ - cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \ - if (!cap) \ - return -RTE_CRYPTO_AUTH_##c; - - foreach_cryptodev_link_async_alg -#undef _ - return 
0; -} - static u32 cryptodev_count_queue (u32 numa) { @@ -1072,15 +830,6 @@ cryptodev_count_queue (u32 numa) for (i = 0; i < n_cryptodev; i++) { rte_cryptodev_info_get (i, &info); - if (rte_cryptodev_socket_id (i) != numa) - { - clib_warning ("DPDK crypto resource %s is in different numa node " - "as %u, ignored", info.device->name, numa); - continue; - } - /* only device support symmetric crypto is used */ - if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO)) - continue; q_count += info.max_nb_queue_pairs; } @@ -1088,70 +837,83 @@ cryptodev_count_queue (u32 numa) } static int -cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id) +cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id) { + struct rte_cryptodev_config cfg; struct rte_cryptodev_info info; - struct rte_cryptodev *cdev; cryptodev_main_t *cmt = &cryptodev_main; - cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data, - vm->numa_node); u32 i; int ret; rte_cryptodev_info_get (cryptodev_id, &info); - /* do not configure the device that does not support symmetric crypto */ + /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto devices + anymore. Only devices that have the same driver type as the first + initialized device can be initialized. + */ +#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) + if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id) + return -1; +#endif + if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO)) - return 0; + return -1; - ret = check_cryptodev_alg_support (cryptodev_id); - if (ret != 0) - return ret; + cfg.socket_id = info.device->numa_node; + cfg.nb_queue_pairs = info.max_nb_queue_pairs; - cdev = rte_cryptodev_pmd_get_dev (cryptodev_id); - /** If the device is already started, we reuse it, otherwise configure - * both the device and queue pair. 
- **/ - if (!cdev->data->dev_started) - { - struct rte_cryptodev_config cfg; + rte_cryptodev_configure (cryptodev_id, &cfg); - cfg.socket_id = vm->numa_node; - cfg.nb_queue_pairs = info.max_nb_queue_pairs; + for (i = 0; i < info.max_nb_queue_pairs; i++) + { + struct rte_cryptodev_qp_conf qp_cfg; - rte_cryptodev_configure (cryptodev_id, &cfg); + qp_cfg.mp_session = 0; +#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) + qp_cfg.mp_session_private = 0; +#endif + qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS; - for (i = 0; i < info.max_nb_queue_pairs; i++) + ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg, + info.device->numa_node); + if (ret) { - struct rte_cryptodev_qp_conf qp_cfg; + clib_warning ("Cryptodev: Configure device %u queue %u failed %d", + cryptodev_id, i, ret); + break; + } + } - int ret; + if (i != info.max_nb_queue_pairs) + return -1; - qp_cfg.mp_session = numa_data->sess_pool; - qp_cfg.mp_session_private = numa_data->sess_priv_pool; - qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS; + /* start the device */ + rte_cryptodev_start (cryptodev_id); - ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg, - vm->numa_node); - if (ret) - break; - } - if (i != info.max_nb_queue_pairs) - return -1; - /* start the device */ - rte_cryptodev_start (i); +#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) + if (cmt->drivers_cnt == 0) + { + cmt->drivers_cnt = 1; + cmt->driver_id = info.driver_id; + cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id); } +#endif - for (i = 0; i < cdev->data->nb_queue_pairs; i++) + for (i = 0; i < info.max_nb_queue_pairs; i++) { cryptodev_inst_t *cdev_inst; +#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) + const char *dev_name = rte_dev_name (info.device); +#else + const char *dev_name = info.device->name; +#endif vec_add2(cmt->cryptodev_inst, cdev_inst, 1); - cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10); + cdev_inst->desc = vec_new (char, strlen (dev_name) + 10); cdev_inst->dev_id = cryptodev_id; cdev_inst->q_id = i; - snprintf (cdev_inst->desc, strlen (info.device->name) + 9, - "%s_q%u", info.device->name, i); + snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u", + info.device->name, i); } return 0; @@ -1171,236 +933,430 @@ cryptodev_cmp (void *v1, void *v2) } static int -cryptodev_probe (vlib_main_t *vm, u32 n_workers) +cryptodev_supports_param_value (u32 *params, u32 param_value) +{ + u32 *value; + vec_foreach (value, params) + { + if (*value == param_value) + return 1; + } + return 0; +} + +int +cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx, + u32 key_size, u32 digest_size, u32 aad_size) { cryptodev_main_t *cmt = &cryptodev_main; - u32 n_queues = cryptodev_count_queue (vm->numa_node); - u32 i; - int ret; + cryptodev_capability_t *cap; + vec_foreach (cap, cmt->supported_caps) + { - /* If there is not enough queues, exit */ - if (n_queues < n_workers) - return -1; + if (cap->xform_type != idx->type) + continue; - for (i = 0; i < rte_cryptodev_count (); i++) + if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH && + cap->auth.algo == idx->algo.auth && + cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size)) + return 1; + + if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + cap->cipher.algo == idx->algo.cipher && + cryptodev_supports_param_value (cap->cipher.key_sizes, key_size)) + return 1; + + if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD && + cap->aead.algo == idx->algo.aead && + cryptodev_supports_param_value (cap->aead.key_sizes, key_size) && + 
cryptodev_supports_param_value (cap->aead.digest_sizes, + digest_size) && + cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size)) + return 1; + } + return 0; +} + +static void +remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min, + u32 param_size_max, u32 increment) +{ + u32 i = 0; + u32 cap_param_size; + + while (i < vec_len (*param_sizes)) { - ret = cryptodev_configure (vm, i); - if (ret) - return ret; + u32 found_param = 0; + for (cap_param_size = param_size_min; cap_param_size <= param_size_max; + cap_param_size += increment) + { + if ((*param_sizes)[i] == cap_param_size) + { + found_param = 1; + break; + } + if (increment == 0) + break; + } + if (!found_param) + /* no such param_size in cap so delete this size in temp_cap params */ + vec_delete (*param_sizes, 1, i); + else + i++; } +} - vec_sort_with_function(cmt->cryptodev_inst, cryptodev_cmp); +static void +cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id) +{ + cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id]; - return 0; + switch (temp_cap.xform_type) + { + case RTE_CRYPTO_SYM_XFORM_AUTH: + vec_free (temp_cap.auth.digest_sizes); + break; + case RTE_CRYPTO_SYM_XFORM_CIPHER: + vec_free (temp_cap.cipher.key_sizes); + break; + case RTE_CRYPTO_SYM_XFORM_AEAD: + vec_free (temp_cap.aead.key_sizes); + vec_free (temp_cap.aead.aad_sizes); + vec_free (temp_cap.aead.digest_sizes); + break; + default: + break; + } + vec_delete (*temp_caps, 1, temp_cap_id); } -static int -cryptodev_get_session_sz (vlib_main_t *vm, uint32_t n_workers) +static u32 +cryptodev_remove_unsupported_param_sizes ( + cryptodev_capability_t *temp_cap, + const struct rte_cryptodev_capabilities *dev_caps) { - u32 sess_data_sz = 0, i; + u32 cap_found = 0; + const struct rte_cryptodev_capabilities *cap = &dev_caps[0]; - if (rte_cryptodev_count () == 0) - return -1; + while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) + { + if (cap->sym.xform_type == temp_cap->xform_type) + switch (cap->sym.xform_type) + { + case RTE_CRYPTO_SYM_XFORM_CIPHER: + if (cap->sym.cipher.algo == temp_cap->cipher.algo) + { + remove_unsupported_param_size ( + &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min, + cap->sym.cipher.key_size.max, + cap->sym.cipher.key_size.increment); + if (vec_len (temp_cap->cipher.key_sizes) > 0) + cap_found = 1; + } + break; + case RTE_CRYPTO_SYM_XFORM_AUTH: + if (cap->sym.auth.algo == temp_cap->auth.algo) + { + remove_unsupported_param_size ( + &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min, + cap->sym.auth.digest_size.max, + cap->sym.auth.digest_size.increment); + if (vec_len (temp_cap->auth.digest_sizes) > 0) + cap_found = 1; + } + break; + case RTE_CRYPTO_SYM_XFORM_AEAD: + if (cap->sym.aead.algo == temp_cap->aead.algo) + { + remove_unsupported_param_size ( + &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min, + cap->sym.aead.key_size.max, + cap->sym.aead.key_size.increment); + remove_unsupported_param_size ( + &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min, + cap->sym.aead.aad_size.max, + cap->sym.aead.aad_size.increment); + remove_unsupported_param_size ( + &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min, + cap->sym.aead.digest_size.max, + cap->sym.aead.digest_size.increment); + if (vec_len (temp_cap->aead.key_sizes) > 0 && + vec_len (temp_cap->aead.aad_sizes) > 0 && + vec_len (temp_cap->aead.digest_sizes) > 0) + cap_found = 1; + } + break; + default: + break; + } + if (cap_found) + break; + cap++; + } - for (i = 0; i < rte_cryptodev_count (); i++) + return cap_found; 
+} + +static void +cryptodev_get_common_capabilities () +{ + cryptodev_main_t *cmt = &cryptodev_main; + cryptodev_inst_t *dev_inst; + struct rte_cryptodev_info dev_info; + u32 previous_dev_id, dev_id; + u32 cap_id = 0; + u32 param; + cryptodev_capability_t tmp_cap; + const struct rte_cryptodev_capabilities *cap; + const struct rte_cryptodev_capabilities *dev_caps; + + clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t)); + if (vec_len (cmt->cryptodev_inst) == 0) + return; + dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0); + rte_cryptodev_info_get (dev_inst->dev_id, &dev_info); + cap = &dev_info.capabilities[0]; + + /*init capabilities vector*/ + while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) { - u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i); + if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + { + cap++; + continue; + } - sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz; + tmp_cap.xform_type = cap->sym.xform_type; + switch (cap->sym.xform_type) + { + case RTE_CRYPTO_SYM_XFORM_CIPHER: + tmp_cap.cipher.key_sizes = 0; + tmp_cap.cipher.algo = cap->sym.cipher.algo; + for (param = cap->sym.cipher.key_size.min; + param <= cap->sym.cipher.key_size.max; + param += cap->sym.cipher.key_size.increment) + { + vec_add1 (tmp_cap.cipher.key_sizes, param); + if (cap->sym.cipher.key_size.increment == 0) + break; + } + break; + case RTE_CRYPTO_SYM_XFORM_AUTH: + tmp_cap.auth.algo = cap->sym.auth.algo; + tmp_cap.auth.digest_sizes = 0; + for (param = cap->sym.auth.digest_size.min; + param <= cap->sym.auth.digest_size.max; + param += cap->sym.auth.digest_size.increment) + { + vec_add1 (tmp_cap.auth.digest_sizes, param); + if (cap->sym.auth.digest_size.increment == 0) + break; + } + break; + case RTE_CRYPTO_SYM_XFORM_AEAD: + tmp_cap.aead.key_sizes = 0; + tmp_cap.aead.aad_sizes = 0; + tmp_cap.aead.digest_sizes = 0; + tmp_cap.aead.algo = cap->sym.aead.algo; + for (param = cap->sym.aead.key_size.min; + param <= cap->sym.aead.key_size.max; + param += cap->sym.aead.key_size.increment) + { + vec_add1 (tmp_cap.aead.key_sizes, param); + if (cap->sym.aead.key_size.increment == 0) + break; + } + for (param = cap->sym.aead.aad_size.min; + param <= cap->sym.aead.aad_size.max; + param += cap->sym.aead.aad_size.increment) + { + vec_add1 (tmp_cap.aead.aad_sizes, param); + if (cap->sym.aead.aad_size.increment == 0) + break; + } + for (param = cap->sym.aead.digest_size.min; + param <= cap->sym.aead.digest_size.max; + param += cap->sym.aead.digest_size.increment) + { + vec_add1 (tmp_cap.aead.digest_sizes, param); + if (cap->sym.aead.digest_size.increment == 0) + break; + } + break; + default: + break; + } + + vec_add1 (cmt->supported_caps, tmp_cap); + cap++; } - return sess_data_sz; + while (cap_id < vec_len (cmt->supported_caps)) + { + u32 cap_is_supported = 1; + previous_dev_id = cmt->cryptodev_inst->dev_id; + + vec_foreach (dev_inst, cmt->cryptodev_inst) + { + dev_id = dev_inst->dev_id; + if (previous_dev_id != dev_id) + { + previous_dev_id = dev_id; + rte_cryptodev_info_get (dev_id, &dev_info); + dev_caps = &dev_info.capabilities[0]; + cap_is_supported = cryptodev_remove_unsupported_param_sizes ( + &cmt->supported_caps[cap_id], dev_caps); + if (!cap_is_supported) + { + cryptodev_delete_cap (&cmt->supported_caps, cap_id); + /*no need to check other devices as this one doesn't support + * this temp_cap*/ + break; + } + } + } + if (cap_is_supported) + cap_id++; + } } -static void -dpdk_disable_cryptodev_engine (vlib_main_t * vm) +static int +cryptodev_probe (vlib_main_t *vm, u32 
n_workers) { cryptodev_main_t *cmt = &cryptodev_main; - cryptodev_numa_data_t *numa_data; + u32 n_queues = cryptodev_count_queue (vm->numa_node); + u32 i; - vec_validate (cmt->per_numa_data, vm->numa_node); - numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node); + if (n_queues < n_workers) + return -1; + + for (i = 0; i < rte_cryptodev_count (); i++) + cryptodev_configure (vm, i); + + if (vec_len (cmt->cryptodev_inst) == 0) + return -1; + cryptodev_get_common_capabilities (); + vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp); + + /* if there is not enough device stop cryptodev */ + if (vec_len (cmt->cryptodev_inst) < n_workers) + return -1; - if (numa_data->sess_pool) - rte_mempool_free (numa_data->sess_pool); - if (numa_data->sess_priv_pool) - rte_mempool_free (numa_data->sess_priv_pool); - if (numa_data->cop_pool) - rte_mempool_free (numa_data->cop_pool); + return 0; } +#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) static void -crypto_op_init (struct rte_mempool *mempool, - void *_arg __attribute__ ((unused)), - void *_obj, unsigned i __attribute__ ((unused))) +is_drv_unique (u32 driver_id, u32 **unique_drivers) { - struct rte_crypto_op *op = _obj; + u32 *unique_elt; + u8 found = 0; - op->sess_type = RTE_CRYPTO_OP_WITH_SESSION; - op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; - op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - op->phys_addr = rte_mempool_virt2iova (_obj); - op->mempool = mempool; -} + vec_foreach (unique_elt, *unique_drivers) + { + if (*unique_elt == driver_id) + { + found = 1; + break; + } + } + if (!found) + vec_add1 (*unique_drivers, driver_id); +} +#endif clib_error_t * dpdk_cryptodev_init (vlib_main_t * vm) { cryptodev_main_t *cmt = &cryptodev_main; vlib_thread_main_t *tm = vlib_get_thread_main (); - cryptodev_engine_thread_t *ptd; + cryptodev_engine_thread_t *cet; cryptodev_numa_data_t *numa_data; - struct rte_mempool *mp; + u32 node; + u8 nodes = 0; u32 skip_master = vlib_num_workers () > 0; u32 n_workers = tm->n_vlib_mains - skip_master; - u32 numa = vm->numa_node; - i32 sess_sz; - u64 n_cop_elts; u32 eidx; u32 i; - u8 *name = 0; clib_error_t *error; - struct rte_crypto_op_pool_private *priv; cmt->iova_mode = rte_eal_iova_mode (); - sess_sz = cryptodev_get_session_sz(vm, n_workers); - if (sess_sz < 0) + clib_bitmap_foreach (node, tm->cpu_socket_bitmap) { - error = clib_error_return (0, "Not enough cryptodevs"); - return error; + if (node >= nodes) + nodes = node; } - /* A total of 4 times n_worker threads * frame size as crypto ops */ - n_cop_elts = max_pow2 ((u64)n_workers * CRYPTODEV_NB_CRYPTO_OPS); - - vec_validate (cmt->per_numa_data, vm->numa_node); - numa_data = vec_elt_at_index (cmt->per_numa_data, numa); - - /* create session pool for the numa node */ - name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0); - mp = rte_cryptodev_sym_session_pool_create ((char *) name, - CRYPTODEV_NB_SESSION, - 0, 0, 0, numa); - if (!mp) + vec_validate (cmt->per_numa_data, nodes); + vec_foreach (numa_data, cmt->per_numa_data) { - error = clib_error_return (0, "Not enough memory for mp %s", name); - goto err_handling; + vec_validate (numa_data->sess_pools, 0); } - vec_free (name); - - numa_data->sess_pool = mp; - - /* create session private pool for the numa node */ - name = format (0, "cryptodev_sess_pool_%u%c", numa, 0); - mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0, - 0, NULL, NULL, NULL, NULL, numa, 0); - if (!mp) - { - error = clib_error_return (0, "Not enough memory for mp %s", name); - vec_free (name); - goto 
err_handling; - } - - vec_free (name); - - numa_data->sess_priv_pool = mp; - - /* create cryptodev op pool */ - name = format (0, "cryptodev_op_pool_%u%c", numa, 0); - - mp = rte_mempool_create ((char *) name, n_cop_elts, - sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2, - sizeof (struct rte_crypto_op_pool_private), NULL, - NULL, crypto_op_init, NULL, numa, 0); - if (!mp) - { - error = clib_error_return (0, "Not enough memory for mp %s", name); - vec_free (name); - goto err_handling; - } - - priv = rte_mempool_get_priv (mp); - priv->priv_size = sizeof (struct rte_crypto_op_pool_private); - priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; - vec_free (name); - numa_data->cop_pool = mp; /* probe all cryptodev devices and get queue info */ if (cryptodev_probe (vm, n_workers) < 0) + return 0; + +#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) + struct rte_cryptodev_info dev_info; + cryptodev_inst_t *dev_inst; + u32 *unique_drivers = 0; + vec_foreach (dev_inst, cmt->cryptodev_inst) { - error = clib_error_return (0, "Failed to configure cryptodev"); - goto err_handling; + u32 dev_id = dev_inst->dev_id; + rte_cryptodev_info_get (dev_id, &dev_info); + u32 driver_id = dev_info.driver_id; + is_drv_unique (driver_id, &unique_drivers); + + u32 sess_sz = + rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id); + cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz); } - clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains); + cmt->drivers_cnt = vec_len (unique_drivers); + vec_free (unique_drivers); +#endif + + clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers); clib_spinlock_init (&cmt->tlock); vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1, CLIB_CACHE_LINE_BYTES); for (i = skip_master; i < tm->n_vlib_mains; i++) { - ptd = cmt->per_thread_data + i; - cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO); - name = format (0, "frames_ring_%u%c", i, 0); - ptd->ring = rte_ring_create((char *) name, CRYPTODEV_NB_CRYPTO_OPS, - vm->numa_node, RING_F_SP_ENQ|RING_F_SC_DEQ); - if (!ptd->ring) + cet = cmt->per_thread_data + i; + + if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) < + 0) { - error = clib_error_return (0, "Not enough memory for mp %s", name); - vec_free (name); + error = clib_error_return (0, "Failed to configure cryptodev"); goto err_handling; } - vec_validate (ptd->cops, VNET_CRYPTO_FRAME_SIZE - 1); - vec_free(name); } /* register handler */ - eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79, - "DPDK Cryptodev Engine"); - -#define _(a, b, c, d, e, f) \ - vnet_crypto_register_async_handler \ - (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \ - cryptodev_enqueue_gcm_aad_##f##_enc,\ - cryptodev_frame_dequeue); \ - vnet_crypto_register_async_handler \ - (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \ - cryptodev_enqueue_gcm_aad_##f##_dec, \ - cryptodev_frame_dequeue); + eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100, + "DPDK Cryptodev Engine"); - foreach_vnet_aead_crypto_conversion -#undef _ + vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler); -#define _(a, b, c, d) \ - vnet_crypto_register_async_handler \ - (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \ - cryptodev_enqueue_linked_alg_enc, \ - cryptodev_frame_dequeue); \ - vnet_crypto_register_async_handler \ - (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \ - cryptodev_enqueue_linked_alg_dec, \ - cryptodev_frame_dequeue); - - foreach_cryptodev_link_async_alg -#undef _ + if (cryptodev_register_raw_hdl) 
+ error = cryptodev_register_raw_hdl (vm, eidx); + else + error = cryptodev_register_cop_hdl (vm, eidx); - vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler); + if (error) + goto err_handling; + + /* this engine is only enabled when cryptodev device(s) are presented in + * startup.conf. Assume it is wanted to be used, turn on async mode here. + */ + ipsec_set_async_mode (1); return 0; err_handling: - dpdk_disable_cryptodev_engine (vm); - return error; } -/* *INDENT-On* */ - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */
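
The hunks above replace the pre-22.11 two-step symmetric-session setup (create from a pool, then rte_cryptodev_sym_session_init() per device, plus clear + free on teardown) with the single device-aware create/free calls introduced in DPDK 22.11, guarding every call site with RTE_VERSION. Below is a minimal standalone sketch of that pattern, separate from the patch itself: example_session_create / example_session_free are hypothetical helper names, and dev_id, xforms, sess_pool and (pre-22.11) sess_priv_pool are assumed to be prepared by the caller.

/* Editorial sketch, not part of the patch: version-gated DPDK symmetric
 * session lifecycle as used throughout cryptodev.c above. */
#include <rte_cryptodev.h>
#include <rte_version.h>

static void *
example_session_create (uint8_t dev_id, struct rte_crypto_sym_xform *xforms,
			struct rte_mempool *sess_pool,
			struct rte_mempool *sess_priv_pool)
{
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  /* 22.11+: one call allocates the session and binds it to the device;
   * the private-session pool is no longer needed. */
  (void) sess_priv_pool;
  return rte_cryptodev_sym_session_create (dev_id, xforms, sess_pool);
#else
  /* Pre-22.11: allocate from the session pool, then initialize the
   * per-device private data from sess_priv_pool. */
  struct rte_cryptodev_sym_session *sess =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sess)
    return 0;
  if (rte_cryptodev_sym_session_init (dev_id, sess, xforms,
				      sess_priv_pool) < 0)
    {
      rte_cryptodev_sym_session_free (sess);
      return 0;
    }
  return sess;
#endif
}

static void
example_session_free (uint8_t dev_id, void *sess)
{
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  /* 22.11+: a single device-aware free replaces clear + free. */
  rte_cryptodev_sym_session_free (dev_id, sess);
#else
  rte_cryptodev_sym_session_clear (dev_id, sess);
  rte_cryptodev_sym_session_free (sess);
#endif
}

In the patch itself, the 22.11 path creates each session once against the first configured device (heterogeneous drivers are rejected in cryptodev_configure(), so one device is representative), while the older path keeps looping over cmt->cryptodev_inst and calling rte_cryptodev_sym_session_init() per device.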