#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
-#include <rte_cryptodev_pmd.h>
#include <rte_ring_peek_zc.h>
#include <rte_config.h>
return pointer_to_uword (data) - pm->lookup_table[index];
}
+static_always_inline void
+cryptodev_validate_mbuf (struct rte_mbuf *mb, vlib_buffer_t *b)
+{
+ /* on the vnet side, vlib_buffer current_length is updated by the cipher
+ * padding and the ICV; the mbuf needs to be kept in sync with these changes */
+ u16 data_len = b->current_length +
+ (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));
+
+ /* for input nodes other than dpdk-input, the mbuf may have been
+ * updated earlier as one of the chained mbufs. Set nb_segs to 1 here
+ * to prevent the cryptodev PMD from dereferencing a potentially
+ * invalid m_src->next pointer.
+ */
+ mb->nb_segs = 1;
+ mb->pkt_len = mb->data_len = data_len;
+}
+
static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
vlib_buffer_t *b)
clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
vnet_crypto_async_frame_elt_t *fe;
- struct rte_cryptodev_sym_session *sess = 0;
+ cryptodev_session_t *sess = 0;
cryptodev_op_t **cop;
u32 *bi;
u32 n_enqueue, n_elts;
if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
{
cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
+ VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
return -1;
}
rte_mempool_get_bulk (cet->cop_pool, (void **) cet->cops, n_elts) < 0))
{
cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
+ VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
return -1;
}
cryptodev_session_create (vm, last_key_index, 0) < 0))
{
cryptodev_mark_frame_err_status (
- frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
+ VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
return -1;
}
}
if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
else
- /* for input nodes that are not dpdk-input, it is possible the mbuf
- * was updated before as one of the chained mbufs. Setting nb_segs
- * to 1 here to prevent the cryptodev PMD to access potentially
- * invalid m_src->next pointers.
- */
- sop->m_src->nb_segs = 1;
+ cryptodev_validate_mbuf (sop->m_src, b);
+
clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
cop++;
bi++;
clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
vnet_crypto_async_frame_elt_t *fe;
- struct rte_cryptodev_sym_session *sess = 0;
+ cryptodev_session_t *sess = 0;
cryptodev_op_t **cop;
u32 *bi;
u32 n_enqueue = 0, n_elts;
if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
{
cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
+ VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
return -1;
}
rte_mempool_get_bulk (cet->cop_pool, (void **) cet->cops, n_elts) < 0))
{
cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
+ VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
return -1;
}
aad_len) < 0))
{
cryptodev_mark_frame_err_status (
- frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
+ VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
return -1;
}
}
else if (PREDICT_FALSE (
- key->keys[vm->numa_node][op_type]->opaque_data !=
- aad_len))
+#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
+ rte_cryptodev_sym_session_opaque_data_get (
+ key->keys[vm->numa_node][op_type]) != (u64) aad_len
+#else
+ key->keys[vm->numa_node][op_type]->opaque_data != aad_len
+#endif
+ ))
{
cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
fe->key_index, aad_len);
aad_len) < 0))
{
cryptodev_mark_frame_err_status (
- frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
+ VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
return -1;
}
}
if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
else
- /* for input nodes that are not dpdk-input, it is possible the mbuf
- * was updated before as one of the chained mbufs. Setting nb_segs
- * to 1 here to prevent the cryptodev PMD to access potentially
- * invalid m_src->next pointers.
- */
- sop->m_src->nb_segs = 1;
+ cryptodev_validate_mbuf (sop->m_src, b);
+
clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
cop++;
return frame;
}
+static_always_inline int
+cryptodev_enqueue_aead_aad_0_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
+ 0);
+}
static_always_inline int
cryptodev_enqueue_aead_aad_8_enc (vlib_main_t *vm,
vnet_crypto_async_frame_t *frame)
12);
}
+static_always_inline int
+cryptodev_enqueue_aead_aad_0_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
+ 0);
+}
static_always_inline int
cryptodev_enqueue_aead_aad_8_dec (vlib_main_t *vm,
vnet_crypto_async_frame_t *frame)
struct rte_cryptodev_sym_capability_idx cap_aead_idx;
u8 *name;
clib_error_t *error = 0;
+ u32 ref_cnt = 0;
vec_foreach (cet, cmt->per_thread_data)
{
vec_validate (cet->cops, VNET_CRYPTO_FRAME_SIZE - 1);
}
- /** INDENT-OFF **/
#define _(a, b, c, d, e, f, g) \
cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD; \
cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f)) \
{ \
- vnet_crypto_register_async_handler ( \
+ vnet_crypto_register_enqueue_handler ( \
vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
- cryptodev_enqueue_aead_aad_##f##_enc, cryptodev_frame_dequeue); \
- vnet_crypto_register_async_handler ( \
+ cryptodev_enqueue_aead_aad_##f##_enc); \
+ vnet_crypto_register_enqueue_handler ( \
vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
- cryptodev_enqueue_aead_aad_##f##_dec, cryptodev_frame_dequeue); \
+ cryptodev_enqueue_aead_aad_##f##_dec); \
+ ref_cnt++; \
}
foreach_vnet_aead_crypto_conversion
#undef _
if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) && \
cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1)) \
{ \
- vnet_crypto_register_async_handler ( \
+ vnet_crypto_register_enqueue_handler ( \
vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC, \
- cryptodev_enqueue_linked_alg_enc, cryptodev_frame_dequeue); \
- vnet_crypto_register_async_handler ( \
+ cryptodev_enqueue_linked_alg_enc); \
+ vnet_crypto_register_enqueue_handler ( \
vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC, \
- cryptodev_enqueue_linked_alg_dec, cryptodev_frame_dequeue); \
+ cryptodev_enqueue_linked_alg_dec); \
+ ref_cnt++; \
}
foreach_cryptodev_link_async_alg
#undef _
- /** INDENT-ON **/
+
+ if (ref_cnt)
+ vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_frame_dequeue);
return 0;