#define always_inline static inline __attribute__ ((__always_inline__))
#endif
+#define DPDK_CRYPTO_N_QUEUE_DESC 2048
+#define DPDK_CRYPTO_NB_SESS_OBJS 20000
+
#define foreach_dpdk_crypto_input_next \
_(DROP, "error-drop") \
_(IP4_LOOKUP, "ip4-lookup") \
_(IP6_LOOKUP, "ip6-lookup") \
_(INTERFACE_OUTPUT, "interface-output") \
- _(DECRYPT_POST, "dpdk-esp-decrypt-post")
+ _(MIDCHAIN, "adj-midchain-tx") \
+ _(DECRYPT4_POST, "dpdk-esp4-decrypt-post") \
+ _(DECRYPT6_POST, "dpdk-esp6-decrypt-post")
typedef enum
{
/*
 * NOTE(review): this span is unified-diff residue ('+'/'-' lines); the
 * comments below describe the post-change ('+') layout.
 *
 * Per-operation private data carried in the rte_crypto_op private area.
 * The change drops the whole-struct aligned(16) attribute and instead
 * places an explicit 16-byte alignment mark so only the crypto material
 * (counter block, AAD, ICV) is 16-byte aligned.
 */
typedef struct
{
- dpdk_gcm_cnt_blk cb;
- u8 aad[12];
/* next-node index for the dpdk-crypto-input dispatch */
u32 next;
- u8 icv[32];
-} dpdk_op_priv_t __attribute__ ((aligned (16)));
/* bi: presumably the vlib buffer index of the packet — verify at caller */
+ u32 bi;
/* encrypt: non-zero for outbound/encrypt direction — TODO confirm usage */
+ u8 encrypt;
+ CLIB_ALIGN_MARK (mark0, 16);
/* GCM counter block, AAD (grown 12 -> 16 bytes) and ICV scratch space */
+ dpdk_gcm_cnt_blk cb;
+ u8 aad[16];
+ u8 icv[32]; /* XXX last 16B in next cache line */
+} dpdk_op_priv_t;
/*
 * NOTE(review): unified-diff residue; '+' lines are the new state.
 *
 * Per-worker-thread crypto state.  The per-worker session hash is removed
 * here (a replacement lookup structure lives in the per-numa data, per the
 * other hunks of this diff); cache-line alignment moves from a struct
 * attribute to a CLIB alignment mark so sizeof() includes the padding.
 */
typedef struct
{
/* vector of resource indices assigned to this worker */
u16 *resource_idx;
- uword *session_by_drv_id_and_sa_index;
/* scratch array of op pointers used when draining/enqueueing bursts */
struct rte_crypto_op **ops;
/* per-algorithm resource selection tables */
u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
-} crypto_worker_main_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+} crypto_worker_main_t;
/*
 * NOTE(review): unified-diff residue; '+' lines are the new state.
 *
 * Descriptor for one cipher/auth algorithm supported by the plugin.
 * The aligned(8) attribute is replaced by an explicit leading alignment
 * mark (comment on the mark explains the 32-bit-build motivation).
 */
typedef struct
{
+ CLIB_ALIGN_MARK (pad, 8); /* align up to 8 bytes for 32bit builds */
char *name;
/* DPDK xform type (cipher vs auth) and plugin-side algorithm id */
enum rte_crypto_sym_xform_type type;
u32 alg;
/* boundary/disabled/resources semantics not visible in this fragment —
   TODO confirm against the .c users */
u8 boundary;
u8 disabled;
u8 resources;
-} crypto_alg_t __attribute__ ((aligned (8)));
+} crypto_alg_t;
/*
 * NOTE(review): unified-diff residue; '+' lines are the new state.
 *
 * Descriptor for one DPDK cryptodev.  The name pointer changes from
 * 'const i8 *' to 'const char *' — correct, since device name strings are
 * plain C strings and i8 (signed char) is not compatible with them.
 */
typedef struct
{
u8 drv_id;
u8 numa;
u16 id;
- const i8 *name;
+ const char *name;
u32 max_qp;
u64 features;
} crypto_dev_t;
/*
 * NOTE(review): unified-diff residue; '+' lines are the new state.
 *
 * Descriptor for one cryptodev driver: its name (same i8 -> char fix as
 * crypto_dev_t) and the vector of device ids belonging to it.
 */
typedef struct
{
- const i8 *name;
+ const char *name;
u16 *devs;
} crypto_drv_t;
/*
 * NOTE(review): the opening of this struct and its earlier fields are not
 * visible in this diff fragment (elided context); only the tail is shown.
 * Cache-line alignment moves from a struct attribute to a CLIB mark.
 */
u16 __unused;
/* per-burst staging of ops and buffer indices for this resource */
struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
u32 bi[VLIB_FRAME_SIZE];
-} crypto_resource_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+} crypto_resource_t;
+
+
+/*
+ * Deferred session-free record: a session queued for disposal together
+ * with a timestamp.  ts is presumably nanoseconds, matching the
+ * session_timeout field added to dpdk_crypto_main_t — TODO confirm.
+ */
+typedef struct
+{
+ u64 ts;
+ struct rte_cryptodev_sym_session *session;
+} crypto_session_disposal_t;
+
+
+/*
+ * Per-SA session cache entry: the cached cryptodev session and a bitmask
+ * of driver ids (bit N == driver id N) for which the session is valid.
+ * Replaces the old hash keyed on a packed (drv_id, sa_idx) u64, which
+ * required 64-bit builds.
+ */
+typedef struct
+{
+ struct rte_cryptodev_sym_session *session;
+ u64 dev_mask;
+ CLIB_ALIGN_MARK (pad, 16); /* align up to 16 bytes for 32bit builds */
+} crypto_session_by_drv_t;
/*
 * NOTE(review): unified-diff residue; '+' lines are the new state.
 *
 * Per-numa-node crypto data: op/session mempools, failure counters, and
 * (new) the session-by-SA lookup vector, the deferred-disposal vector and
 * a spinlock presumably guarding them — TODO confirm lock scope in the .c.
 */
typedef struct
{
struct rte_mempool *crypto_op;
struct rte_mempool *session_h;
struct rte_mempool **session_drv;
+ crypto_session_disposal_t *session_disposal;
uword *session_by_sa_index;
/* failure counters bumped on mempool-get / session-create errors */
u64 crypto_op_get_failed;
u64 session_h_failed;
u64 *session_drv_failed;
+ crypto_session_by_drv_t *session_by_drv_id_and_sa_index;
+ clib_spinlock_t lockp;
+ /* Required for vec_validate_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
} crypto_data_t;
/*
 * NOTE(review): unified-diff residue; '+' lines are the new state.
 *
 * Plugin main structure.  The flat per-SA session vector and max_drv_id
 * are removed; a session timeout (nanoseconds, per its own comment) is
 * added for the new deferred session-disposal machinery.
 */
typedef struct
{
crypto_worker_main_t *workers_main;
- struct rte_cryptodev_sym_session **sa_session;
crypto_dev_t *dev;
crypto_resource_t *resource;
crypto_alg_t *cipher_algs;
crypto_alg_t *auth_algs;
/* per-numa data, indexed by numa node */
crypto_data_t *data;
crypto_drv_t *drv;
- u8 max_drv_id;
+ u64 session_timeout; /* nsec */
u8 enabled;
} dpdk_crypto_main_t;
static_always_inline u32
crypto_op_len (void)
{
- const u32 align = 16;
+ const u32 align = 4;
u32 op_size =
sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ());
}
-/* XXX this requires 64 bit builds so hash_xxx macros use u64 key */
-typedef union
+
+/*
+ * Record that @session is usable by driver @drv_id for security
+ * association @sa_idx.  The per-numa table is a vector indexed by SA
+ * index; each entry holds the session pointer and a bitmask of driver
+ * ids sharing it.  Replaces the old 64-bit-only hash keyed on a packed
+ * (drv_id, sa_idx) value.
+ */
+static_always_inline void
+add_session_by_drv_and_sa_idx (struct rte_cryptodev_sym_session *session,
+ crypto_data_t * data, u32 drv_id, u32 sa_idx)
{
- u64 val;
- struct
- {
- u32 drv_id;
- u32 sa_idx;
- };
-} crypto_session_key_t;
+ crypto_session_by_drv_t *sbd;
+ vec_validate_aligned (data->session_by_drv_id_and_sa_index, sa_idx,
+ CLIB_CACHE_LINE_BYTES);
+ sbd = vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
+ /* dev_mask is u64: use a 64-bit unsigned constant so the shift is not
+ truncated (or UB for drv_id >= 32) on 32-bit builds where long is
+ 32 bits wide */
+ sbd->dev_mask |= 1ULL << drv_id;
+ sbd->session = session;
+}
+
+/*
+ * Return the cached session for (@drv_id, @sa_idx), or NULL when none
+ * has been created yet for that driver/SA pair.
+ */
+static_always_inline struct rte_cryptodev_sym_session *
+get_session_by_drv_and_sa_idx (crypto_data_t * data, u32 drv_id, u32 sa_idx)
+{
+ crypto_session_by_drv_t *sess_by_sa;
+ /* vec_len (not _vec_len) is NULL-safe: the vector is lazily created by
+ add_session_by_drv_and_sa_idx and may still be NULL here */
+ if (vec_len (data->session_by_drv_id_and_sa_index) <= sa_idx)
+ return NULL;
+ sess_by_sa =
+ vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
+ return (sess_by_sa->dev_mask & (1ULL << drv_id)) ? sess_by_sa->session
+ : NULL;
+}
/*
 * NOTE(review): unified-diff residue; '+' lines are the new state.
 *
 * Look up (or lazily create) the cryptodev session for SA @sa_idx on the
 * resource's driver.  Old code consulted the per-worker hash keyed by a
 * packed (drv_id, sa_idx) u64; new code consults the per-numa vector via
 * get_session_by_drv_and_sa_idx.  On a miss both fall back to
 * create_sym_session; on a hit the session is returned through @session
 * and NULL (no error) is returned.
 */
static_always_inline clib_error_t *
-crypto_get_session (struct rte_cryptodev_sym_session **session,
+crypto_get_session (struct rte_cryptodev_sym_session ** session,
u32 sa_idx,
crypto_resource_t * res,
crypto_worker_main_t * cwm, u8 is_outbound)
{
- crypto_session_key_t key = { 0 };
-
- key.drv_id = res->drv_id;
- key.sa_idx = sa_idx;
+ dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+ crypto_data_t *data;
+ struct rte_cryptodev_sym_session *sess;
- uword *val = hash_get (cwm->session_by_drv_id_and_sa_index, key.val);
/* per-numa lookup keyed by the resource's numa node and driver id */
+ data = vec_elt_at_index (dcm->data, res->numa);
+ sess = get_session_by_drv_and_sa_idx (data, res->drv_id, sa_idx);
- if (PREDICT_FALSE (!val))
+ if (PREDICT_FALSE (!sess))
return create_sym_session (session, sa_idx, res, cwm, is_outbound);
- session[0] = (struct rte_cryptodev_sym_session *) val[0];
+ session[0] = sess;
return NULL;
}
/* Not allowed to setup SA with no-aead-cipher/NULL or NULL/NULL */
- is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) |
- (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) |
+ is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) ||
+ (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) ||
(sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256));
if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE)
ret = rte_mempool_get_bulk (data->crypto_op, (void **) ops, n);
+ /* *INDENT-OFF* */
data->crypto_op_get_failed += ! !ret;
+ /* *INDENT-ON* */
return ret;
}
}
static_always_inline void
-crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm, u8 outbound,
- u32 node_index, u32 error, u8 numa)
+crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm,
+ u32 node_index, u32 error, u8 numa, u8 encrypt)
{
dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
crypto_resource_t *res;
/* *INDENT-OFF* */
vec_foreach (res_idx, cwm->resource_idx)
{
- u16 enq;
+ u16 enq, n_ops;
res = vec_elt_at_index (dcm->resource, res_idx[0]);
if (!res->n_ops)
continue;
- enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id + outbound,
- res->ops, res->n_ops);
- res->inflights[outbound] += enq;
+ n_ops = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt];
+ n_ops = res->n_ops < n_ops ? res->n_ops : n_ops;
+ enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id,
+ res->ops, n_ops);
+ ASSERT (n_ops == enq);
+ res->inflights[encrypt] += enq;
if (PREDICT_FALSE (enq < res->n_ops))
{
icb->salt = salt;
icb->iv[0] = seq;
icb->iv[1] = seq_hi;
-#if DPDK_NO_AEAD
- icb->cnt = clib_host_to_net_u32 (1);
-#endif
}
/*
 * NOTE(review): unified-diff residue; '-' lines remove the legacy
 * DPDK_NO_AEAD code path (pre-17.08 DPDK API where IV/AAD/digest lived in
 * the sym_op), shrinking the argument list accordingly.  Several context
 * lines appear elided in this fragment: sym_op is used below without a
 * visible initialization, and the non-AEAD branch of the surviving
 * if (is_aead) is not shown — consult the full file before relying on
 * this view.
 */
-#define __unused __attribute__((unused))
static_always_inline void
crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0,
struct rte_crypto_op *op, void *session,
u32 cipher_off, u32 cipher_len,
- u8 * icb __unused, u32 iv_size __unused,
u32 auth_off, u32 auth_len,
- u8 * aad __unused, u32 aad_size __unused,
- u8 * digest, u64 digest_paddr, u32 digest_size __unused)
+ u8 * aad, u8 * digest, u64 digest_paddr)
{
struct rte_crypto_sym_op *sym_op;
/* NOTE(review): sym_op assignment elided from this fragment */
sym_op->m_src = mb0;
sym_op->session = session;
-#if DPDK_NO_AEAD
- sym_op->cipher.data.offset = cipher_off;
- sym_op->cipher.data.length = cipher_len;
-
- sym_op->cipher.iv.data = icb;
- sym_op->cipher.iv.phys_addr =
- op->phys_addr + (uintptr_t) icb - (uintptr_t) op;
- sym_op->cipher.iv.length = iv_size;
-
- if (is_aead)
- {
- sym_op->auth.aad.data = aad;
- sym_op->auth.aad.phys_addr =
- op->phys_addr + (uintptr_t) aad - (uintptr_t) op;
- sym_op->auth.aad.length = aad_size;
- }
- else
- {
- sym_op->auth.data.offset = auth_off;
- sym_op->auth.data.length = auth_len;
- }
-
- sym_op->auth.digest.data = digest;
- sym_op->auth.digest.phys_addr = digest_paddr;
- sym_op->auth.digest.length = digest_size;
-#else /* ! DPDK_NO_AEAD */
if (is_aead)
{
sym_op->aead.data.offset = cipher_off;
/* NOTE(review): further aead setup lines elided from this fragment */
sym_op->auth.digest.data = digest;
sym_op->auth.digest.phys_addr = digest_paddr;
}
-#endif /* DPDK_NO_AEAD */
}
-#undef __unused
-
#endif /* __DPDK_IPSEC_H__ */
/*