dpdk-ipsec: fix encrypt/decrypt single queue
[vpp.git] / src/plugins/dpdk/ipsec/ipsec.h
index a46f5bf..5728459 100644
 #define always_inline static inline __attribute__ ((__always_inline__))
 #endif
 
+#define DPDK_CRYPTO_N_QUEUE_DESC  2048
+#define DPDK_CRYPTO_NB_SESS_OBJS  20000
+
 #define foreach_dpdk_crypto_input_next         \
   _(DROP, "error-drop")                                \
   _(IP4_LOOKUP, "ip4-lookup")                   \
   _(IP6_LOOKUP, "ip6-lookup")                   \
   _(INTERFACE_OUTPUT, "interface-output")      \
-  _(DECRYPT_POST, "dpdk-esp-decrypt-post")
+  _(DECRYPT4_POST, "dpdk-esp4-decrypt-post")     \
+  _(DECRYPT6_POST, "dpdk-esp6-decrypt-post")
 
 typedef enum
 {
@@ -57,9 +61,12 @@ typedef struct
 typedef struct
 {
   u32 next;
-  dpdk_gcm_cnt_blk cb __attribute__ ((aligned (16)));
+  u32 bi;
+  u8 encrypt;
+    CLIB_ALIGN_MARK (mark0, 16);
+  dpdk_gcm_cnt_blk cb;
   u8 aad[16];
-  u8 icv[32];
+  u8 icv[32];                  /* XXX last 16B in next cache line */
 } dpdk_op_priv_t;
 
 typedef struct
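/*
 * Illustrative sketch, not part of the patch: the typedef-level
 * __attribute__ ((aligned (...))) annotations are replaced by
 * CLIB_ALIGN_MARK / CLIB_CACHE_LINE_ALIGN_MARK members, which in
 * vppinfra expand (roughly) to a zero-size array carrying the alignment
 * attribute.  That keeps the alignment inside the struct body and also
 * allows aligning a member mid-struct, as done for cb in dpdk_op_priv_t
 * above.  The struct below is hypothetical, for illustration only.
 */
typedef struct
{
  u32 next;                                    /* offset 0 */
  u8 mark0[0] __attribute__ ((aligned (16)));  /* roughly what CLIB_ALIGN_MARK (mark0, 16) expands to */
  u8 block[16];                                /* offset 16, not 4 */
} example_align_t;                             /* sizeof == 32, alignment == 16 */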
@@ -68,10 +75,12 @@ typedef struct
   struct rte_crypto_op **ops;
   u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
   u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
-} crypto_worker_main_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
+    CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+} crypto_worker_main_t;
 
 typedef struct
 {
+  CLIB_ALIGN_MARK (pad, 8);    /* align up to 8 bytes for 32bit builds */
   char *name;
   enum rte_crypto_sym_xform_type type;
   u32 alg;
@@ -81,7 +90,7 @@ typedef struct
   u8 boundary;
   u8 disabled;
   u8 resources;
-} crypto_alg_t __attribute__ ((aligned (8)));
+} crypto_alg_t;
 
 typedef struct
 {
@@ -92,14 +101,14 @@ typedef struct
   u8 drv_id;
   u8 numa;
   u16 id;
-  const i8 *name;
+  const char *name;
   u32 max_qp;
   u64 features;
 } crypto_dev_t;
 
 typedef struct
 {
-  const i8 *name;
+  const char *name;
   u16 *devs;
 } crypto_drv_t;
 
@@ -116,7 +125,8 @@ typedef struct
   u16 __unused;
   struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
   u32 bi[VLIB_FRAME_SIZE];
-} crypto_resource_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
+    CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+} crypto_resource_t;
 
 typedef struct
 {
@@ -124,6 +134,13 @@ typedef struct
   struct rte_cryptodev_sym_session *session;
 } crypto_session_disposal_t;
 
+typedef struct
+{
+  struct rte_cryptodev_sym_session *session;
+  u64 dev_mask;
+    CLIB_ALIGN_MARK (pad, 16); /* align up to 16 bytes for 32bit builds */
+} crypto_session_by_drv_t;
+
 typedef struct
 {
   struct rte_mempool *crypto_op;
@@ -131,16 +148,18 @@ typedef struct
   struct rte_mempool **session_drv;
   crypto_session_disposal_t *session_disposal;
   uword *session_by_sa_index;
-  uword *session_by_drv_id_and_sa_index;
   u64 crypto_op_get_failed;
   u64 session_h_failed;
   u64 *session_drv_failed;
+  crypto_session_by_drv_t *session_by_drv_id_and_sa_index;
+  clib_spinlock_t lockp;
+  /* Required for vec_validate_aligned */
+    CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
 } crypto_data_t;
 
 typedef struct
 {
   crypto_worker_main_t *workers_main;
-  struct rte_cryptodev_sym_session **sa_session;
   crypto_dev_t *dev;
   crypto_resource_t *resource;
   crypto_alg_t *cipher_algs;
@@ -190,38 +209,47 @@ crypto_op_get_priv (struct rte_crypto_op * op)
   return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ());
 }
 
-/* XXX this requires 64 bit builds so hash_xxx macros use u64 key */
-typedef union
+
+static_always_inline void
+add_session_by_drv_and_sa_idx (struct rte_cryptodev_sym_session *session,
+                              crypto_data_t * data, u32 drv_id, u32 sa_idx)
+{
+  crypto_session_by_drv_t *sbd;
+  vec_validate_aligned (data->session_by_drv_id_and_sa_index, sa_idx,
+                       CLIB_CACHE_LINE_BYTES);
+  sbd = vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
+  sbd->dev_mask |= 1L << drv_id;
+  sbd->session = session;
+}
+
+static_always_inline struct rte_cryptodev_sym_session *
+get_session_by_drv_and_sa_idx (crypto_data_t * data, u32 drv_id, u32 sa_idx)
 {
-  u64 val;
-  struct
-  {
-    u32 drv_id;
-    u32 sa_idx;
-  };
-} crypto_session_key_t;
+  crypto_session_by_drv_t *sess_by_sa;
+  if (_vec_len (data->session_by_drv_id_and_sa_index) <= sa_idx)
+    return NULL;
+  sess_by_sa =
+    vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
+  return (sess_by_sa->dev_mask & (1L << drv_id)) ? sess_by_sa->session : NULL;
+}
 
 static_always_inline clib_error_t *
-crypto_get_session (struct rte_cryptodev_sym_session **session,
+crypto_get_session (struct rte_cryptodev_sym_session ** session,
                    u32 sa_idx,
                    crypto_resource_t * res,
                    crypto_worker_main_t * cwm, u8 is_outbound)
 {
   dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
   crypto_data_t *data;
-  uword *val;
-  crypto_session_key_t key = { 0 };
-
-  key.drv_id = res->drv_id;
-  key.sa_idx = sa_idx;
+  struct rte_cryptodev_sym_session *sess;
 
   data = vec_elt_at_index (dcm->data, res->numa);
-  val = hash_get (data->session_by_drv_id_and_sa_index, key.val);
+  sess = get_session_by_drv_and_sa_idx (data, res->drv_id, sa_idx);
 
-  if (PREDICT_FALSE (!val))
+  if (PREDICT_FALSE (!sess))
     return create_sym_session (session, sa_idx, res, cwm, is_outbound);
 
-  session[0] = (struct rte_cryptodev_sym_session *) val[0];
+  session[0] = sess;
 
   return NULL;
 }
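/*
 * Illustrative sketch, not part of the patch: the old hash keyed by
 * (drv_id, sa_idx) -- the reason for the 64-bit-build restriction noted
 * above -- is replaced by a vector indexed by sa_idx, where each
 * crypto_session_by_drv_t holds the session pointer plus a bitmask of
 * the driver ids it has been created for.  A lookup is a bounds check,
 * a vector index and a bit test.  The function below is hypothetical,
 * with made-up driver ids and SA index, for illustration only.
 */
static_always_inline void
session_cache_usage_sketch (crypto_data_t * data,
                            struct rte_cryptodev_sym_session * sess)
{
  /* cache one session for SA 42 on drivers 0 and 3 */
  add_session_by_drv_and_sa_idx (sess, data, 0 /* drv_id */, 42 /* sa_idx */);
  add_session_by_drv_and_sa_idx (sess, data, 3 /* drv_id */, 42 /* sa_idx */);

  /* dev_mask for SA 42 is now 0x9 (bits 0 and 3) */
  ASSERT (get_session_by_drv_and_sa_idx (data, 3, 42) == sess);

  /* a driver whose bit is not set gets NULL and falls back to
     create_sym_session() in crypto_get_session() above */
  ASSERT (get_session_by_drv_and_sa_idx (data, 1, 42) == NULL);
}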
@@ -235,8 +263,8 @@ get_resource (crypto_worker_main_t * cwm, ipsec_sa_t * sa)
 
   /* Not allowed to setup SA with no-aead-cipher/NULL or NULL/NULL */
 
-  is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) |
-            (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) |
+  is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) ||
+            (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) ||
             (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256));
 
   if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE)
@@ -280,8 +308,8 @@ crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
 }
 
 static_always_inline void
-crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm, u8 outbound,
-                   u32 node_index, u32 error, u8 numa)
+crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm,
+                   u32 node_index, u32 error, u8 numa, u8 encrypt)
 {
   dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
   crypto_resource_t *res;
@@ -290,15 +318,18 @@ crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm, u8 outbound,
   /* *INDENT-OFF* */
   vec_foreach (res_idx, cwm->resource_idx)
     {
-      u16 enq;
+      u16 enq, n_ops;
       res = vec_elt_at_index (dcm->resource, res_idx[0]);
 
       if (!res->n_ops)
        continue;
 
-      enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id + outbound,
-                                        res->ops, res->n_ops);
-      res->inflights[outbound] += enq;
+      n_ops = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt];
+      n_ops = res->n_ops < n_ops ? res->n_ops : n_ops;
+      enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id,
+                                        res->ops, n_ops);
+      ASSERT (n_ops == enq);
+      res->inflights[encrypt] += enq;
 
       if (PREDICT_FALSE (enq < res->n_ops))
        {
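/*
 * Illustrative note, not part of the patch: encrypt and decrypt now
 * share a single queue pair (res->qp_id, no longer qp_id + outbound),
 * and each direction is budgeted half of the DPDK_CRYPTO_N_QUEUE_DESC
 * descriptors so the two together cannot oversubscribe the queue.
 * Worked example with hypothetical numbers: if res->inflights[encrypt]
 * is 900 and res->n_ops is 256, then
 *
 *   n_ops = DPDK_CRYPTO_N_QUEUE_DESC / 2 - 900 = 1024 - 900 = 124
 *   n_ops = min (256, 124) = 124
 *
 * so 124 ops are enqueued this round (the ASSERT expects the device to
 * accept a burst that fits within the free budget) and the surplus is
 * left to the enq < res->n_ops branch.
 */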