vlib: introduce vlib_get_main_by_index(), vlib_get_n_threads()
diff --git a/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c b/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c
index 682295b..81f386c 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c
@@ -18,7 +18,7 @@
 #include <vlib/vlib.h>
 #include <vnet/plugin/plugin.h>
 #include <vnet/crypto/crypto.h>
-#include <vnet/vnet.h>
+#include <vnet/ipsec/ipsec.h>
 #include <vpp/app/version.h>
 
 #include <dpdk/buffer.h>
 #define CRYPTODEV_MAX_AAD_SIZE 16
 #define CRYPTODEV_MAX_N_SGL    8 /**< maximum number of segments */
 
-/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
-#define foreach_vnet_aead_crypto_conversion \
-  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
-  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
-  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
-  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
-  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
-  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)
+/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN, KEY_LEN
+ */
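+/* Each AEAD algorithm is listed twice because IPsec ESP uses an 8-byte AAD
+ * (SPI + sequence number), or a 12-byte AAD when extended sequence numbers
+ * (ESN) are in use. */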
+#define foreach_vnet_aead_crypto_conversion                                   \
+  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 8, 16)                               \
+  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 12, 16)                              \
+  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 8, 24)                               \
+  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 12, 24)                              \
+  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 8, 32)                               \
+  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 12, 32)
 
 /**
- * crypto (alg, cryptodev_alg), hash (alg, digest-size)
+ * crypto (alg, cryptodev_alg, key_size), hash (alg, digest-size)
  **/
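+/* e.g. _ (AES_128_CBC, AES_CBC, 16, SHA1, 12) maps
+ * VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12 to RTE_CRYPTO_CIPHER_AES_CBC with a
+ * 16-byte key plus RTE_CRYPTO_AUTH_SHA1_HMAC with a 12-byte digest. */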
-#define foreach_cryptodev_link_async_alg       \
-  _ (AES_128_CBC, AES_CBC, SHA1, 12)           \
-  _ (AES_192_CBC, AES_CBC, SHA1, 12)           \
-  _ (AES_256_CBC, AES_CBC, SHA1, 12)           \
-  _ (AES_128_CBC, AES_CBC, SHA224, 14)         \
-  _ (AES_192_CBC, AES_CBC, SHA224, 14)         \
-  _ (AES_256_CBC, AES_CBC, SHA224, 14)         \
-  _ (AES_128_CBC, AES_CBC, SHA256, 16)         \
-  _ (AES_192_CBC, AES_CBC, SHA256, 16)         \
-  _ (AES_256_CBC, AES_CBC, SHA256, 16)         \
-  _ (AES_128_CBC, AES_CBC, SHA384, 24)         \
-  _ (AES_192_CBC, AES_CBC, SHA384, 24)         \
-  _ (AES_256_CBC, AES_CBC, SHA384, 24)         \
-  _ (AES_128_CBC, AES_CBC, SHA512, 32)         \
-  _ (AES_192_CBC, AES_CBC, SHA512, 32)         \
-  _ (AES_256_CBC, AES_CBC, SHA512, 32)
+#define foreach_cryptodev_link_async_alg                                      \
+  _ (AES_128_CBC, AES_CBC, 16, SHA1, 12)                                      \
+  _ (AES_192_CBC, AES_CBC, 24, SHA1, 12)                                      \
+  _ (AES_256_CBC, AES_CBC, 32, SHA1, 12)                                      \
+  _ (AES_128_CBC, AES_CBC, 16, SHA224, 14)                                    \
+  _ (AES_192_CBC, AES_CBC, 24, SHA224, 14)                                    \
+  _ (AES_256_CBC, AES_CBC, 32, SHA224, 14)                                    \
+  _ (AES_128_CBC, AES_CBC, 16, SHA256, 16)                                    \
+  _ (AES_192_CBC, AES_CBC, 24, SHA256, 16)                                    \
+  _ (AES_256_CBC, AES_CBC, 32, SHA256, 16)                                    \
+  _ (AES_128_CBC, AES_CBC, 16, SHA384, 24)                                    \
+  _ (AES_192_CBC, AES_CBC, 24, SHA384, 24)                                    \
+  _ (AES_256_CBC, AES_CBC, 32, SHA384, 24)                                    \
+  _ (AES_128_CBC, AES_CBC, 16, SHA512, 32)                                    \
+  _ (AES_192_CBC, AES_CBC, 24, SHA512, 32)                                    \
+  _ (AES_256_CBC, AES_CBC, 32, SHA512, 32)
 
 typedef enum
 {
@@ -84,14 +85,40 @@ typedef enum
 
 typedef struct
 {
-  struct rte_cryptodev_sym_session *keys[CRYPTODEV_N_OP_TYPES];
+  union rte_cryptodev_session_ctx **keys;
 } cryptodev_key_t;
 
+/* Replicates DPDK's rte_cryptodev_sym_capability structure, with the
+ * min/max/increment size ranges expanded into vpp vectors */
+typedef struct
+{
+  enum rte_crypto_sym_xform_type xform_type;
+  union
+  {
+    struct
+    {
+      enum rte_crypto_auth_algorithm algo; /* auth algo */
+      u32 *digest_sizes;                  /* vector of auth digest sizes */
+    } auth;
+    struct
+    {
+      enum rte_crypto_cipher_algorithm algo; /* cipher algo */
+      u32 *key_sizes;                       /* vector of cipher key sizes */
+    } cipher;
+    struct
+    {
+      enum rte_crypto_aead_algorithm algo; /* aead algo */
+      u32 *key_sizes;                     /* vector of aead key sizes */
+      u32 *aad_sizes;                     /* vector of aad sizes */
+      u32 *digest_sizes;                  /* vector of aead digest sizes */
+    } aead;
+  };
+} cryptodev_capability_t;
+
 typedef struct
 {
   u32 dev_id;
   u32 q_id;
-  u8 *dp_service_buffer;
   char *desc;
 } cryptodev_inst_t;
 
@@ -105,7 +132,7 @@ typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
   vlib_buffer_t *b[VNET_CRYPTO_FRAME_SIZE];
-  struct rte_crypto_dp_service_ctx *dp_service;
+  struct rte_crypto_raw_dp_ctx *ctx;
   struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
   struct rte_ring *cached_frame;
   u16 aad_index;
@@ -114,6 +141,7 @@ typedef struct
   u16 cryptodev_id;
   u16 cryptodev_q;
   u16 inflight;
+  union rte_cryptodev_session_ctx reset_sess; /* session data for reset ctx */
 } cryptodev_engine_thread_t;
 
 typedef struct
@@ -125,14 +153,15 @@ typedef struct
   cryptodev_inst_t *cryptodev_inst;
   clib_bitmap_t *active_cdev_inst_mask;
   clib_spinlock_t tlock;
+  cryptodev_capability_t *supported_caps;
 } cryptodev_main_t;
 
 cryptodev_main_t cryptodev_main;
 
-static int
+static_always_inline int
 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
-                   cryptodev_op_type_t op_type,
-                   const vnet_crypto_key_t * key, u32 aad_len)
+                   cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
+                   u32 aad_len)
 {
   struct rte_crypto_aead_xform *aead_xform = &xform->aead;
   memset (xform, 0, sizeof (*xform));
@@ -157,10 +186,10 @@ prepare_aead_xform (struct rte_crypto_sym_xform *xform,
   return 0;
 }
 
-static int
+static_always_inline int
 prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
-                     const vnet_crypto_key_t * key)
+                     const vnet_crypto_key_t *key)
 {
   struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
   vnet_crypto_key_t *key_cipher, *key_auth;
@@ -194,11 +223,11 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
 
   switch (key->async_alg)
     {
-#define _(a, b, c, d) \
-  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:\
-    cipher_algo = RTE_CRYPTO_CIPHER_##b; \
-    auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
-    digest_len = d; \
+#define _(a, b, c, d, e)                                                      \
+  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
+    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
+    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
+    digest_len = e;                                                           \
     break;
 
       foreach_cryptodev_link_async_alg
@@ -221,18 +250,57 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
   return 0;
 }
 
-static int
-cryptodev_session_create (vnet_crypto_key_t * const key,
-                         struct rte_mempool *sess_priv_pool,
-                         cryptodev_key_t * session_pair, u32 aad_len)
+static_always_inline void
+cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
+{
+  u32 n_devs, i;
+
+  if (sess == NULL)
+    return;
+
+  n_devs = rte_cryptodev_count ();
+
+  for (i = 0; i < n_devs; i++)
+    rte_cryptodev_sym_session_clear (i, sess);
+
+  rte_cryptodev_sym_session_free (sess);
+}
+
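+/* Create the encrypt/decrypt session pair for key `idx' on the calling
+ * thread's numa node and publish it into cmt->keys; on failure any
+ * half-created session is freed again. */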
+static_always_inline int
+cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
+                         u32 aad_len)
 {
-  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
-  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
   cryptodev_main_t *cmt = &cryptodev_main;
+  cryptodev_numa_data_t *numa_data;
   cryptodev_inst_t *dev_inst;
-  struct rte_cryptodev *cdev;
+  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
+  struct rte_mempool *sess_pool, *sess_priv_pool;
+  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
+  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
+  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
+  struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
+  u32 numa_node = vm->numa_node;
   int ret;
-  uint8_t dev_id = 0;
+
+  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
+  sess_pool = numa_data->sess_pool;
+  sess_priv_pool = numa_data->sess_priv_pool;
+
+  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
+    rte_cryptodev_sym_session_create (sess_pool);
+  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
+    {
+      ret = -1;
+      goto clear_key;
+    }
+
+  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
+    rte_cryptodev_sym_session_create (sess_pool);
+  if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
+    {
+      ret = -1;
+      goto clear_key;
+    }
 
   if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
     ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
@@ -249,42 +317,84 @@ cryptodev_session_create (vnet_crypto_key_t * const key,
 
   vec_foreach (dev_inst, cmt->cryptodev_inst)
   {
-    dev_id = dev_inst->dev_id;
-    cdev = rte_cryptodev_pmd_get_dev (dev_id);
+    u32 dev_id = dev_inst->dev_id;
+    struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);
 
     /* if the session is already configured for the driver type, avoid
        configuring it again to increase the session data's refcnt */
-    if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
-       session_pair->keys[1]->sess_data[cdev->driver_id].data)
+    if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[cdev->driver_id].data &&
+       sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[cdev->driver_id].data)
       continue;
 
-    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
-                                         xforms_enc, sess_priv_pool);
-    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
-                                         xforms_dec, sess_priv_pool);
+    ret = rte_cryptodev_sym_session_init (
+      dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc, sess_priv_pool);
+    ret = rte_cryptodev_sym_session_init (
+      dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec, sess_priv_pool);
     if (ret < 0)
       return ret;
   }
-  session_pair->keys[0]->opaque_data = aad_len;
-  session_pair->keys[1]->opaque_data = aad_len;
 
-  return 0;
+  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
+  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
+
+  CLIB_MEMORY_STORE_BARRIER ();
+  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess =
+    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
+  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess =
+    sessions[CRYPTODEV_OP_TYPE_DECRYPT];
+
+clear_key:
+  if (ret != 0)
+    {
+      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
+      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
+    }
+  return ret;
 }
 
-static void
-cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
+static int
+cryptodev_supports_param_value (u32 *params, u32 param_value)
 {
-  u32 n_devs, i;
-
-  if (sess == NULL)
-    return;
+  u32 *value;
+  vec_foreach (value, params)
+    {
+      if (*value == param_value)
+       return 1;
+    }
+  return 0;
+}
 
-  n_devs = rte_cryptodev_count ();
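+/* Return 1 when the capability intersection computed at probe time supports
+ * the given algorithm with the requested key/digest/aad sizes; size
+ * arguments that do not apply to the xform type are ignored. */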
+static int
+cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
+                            u32 key_size, u32 digest_size, u32 aad_size)
+{
+  cryptodev_main_t *cmt = &cryptodev_main;
+  cryptodev_capability_t *cap;
+  vec_foreach (cap, cmt->supported_caps)
+    {
 
-  for (i = 0; i < n_devs; i++)
-    rte_cryptodev_sym_session_clear (i, sess);
+      if (cap->xform_type != idx->type)
+       continue;
 
-  rte_cryptodev_sym_session_free (sess);
+      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+         cap->auth.algo == idx->algo.auth &&
+         cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
+       return 1;
+
+      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+         cap->cipher.algo == idx->algo.cipher &&
+         cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
+       return 1;
+
+      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+         cap->aead.algo == idx->algo.aead &&
+         cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
+         cryptodev_supports_param_value (cap->aead.digest_sizes,
+                                         digest_size) &&
+         cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
+       return 1;
+    }
+  return 0;
 }
 
 static int
@@ -296,8 +406,8 @@ cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key)
 
   alg = key->alg;
 
-#define _(a, b, c, d, e, f)    \
-  if (alg == VNET_CRYPTO_ALG_##a) \
+#define _(a, b, c, d, e, f, g)                                                \
+  if (alg == VNET_CRYPTO_ALG_##a)                                             \
     return 0;
 
   foreach_vnet_aead_crypto_conversion
@@ -310,72 +420,44 @@ cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
 {
   cryptodev_main_t *cmt = &cryptodev_main;
-  cryptodev_numa_data_t *numa_data;
   vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
-  struct rte_mempool *sess_pool, *sess_priv_pool;
   cryptodev_key_t *ckey = 0;
-  int ret = 0;
+  u32 i;
 
-  if (kop == VNET_CRYPTO_KEY_OP_DEL)
+  vec_validate (cmt->keys, idx);
+  ckey = vec_elt_at_index (cmt->keys, idx);
+
+  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
     {
       if (idx >= vec_len (cmt->keys))
        return;
 
-      ckey = pool_elt_at_index (cmt->keys, idx);
-      cryptodev_session_del (ckey->keys[0]);
-      cryptodev_session_del (ckey->keys[1]);
-      ckey->keys[0] = 0;
-      ckey->keys[1] = 0;
-      pool_put (cmt->keys, ckey);
+      vec_foreach_index (i, cmt->per_numa_data)
+       {
+         if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess)
+           {
+             cryptodev_session_del (
+               ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess);
+             cryptodev_session_del (
+               ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess);
+
+             CLIB_MEMORY_STORE_BARRIER ();
+             ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess = 0;
+             ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess = 0;
+           }
+       }
       return;
     }
-  else if (kop == VNET_CRYPTO_KEY_OP_MODIFY)
-    {
-      if (idx >= vec_len (cmt->keys))
-       return;
 
-      ckey = pool_elt_at_index (cmt->keys, idx);
-
-      cryptodev_session_del (ckey->keys[0]);
-      cryptodev_session_del (ckey->keys[1]);
-      ckey->keys[0] = 0;
-      ckey->keys[1] = 0;
-    }
-  else                         /* create key */
-    pool_get_zero (cmt->keys, ckey);
+  /* create key */
 
   /* do not create session for unsupported alg */
   if (cryptodev_check_supported_vnet_alg (key))
     return;
 
-  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
-  sess_pool = numa_data->sess_pool;
-  sess_priv_pool = numa_data->sess_priv_pool;
-
-  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
-  if (!ckey->keys[0])
-    {
-      ret = -1;
-      goto clear_key;
-    }
-
-  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
-  if (!ckey->keys[1])
-    {
-      ret = -1;
-      goto clear_key;
-    }
-
-  ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);
-
-clear_key:
-  if (ret != 0)
-    {
-      cryptodev_session_del (ckey->keys[0]);
-      cryptodev_session_del (ckey->keys[1]);
-      memset (ckey, 0, sizeof (*ckey));
-      pool_put (cmt->keys, ckey);
-    }
+  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
+  vec_foreach_index (i, ckey->keys)
+    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
 }
 
 /*static*/ void
@@ -447,6 +529,14 @@ compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t * fe, i16 * min_ofs,
   return ofs.raw;
 }
 
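+/* Re-arm the thread's raw data-path context with the dummy "reset" session
+ * so the queue pair is left in a usable state after an error or a resource
+ * re-assignment. */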
+static_always_inline void
+cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
+{
+  rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q,
+                                     cet->ctx, RTE_CRYPTO_OP_WITH_SESSION,
+                                     cet->reset_sess, 0);
+}
+
 static_always_inline int
 cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
                                     vnet_crypto_async_frame_t * frame,
@@ -456,14 +546,13 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
   vnet_crypto_async_frame_elt_t *fe;
   struct rte_crypto_vec *vec;
-  struct rte_crypto_data iv_vec, digest_vec;
+  struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
   vlib_buffer_t **b;
   u32 n_elts;
-  cryptodev_key_t *key;
-  u32 last_key_index;
-  union rte_crypto_sym_ofs cofs;
+  u32 last_key_index = ~0;
   i16 min_ofs;
   u32 max_end;
+  int status;
 
   n_elts = frame->n_elts;
 
@@ -478,30 +567,12 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
 
   vec = cet->vec;
   b = cet->b;
-
   fe = frame->elts;
 
-  key = pool_elt_at_index (cmt->keys, fe->key_index);
-  last_key_index = fe->key_index;
-
-  if (PREDICT_FALSE
-      (rte_cryptodev_dp_configure_service
-       (cet->cryptodev_id, cet->cryptodev_q, RTE_CRYPTO_DP_SYM_CHAIN,
-       RTE_CRYPTO_OP_WITH_SESSION,
-       (union rte_cryptodev_session_ctx) key->keys[op_type], cet->dp_service,
-       0) < 0))
-    {
-      cryptodev_mark_frame_err_status (frame,
-                                      VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
-      return -1;
-    }
-
-  cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);
-
   while (n_elts)
     {
+      union rte_crypto_sym_ofs cofs;
       u16 n_seg = 1;
-      int status;
 
       if (n_elts > 2)
        {
@@ -513,77 +584,81 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
 
       if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
-         cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);
+         cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
 
-         key = pool_elt_at_index (cmt->keys, fe->key_index);
-         last_key_index = fe->key_index;
-
-         if (PREDICT_FALSE
-             (rte_cryptodev_dp_configure_service
-              (cet->cryptodev_id, cet->cryptodev_q, RTE_CRYPTO_DP_SYM_CHAIN,
-               RTE_CRYPTO_OP_WITH_SESSION,
-               (union rte_cryptodev_session_ctx) key->keys[op_type],
-               cet->dp_service, 1) < 0))
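+         /* the session pair for this key on this numa node is created
+          * lazily, on first use in the data path */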
+         if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
+                            0))
            {
-             cryptodev_mark_frame_err_status (frame,
-                                              VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
-             return -1;
+             status = cryptodev_session_create (vm, fe->key_index, 0);
+             if (PREDICT_FALSE (status < 0))
+               goto error_exit;
            }
+
+         status = rte_cryptodev_configure_raw_dp_ctx (
+           cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
+           RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
+           /* is_update */ 1);
+         if (PREDICT_FALSE (status < 0))
+           goto error_exit;
+
+         last_key_index = fe->key_index;
        }
 
+      cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);
+
       vec->len = max_end - min_ofs;
       if (cmt->iova_mode == RTE_IOVA_VA)
        {
-         vec->base = (void *) (b[0]->data + min_ofs);
-         vec->iova = pointer_to_uword (b[0]->data) + min_ofs;
-         iv_vec.base = (void *) fe->iv;
+         vec[0].base = (void *) (b[0]->data + min_ofs);
+         vec[0].iova = pointer_to_uword (b[0]->data) + min_ofs;
+         iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
-         digest_vec.base = (void *) fe->tag;
+         digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
        }
       else
        {
-         vec->base = (void *) (b[0]->data + min_ofs);
-         vec->iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
-         iv_vec.base = (void *) fe->iv;
+         vec[0].base = (void *) (b[0]->data + min_ofs);
+         vec[0].iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
+         iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
-         digest_vec.base = (void *) fe->tag;
+         digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->digest);
        }
 
       if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
-         vec->len = b[0]->current_data + b[0]->current_length - min_ofs;
-         if (cryptodev_frame_build_sgl
-             (vm, cmt->iova_mode, vec, &n_seg, b[0],
-              max_end - min_ofs - vec->len) < 0)
-           {
-             cryptodev_mark_frame_err_status (frame,
-                                              VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
-             return -1;
-           }
+         vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs;
+         if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
+                                        max_end - min_ofs - vec->len) < 0)
+           goto error_exit;
        }
 
-      status = rte_cryptodev_dp_submit_single_job (cet->dp_service,
-                                                  vec, n_seg, cofs, &iv_vec,
-                                                  &digest_vec, 0,
-                                                  (void *) frame);
-      if (status < 0)
-       {
-         cryptodev_mark_frame_err_status (frame,
-                                          VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
-         return -1;
-       }
+      status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
+                                         &digest_vec, 0, (void *) frame);
+      if (PREDICT_FALSE (status < 0))
+       goto error_exit;
 
       b++;
       fe++;
       n_elts--;
     }
 
-  rte_cryptodev_dp_submit_done (cet->dp_service, frame->n_elts);
-  cet->inflight += frame->n_elts;
+  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
+  if (PREDICT_FALSE (status < 0))
+    {
+      cryptodev_reset_ctx (cet);
+      return -1;
+    }
 
+  cet->inflight += frame->n_elts;
   return 0;
+
+error_exit:
+  cryptodev_mark_frame_err_status (frame,
+                                  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+  cryptodev_reset_ctx (cet);
+  return -1;
 }
 
 static_always_inline int
@@ -596,12 +671,11 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
   vnet_crypto_async_frame_elt_t *fe;
   vlib_buffer_t **b;
   u32 n_elts;
-  cryptodev_key_t *key;
-  u32 last_key_index;
   union rte_crypto_sym_ofs cofs;
   struct rte_crypto_vec *vec;
-  struct rte_crypto_data iv_vec, digest_vec, aad_vec;
-  u8 sess_aad_len;
+  struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
+  u32 last_key_index = ~0;
+  int status;
 
   n_elts = frame->n_elts;
 
@@ -617,32 +691,11 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
   vec = cet->vec;
   fe = frame->elts;
   b = cet->b;
-
   cofs.raw = 0;
 
-  key = pool_elt_at_index (cmt->keys, fe->key_index);
-  last_key_index = fe->key_index;
-  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
-  if (PREDICT_FALSE (sess_aad_len != aad_len))
-    cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
-                           fe->key_index, aad_len);
-
-  if (PREDICT_FALSE
-      (rte_cryptodev_dp_configure_service
-       (cet->cryptodev_id, cet->cryptodev_q, RTE_CRYPTO_DP_SYM_AEAD,
-       RTE_CRYPTO_OP_WITH_SESSION,
-       (union rte_cryptodev_session_ctx) key->keys[op_type], cet->dp_service,
-       0) < 0))
-    {
-      cryptodev_mark_frame_err_status (frame,
-                                      VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
-      return -1;
-    }
-
   while (n_elts)
     {
       u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
-      int status;
       u16 n_seg = 1;
 
       if (n_elts > 1)
@@ -651,28 +704,36 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
          vlib_prefetch_buffer_header (b[1], LOAD);
        }
 
-      if (last_key_index != fe->key_index)
+      if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
-         key = pool_elt_at_index (cmt->keys, fe->key_index);
-         sess_aad_len = (u8) key->keys[op_type]->opaque_data;
-         if (PREDICT_FALSE (sess_aad_len != aad_len))
+         cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
+
+         if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
+                            0))
            {
-             cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
-                                     fe->key_index, aad_len);
+             status = cryptodev_session_create (vm, fe->key_index, aad_len);
+             if (PREDICT_FALSE (status < 0))
+               goto error_exit;
            }
-         last_key_index = fe->key_index;
 
-         if (PREDICT_FALSE
-             (rte_cryptodev_dp_configure_service
-              (cet->cryptodev_id, cet->cryptodev_q, RTE_CRYPTO_DP_SYM_AEAD,
-               RTE_CRYPTO_OP_WITH_SESSION,
-               (union rte_cryptodev_session_ctx) key->keys[op_type],
-               cet->dp_service, 1) < 0))
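+         /* the session caches its AAD length in opaque_data; on a mismatch
+          * the session pair is torn down and re-created with the new
+          * length */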
+         if (PREDICT_FALSE ((u8) key->keys[vm->numa_node][op_type]
+                              .crypto_sess->opaque_data != aad_len))
            {
-             cryptodev_mark_frame_err_status (frame,
-                                              VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
-             return -1;
+             cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
+                                     fe->key_index, aad_len);
+             status = cryptodev_session_create (vm, fe->key_index, aad_len);
+             if (PREDICT_FALSE (status < 0))
+               goto error_exit;
            }
+
+         status = rte_cryptodev_configure_raw_dp_ctx (
+           cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
+           RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
+           /* is_update */ 1);
+         if (PREDICT_FALSE (status < 0))
+           goto error_exit;
+
+         last_key_index = fe->key_index;
        }
 
       if (cmt->iova_mode == RTE_IOVA_VA)
@@ -680,11 +741,11 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
          vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
          vec[0].iova = pointer_to_uword (vec[0].base);
          vec[0].len = fe->crypto_total_length;
-         iv_vec.base = (void *) fe->iv;
+         iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
-         digest_vec.base = (void *) fe->tag;
+         digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
-         aad_vec.base = (void *) (cet->aad_buf + aad_offset);
+         aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
        }
       else
@@ -693,11 +754,11 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
          vec[0].iova =
            vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
          vec[0].len = fe->crypto_total_length;
-         iv_vec.base = (void *) fe->iv;
+         iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
-         aad_vec.base = (void *) (cet->aad_buf + aad_offset);
+         aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
-         digest_vec.base = (void *) fe->tag;
+         digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
        }
 
@@ -712,37 +773,39 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
 
       if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
-         vec[0].len = b[0]->current_data +
-           b[0]->current_length - fe->crypto_start_offset;
-         if (cryptodev_frame_build_sgl
-             (vm, cmt->iova_mode, vec, &n_seg, b[0],
-              fe->crypto_total_length - vec[0].len) < 0)
-           {
-             cryptodev_mark_frame_err_status (frame,
-                                              VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
-             return -1;
-           }
+         vec[0].len = b[0]->current_data + b[0]->current_length -
+                      fe->crypto_start_offset;
+         status =
+           cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
+                                      fe->crypto_total_length - vec[0].len);
+         if (status < 0)
+           goto error_exit;
        }
 
       status =
-       rte_cryptodev_dp_submit_single_job (cet->dp_service, vec, n_seg, cofs,
-                                           &iv_vec, &digest_vec, &aad_vec,
-                                           (void *) frame);
+       rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
+                                  &digest_vec, &aad_vec, (void *) frame);
       if (PREDICT_FALSE (status < 0))
-       {
-         cryptodev_mark_frame_err_status (frame,
-                                          VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
-         return -1;
-       }
+       goto error_exit;
+
       fe++;
       b++;
       n_elts--;
     }
 
-  rte_cryptodev_dp_submit_done (cet->dp_service, frame->n_elts);
+  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
+  if (PREDICT_FALSE (status < 0))
+    goto error_exit;
+
   cet->inflight += frame->n_elts;
 
   return 0;
+
+error_exit:
+  cryptodev_mark_frame_err_status (frame,
+                                  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+  cryptodev_reset_ctx (cet);
+  return -1;
 }
 
 static u32
@@ -777,6 +840,7 @@ cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
   u32 n_cached_frame = rte_ring_count (cet->cached_frame), n_room_left;
   u8 no_job_to_deq = 0;
   u16 inflight = cet->inflight;
+  int dequeue_status;
 
   n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;
 
@@ -787,6 +851,7 @@ cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
        {
          vnet_crypto_async_frame_t *f;
          void *f_ret;
+         enum rte_crypto_op_status op_status;
          u8 n_left, err, j;
 
          GET_RING_OBJ (cet->cached_frame, i, f);
@@ -805,14 +870,22 @@ cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
 
          for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
            {
-             int ret =
-               rte_cryptodev_dp_sym_dequeue_single_job (cet->dp_service,
-                                                        &f_ret);
-             if (ret < 0)
+             int ret;
+             f_ret = rte_cryptodev_raw_dequeue (cet->ctx, &ret, &op_status);
+
+             if (!f_ret)
                break;
-             f->elts[j].status = ret == 1 ? VNET_CRYPTO_OP_STATUS_COMPLETED :
-               VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
-             err |= ret << 7;
+
+             switch (op_status)
+               {
+               case RTE_CRYPTO_OP_STATUS_SUCCESS:
+                 f->elts[j].status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+                 break;
+               default:
+                 f->elts[j].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
+                 err |= 1 << 7;
+               }
+
              inflight--;
            }
 
@@ -853,10 +926,11 @@ cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
   if (!inflight || no_job_to_deq || !n_room_left)
     goto end_deq;
 
-  n_deq = rte_cryptodev_dp_sym_dequeue (cet->dp_service,
-                                       cryptodev_get_frame_n_elts,
-                                       cryptodev_post_dequeue,
-                                       (void **) &frame, 0, &n_success);
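+  /* cryptodev_get_frame_n_elts lets the PMD know how many ops belong to
+   * the frame at the head of the queue, while cryptodev_post_dequeue
+   * writes each op's status back into its frame element */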
+  n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
+                                          cryptodev_get_frame_n_elts,
+                                          cryptodev_post_dequeue,
+                                          (void **) &frame, 0, &n_success,
+                                          &dequeue_status);
   if (!n_deq)
     goto end_deq;
 
@@ -880,10 +954,11 @@ cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
   /* see if we can dequeue more */
   while (inflight && n_room_left && !no_job_to_deq)
     {
-      n_deq = rte_cryptodev_dp_sym_dequeue (cet->dp_service,
-                                           cryptodev_get_frame_n_elts,
-                                           cryptodev_post_dequeue,
-                                           (void **) &frame, 0, &n_success);
+      n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
+                                              cryptodev_get_frame_n_elts,
+                                              cryptodev_post_dequeue,
+                                              (void **) &frame, 0,
+                                              &n_success, &dequeue_status);
       if (!n_deq)
        break;
       inflight -= n_deq;
@@ -897,8 +972,9 @@ cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
 end_deq:
   if (inflight < cet->inflight)
     {
-      rte_cryptodev_dp_dequeue_done (cet->dp_service,
-                                    cet->inflight - inflight);
+      int res =
+       rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
+      ASSERT (res == 0);
       cet->inflight = inflight;
     }
 
@@ -997,8 +1073,7 @@ cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
       cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
       cet->cryptodev_id = cinst->dev_id;
       cet->cryptodev_q = cinst->q_id;
-      cet->dp_service = (struct rte_crypto_dp_service_ctx *)
-         cinst->dp_service_buffer;
+      cryptodev_reset_ctx (cet);
       clib_spinlock_unlock (&cmt->tlock);
       break;
     case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
@@ -1023,8 +1098,7 @@ cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
       cinst = cmt->cryptodev_inst + cryptodev_inst_index;
       cet->cryptodev_id = cinst->dev_id;
       cet->cryptodev_q = cinst->q_id;
-      cet->dp_service = (struct rte_crypto_dp_service_ctx *)
-         cinst->dp_service_buffer;
+      cryptodev_reset_ctx (cet);
       clib_spinlock_unlock (&cmt->tlock);
       break;
     default:
@@ -1162,48 +1236,6 @@ VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
     .function = cryptodev_set_assignment_fn,
 };
 
-static int
-check_cryptodev_alg_support (u32 dev_id)
-{
-  const struct rte_cryptodev_symmetric_capability *cap;
-  struct rte_cryptodev_sym_capability_idx cap_idx;
-
-#define _(a, b, c, d, e, f) \
-  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \
-  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
-  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
-  if (!cap) \
-    return -RTE_CRYPTO_##b##_##c; \
-  else \
-    { \
-      if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
-       return -RTE_CRYPTO_##b##_##c; \
-      if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \
-       return -RTE_CRYPTO_##b##_##c; \
-      if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \
-       return -RTE_CRYPTO_##b##_##c; \
-    }
-
-  foreach_vnet_aead_crypto_conversion
-#undef _
-
-#define _(a, b, c, d) \
-  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \
-  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \
-  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
-  if (!cap) \
-    return -RTE_CRYPTO_CIPHER_##b; \
-  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \
-  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \
-  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
-  if (!cap) \
-    return -RTE_CRYPTO_AUTH_##c;
-
-  foreach_cryptodev_link_async_alg
-#undef _
-    return 0;
-}
-
 static u32
 cryptodev_count_queue (u32 numa)
 {
@@ -1214,12 +1246,6 @@ cryptodev_count_queue (u32 numa)
   for (i = 0; i < n_cryptodev; i++)
     {
       rte_cryptodev_info_get (i, &info);
-      if (rte_cryptodev_socket_id (i) != numa)
-       {
-         clib_warning ("DPDK crypto resource %s is in different numa node "
-             "as %u, ignored", info.device->name, numa);
-         continue;
-       }
       q_count += info.max_nb_queue_pairs;
     }
 
@@ -1229,63 +1255,45 @@ cryptodev_count_queue (u32 numa)
 static int
 cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
 {
+  struct rte_cryptodev_config cfg;
   struct rte_cryptodev_info info;
-  struct rte_cryptodev *cdev;
   cryptodev_main_t *cmt = &cryptodev_main;
-  cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
-                                                      vm->numa_node);
-  u32 dp_size = 0;
   u32 i;
   int ret;
 
-  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
   rte_cryptodev_info_get (cryptodev_id, &info);
 
-  if (!(info.feature_flags & RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE))
+  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
     return -1;
 
-  ret = check_cryptodev_alg_support (cryptodev_id);
-  if (ret != 0)
-    return ret;
+  cfg.socket_id = info.device->numa_node;
+  cfg.nb_queue_pairs = info.max_nb_queue_pairs;
 
+  rte_cryptodev_configure (cryptodev_id, &cfg);
 
-
-  /** If the device is already started, we reuse it, otherwise configure
-   *  both the device and queue pair.
-   **/
-  if (!cdev->data->dev_started)
+  for (i = 0; i < info.max_nb_queue_pairs; i++)
     {
-      struct rte_cryptodev_config cfg;
-
-      cfg.socket_id = vm->numa_node;
-      cfg.nb_queue_pairs = info.max_nb_queue_pairs;
+      struct rte_cryptodev_qp_conf qp_cfg;
 
-      rte_cryptodev_configure (cryptodev_id, &cfg);
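+      /* no session pool is attached to the queue pair: with the raw
+       * data-path API the session is supplied at enqueue time instead */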
+      qp_cfg.mp_session = 0;
+      qp_cfg.mp_session_private = 0;
+      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
 
-      for (i = 0; i < info.max_nb_queue_pairs; i++)
+      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
+                                           info.device->numa_node);
+      if (ret)
        {
-         struct rte_cryptodev_qp_conf qp_cfg;
-
-         qp_cfg.mp_session = numa_data->sess_pool;
-         qp_cfg.mp_session_private = numa_data->sess_priv_pool;
-         qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
-
-         ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
-                                               vm->numa_node);
-         if (ret)
-           break;
+         clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
+                       cryptodev_id, i, ret);
+         break;
        }
-      if (i != info.max_nb_queue_pairs)
-       return -1;
-
-      /* start the device */
-      rte_cryptodev_start (i);
     }
 
-  ret = rte_cryptodev_get_dp_service_ctx_data_size (cryptodev_id);
-  if (ret < 0)
+  if (i != info.max_nb_queue_pairs)
     return -1;
-  dp_size = ret;
+
+  /* start the device */
+  rte_cryptodev_start (cryptodev_id);
 
   for (i = 0; i < info.max_nb_queue_pairs; i++)
     {
@@ -1294,7 +1302,7 @@ cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
       cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
       cdev_inst->dev_id = cryptodev_id;
       cdev_inst->q_id = i;
-      vec_validate_aligned(cdev_inst->dp_service_buffer, dp_size, 8);
+
       snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
                "%s_q%u", info.device->name, i);
     }
@@ -1315,24 +1323,257 @@ cryptodev_cmp (void *v1, void *v2)
   return 0;
 }
 
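+/* Trim *param_sizes down to the values a device advertises, i.e. those
+ * expressible as min + n * increment within [min, max]; an increment of 0
+ * means the range is the single value min. */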
+static void
+remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
+                              u32 param_size_max, u32 increment)
+{
+  u32 i = 0;
+  u32 cap_param_size;
+
+  while (i < vec_len (*param_sizes))
+    {
+      u32 found_param = 0;
+      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
+          cap_param_size += increment)
+       {
+         if ((*param_sizes)[i] == cap_param_size)
+           {
+             found_param = 1;
+             break;
+           }
+         if (increment == 0)
+           break;
+       }
+      if (!found_param)
+       /* no such param_size in cap, so delete this size from temp_cap */
+       vec_delete (*param_sizes, 1, i);
+      else
+       i++;
+    }
+}
+
+static void
+cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
+{
+  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];
+
+  switch (temp_cap.xform_type)
+    {
+    case RTE_CRYPTO_SYM_XFORM_AUTH:
+      vec_free (temp_cap.auth.digest_sizes);
+      break;
+    case RTE_CRYPTO_SYM_XFORM_CIPHER:
+      vec_free (temp_cap.cipher.key_sizes);
+      break;
+    case RTE_CRYPTO_SYM_XFORM_AEAD:
+      vec_free (temp_cap.aead.key_sizes);
+      vec_free (temp_cap.aead.aad_sizes);
+      vec_free (temp_cap.aead.digest_sizes);
+      break;
+    default:
+      break;
+    }
+  vec_delete (*temp_caps, 1, temp_cap_id);
+}
+
+static u32
+cryptodev_remove_unsupported_param_sizes (
+  cryptodev_capability_t *temp_cap,
+  const struct rte_cryptodev_capabilities *dev_caps)
+{
+  u32 cap_found = 0;
+  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];
+
+  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+    {
+      if (cap->sym.xform_type == temp_cap->xform_type)
+       switch (cap->sym.xform_type)
+         {
+         case RTE_CRYPTO_SYM_XFORM_CIPHER:
+           if (cap->sym.cipher.algo == temp_cap->cipher.algo)
+             {
+               remove_unsupported_param_size (
+                 &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
+                 cap->sym.cipher.key_size.max,
+                 cap->sym.cipher.key_size.increment);
+               if (vec_len (temp_cap->cipher.key_sizes) > 0)
+                 cap_found = 1;
+             }
+           break;
+         case RTE_CRYPTO_SYM_XFORM_AUTH:
+           if (cap->sym.auth.algo == temp_cap->auth.algo)
+             {
+               remove_unsupported_param_size (
+                 &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
+                 cap->sym.auth.digest_size.max,
+                 cap->sym.auth.digest_size.increment);
+               if (vec_len (temp_cap->auth.digest_sizes) > 0)
+                 cap_found = 1;
+             }
+           break;
+         case RTE_CRYPTO_SYM_XFORM_AEAD:
+           if (cap->sym.aead.algo == temp_cap->aead.algo)
+             {
+               remove_unsupported_param_size (
+                 &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
+                 cap->sym.aead.key_size.max,
+                 cap->sym.aead.key_size.increment);
+               remove_unsupported_param_size (
+                 &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
+                 cap->sym.aead.aad_size.max,
+                 cap->sym.aead.aad_size.increment);
+               remove_unsupported_param_size (
+                 &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
+                 cap->sym.aead.digest_size.max,
+                 cap->sym.aead.digest_size.increment);
+               if (vec_len (temp_cap->aead.key_sizes) > 0 &&
+                   vec_len (temp_cap->aead.aad_sizes) > 0 &&
+                   vec_len (temp_cap->aead.digest_sizes) > 0)
+                 cap_found = 1;
+             }
+           break;
+         default:
+           break;
+         }
+      if (cap_found)
+       break;
+      cap++;
+    }
+
+  return cap_found;
+}
+
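+/* Build cmt->supported_caps as the intersection of all probed devices'
+ * capabilities: seed the vector from the first device, then drop any
+ * algorithm or parameter size another device does not support. */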
+static void
+cryptodev_get_common_capabilities ()
+{
+  cryptodev_main_t *cmt = &cryptodev_main;
+  cryptodev_inst_t *dev_inst;
+  struct rte_cryptodev_info dev_info;
+  u32 previous_dev_id, dev_id;
+  u32 cap_id = 0;
+  u32 param;
+  cryptodev_capability_t tmp_cap;
+  const struct rte_cryptodev_capabilities *cap;
+  const struct rte_cryptodev_capabilities *dev_caps;
+
+  if (vec_len (cmt->cryptodev_inst) == 0)
+    return;
+  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
+  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
+  cap = &dev_info.capabilities[0];
+
+  /* init capabilities vector */
+  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+    {
+      ASSERT (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+      tmp_cap.xform_type = cap->sym.xform_type;
+      switch (cap->sym.xform_type)
+       {
+       case RTE_CRYPTO_SYM_XFORM_CIPHER:
+         tmp_cap.cipher.key_sizes = 0;
+         tmp_cap.cipher.algo = cap->sym.cipher.algo;
+         for (param = cap->sym.cipher.key_size.min;
+              param <= cap->sym.cipher.key_size.max;
+              param += cap->sym.cipher.key_size.increment)
+           {
+             vec_add1 (tmp_cap.cipher.key_sizes, param);
+             if (cap->sym.cipher.key_size.increment == 0)
+               break;
+           }
+         break;
+       case RTE_CRYPTO_SYM_XFORM_AUTH:
+         tmp_cap.auth.algo = cap->sym.auth.algo;
+         tmp_cap.auth.digest_sizes = 0;
+         for (param = cap->sym.auth.digest_size.min;
+              param <= cap->sym.auth.digest_size.max;
+              param += cap->sym.auth.digest_size.increment)
+           {
+             vec_add1 (tmp_cap.auth.digest_sizes, param);
+             if (cap->sym.auth.digest_size.increment == 0)
+               break;
+           }
+         break;
+       case RTE_CRYPTO_SYM_XFORM_AEAD:
+         tmp_cap.aead.key_sizes = 0;
+         tmp_cap.aead.aad_sizes = 0;
+         tmp_cap.aead.digest_sizes = 0;
+         tmp_cap.aead.algo = cap->sym.aead.algo;
+         for (param = cap->sym.aead.key_size.min;
+              param <= cap->sym.aead.key_size.max;
+              param += cap->sym.aead.key_size.increment)
+           {
+             vec_add1 (tmp_cap.aead.key_sizes, param);
+             if (cap->sym.aead.key_size.increment == 0)
+               break;
+           }
+         for (param = cap->sym.aead.aad_size.min;
+              param <= cap->sym.aead.aad_size.max;
+              param += cap->sym.aead.aad_size.increment)
+           {
+             vec_add1 (tmp_cap.aead.aad_sizes, param);
+             if (cap->sym.aead.aad_size.increment == 0)
+               break;
+           }
+         for (param = cap->sym.aead.digest_size.min;
+              param <= cap->sym.aead.digest_size.max;
+              param += cap->sym.aead.digest_size.increment)
+           {
+             vec_add1 (tmp_cap.aead.digest_sizes, param);
+             if (cap->sym.aead.digest_size.increment == 0)
+               break;
+           }
+         break;
+       default:
+         break;
+       }
+
+      vec_add1 (cmt->supported_caps, tmp_cap);
+      cap++;
+    }
+
+  while (cap_id < vec_len (cmt->supported_caps))
+    {
+      u32 cap_is_supported = 1;
+      previous_dev_id = cmt->cryptodev_inst->dev_id;
+
+      vec_foreach (dev_inst, cmt->cryptodev_inst)
+       {
+         dev_id = dev_inst->dev_id;
+         if (previous_dev_id != dev_id)
+           {
+             previous_dev_id = dev_id;
+             rte_cryptodev_info_get (dev_id, &dev_info);
+             dev_caps = &dev_info.capabilities[0];
+             cap_is_supported = cryptodev_remove_unsupported_param_sizes (
+               &cmt->supported_caps[cap_id], dev_caps);
+             if (!cap_is_supported)
+               {
+                 cryptodev_delete_cap (&cmt->supported_caps, cap_id);
+                 /* no need to check other devices, as this one doesn't
+                  * support this cap */
+                 break;
+               }
+           }
+       }
+      if (cap_is_supported)
+       cap_id++;
+    }
+}
+
 static int
 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   u32 n_queues = cryptodev_count_queue (vm->numa_node);
   u32 i;
-  int ret;
 
   if (n_queues < n_workers)
     return -1;
 
   for (i = 0; i < rte_cryptodev_count (); i++)
-    {
-      ret = cryptodev_configure (vm, i);
-      if (ret)
-       continue;
-    }
+    cryptodev_configure (vm, i);
 
+  cryptodev_get_common_capabilities ();
   vec_sort_with_function(cmt->cryptodev_inst, cryptodev_cmp);
 
   /* if there is not enough device stop cryptodev */
@@ -1342,22 +1583,24 @@ cryptodev_probe (vlib_main_t *vm, u32 n_workers)
   return 0;
 }
 
-static int
-cryptodev_get_session_sz (vlib_main_t *vm, u32 n_workers)
+static void
+cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz)
 {
-  u32 sess_data_sz = 0, i;
-
-  if (rte_cryptodev_count () == 0)
-    return -1;
+  cryptodev_main_t *cmt = &cryptodev_main;
+  cryptodev_inst_t *cinst;
+  u32 max_sess = 0, max_dp = 0;
 
-  for (i = 0; i < rte_cryptodev_count (); i++)
+  vec_foreach (cinst, cmt->cryptodev_inst)
     {
-      u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);
+      u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id);
+      u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);
 
-      sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
+      max_sess = clib_max (sess_sz, max_sess);
+      max_dp = clib_max (dp_sz, max_dp);
     }
 
-  return sess_data_sz;
+  *max_sess_sz = max_sess;
+  *max_dp_sz = max_dp;
 }
 
 static void
@@ -1381,7 +1624,74 @@ dpdk_disable_cryptodev_engine (vlib_main_t * vm)
        rte_free (ptd->aad_buf);
       if (ptd->cached_frame)
        rte_ring_free (ptd->cached_frame);
+      if (ptd->reset_sess.crypto_sess)
+       {
+         struct rte_mempool *mp =
+           rte_mempool_from_obj ((void *) ptd->reset_sess.crypto_sess);
+
+         rte_mempool_free (mp);
+         ptd->reset_sess.crypto_sess = 0;
+       }
+    }
+}
+
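+/* Create the per-thread dummy session used by cryptodev_reset_ctx (): a
+ * dedicated two-element pool plus an AES-GCM session with an all-zero key.
+ * It only re-arms the raw data-path context and never processes traffic. */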
+static clib_error_t *
+create_reset_sess (cryptodev_engine_thread_t *ptd, u32 lcore, u32 numa,
+                  u32 sess_sz)
+{
+  struct rte_crypto_sym_xform xform = { 0 };
+  struct rte_crypto_aead_xform *aead_xform = &xform.aead;
+  struct rte_cryptodev_sym_session *sess;
+  struct rte_mempool *mp = 0;
+  u8 key[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+  u8 *name = 0;
+  clib_error_t *error = 0;
+
+  /* create session pool for the numa node */
+  name = format (0, "vcryptodev_s_reset_%u_%u%c", numa, lcore, 0);
+  mp = rte_cryptodev_sym_session_pool_create ((char *) name, 2, sess_sz, 0, 0,
+                                             numa);
+  if (!mp)
+    {
+      error = clib_error_return (0, "Not enough memory for mp %s", name);
+      goto error_exit;
     }
+  vec_free (name);
+
+  xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
+  aead_xform->op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+  aead_xform->aad_length = 8;
+  aead_xform->digest_length = 16;
+  aead_xform->iv.offset = 0;
+  aead_xform->iv.length = 12;
+  aead_xform->key.data = key;
+  aead_xform->key.length = 16;
+
+  sess = rte_cryptodev_sym_session_create (mp);
+  if (!sess)
+    {
+      error = clib_error_return (0, "failed to create session");
+      goto error_exit;
+    }
+
+  if (rte_cryptodev_sym_session_init (ptd->cryptodev_id, sess, &xform, mp) < 0)
+    {
+      error = clib_error_return (0, "failed to init session private data");
+      goto error_exit;
+    }
+
+  ptd->reset_sess.crypto_sess = sess;
+
+  return 0;
+
+error_exit:
+  if (mp)
+    rte_mempool_free (mp);
+  if (name)
+    vec_free (name);
+
+  return error;
 }
 
 clib_error_t *
@@ -1395,52 +1705,18 @@ dpdk_cryptodev_init (vlib_main_t * vm)
   u32 skip_master = vlib_num_workers () > 0;
   u32 n_workers = tm->n_vlib_mains - skip_master;
   u32 numa = vm->numa_node;
-  i32 sess_sz;
+  u32 sess_sz, dp_sz;
   u32 eidx;
   u32 i;
   u8 *name = 0;
   clib_error_t *error;
+  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
+  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
+  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
 
   cmt->iova_mode = rte_eal_iova_mode ();
 
-  sess_sz = cryptodev_get_session_sz(vm, n_workers);
-  if (sess_sz < 0)
-    {
-      error = clib_error_return (0, "Not enough cryptodevs");
-      return error;
-    }
-
   vec_validate (cmt->per_numa_data, vm->numa_node);
-  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
-
-  /* create session pool for the numa node */
-  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
-  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
-                                             CRYPTODEV_NB_SESSION,
-                                             0, 0, 0, numa);
-  if (!mp)
-    {
-      error = clib_error_return (0, "Not enough memory for mp %s", name);
-      goto err_handling;
-    }
-  vec_free (name);
-
-  numa_data->sess_pool = mp;
-
-  /* create session private pool for the numa node */
-  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
-  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
-                          0, NULL, NULL, NULL, NULL, numa, 0);
-  if (!mp)
-    {
-      error = clib_error_return (0, "Not enough memory for mp %s", name);
-      vec_free (name);
-      goto err_handling;
-    }
-
-  vec_free (name);
-
-  numa_data->sess_priv_pool = mp;
 
   /* probe all cryptodev devices and get queue info */
   if (cryptodev_probe (vm, n_workers) < 0)
@@ -1449,6 +1725,8 @@ dpdk_cryptodev_init (vlib_main_t * vm)
       goto err_handling;
     }
 
+  cryptodev_get_max_sz (&sess_sz, &dp_sz);
+
   clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
   clib_spinlock_init (&cmt->tlock);
 
@@ -1457,11 +1735,13 @@ dpdk_cryptodev_init (vlib_main_t * vm)
   for (i = skip_master; i < tm->n_vlib_mains; i++)
     {
       ptd = cmt->per_thread_data + i;
-      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
+      numa = vlib_get_main_by_index (i)->numa_node;
+
       ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS *
                                         CRYPTODEV_MAX_AAD_SIZE,
                                         CLIB_CACHE_LINE_BYTES,
                                         numa);
+
       if (ptd->aad_buf == 0)
        {
          error = clib_error_return (0, "Failed to alloc aad buf");
@@ -1470,6 +1750,13 @@ dpdk_cryptodev_init (vlib_main_t * vm)
 
       ptd->aad_phy_addr = rte_malloc_virt2iova (ptd->aad_buf);
 
+      ptd->ctx = rte_zmalloc_socket (0, dp_sz, CLIB_CACHE_LINE_BYTES, numa);
+      if (!ptd->ctx)
+       {
+         error = clib_error_return (0, "Failed to alloc raw dp ctx");
+         goto err_handling;
+       }
+
       name = format (0, "cache_frame_ring_%u%u", numa, i);
       ptd->cached_frame = rte_ring_create ((char *)name,
                                           CRYPTODEV_DEQ_CACHE_SZ, numa,
@@ -1477,43 +1764,102 @@ dpdk_cryptodev_init (vlib_main_t * vm)
 
       if (ptd->cached_frame == 0)
        {
-         error = clib_error_return (0, "Failed to frame ring");
+         error = clib_error_return (0, "Failed to alloc frame ring");
          goto err_handling;
        }
       vec_free (name);
+
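+      /* session pools are created on demand, once per numa node that
+       * actually hosts a worker thread */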
+      vec_validate (cmt->per_numa_data, numa);
+      numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
+
+      if (!numa_data->sess_pool)
+       {
+         /* create session pool for the numa node */
+         name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
+         mp = rte_cryptodev_sym_session_pool_create (
+           (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
+         if (!mp)
+           {
+             error =
+               clib_error_return (0, "Not enough memory for mp %s", name);
+             goto err_handling;
+           }
+         vec_free (name);
+
+         numa_data->sess_pool = mp;
+
+         /* create session private pool for the numa node */
+         name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
+         mp =
+           rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
+                               0, 0, NULL, NULL, NULL, NULL, numa, 0);
+         if (!mp)
+           {
+             error =
+               clib_error_return (0, "Not enough memory for mp %s", name);
+             vec_free (name);
+             goto err_handling;
+           }
+
+         vec_free (name);
+
+         numa_data->sess_priv_pool = mp;
+       }
+
+      error = create_reset_sess (ptd, i, numa, sess_sz);
+      if (error)
+       goto err_handling;
+
+      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
     }
 
   /* register handler */
-  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
-                                      "DPDK Cryptodev Engine");
-
-#define _(a, b, c, d, e, f) \
-  vnet_crypto_register_async_handler \
-    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
-       cryptodev_enqueue_gcm_aad_##f##_enc,\
-       cryptodev_frame_dequeue); \
-  vnet_crypto_register_async_handler \
-    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
-       cryptodev_enqueue_gcm_aad_##f##_dec, \
-       cryptodev_frame_dequeue);
+  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
+                                     "DPDK Cryptodev Engine");
+
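+/* register an AEAD handler pair only when every device supports the
+ * (key, digest, aad) size tuple, e.g. AES_128_GCM with a 16-byte key,
+ * 16-byte tag and 8-byte AAD */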
+#define _(a, b, c, d, e, f, g)                                                \
+  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
+  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
+  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
+    {                                                                         \
+      vnet_crypto_register_async_handler (                                    \
+       vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
+       cryptodev_enqueue_gcm_aad_##f##_enc, cryptodev_frame_dequeue);        \
+      vnet_crypto_register_async_handler (                                    \
+       vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
+       cryptodev_enqueue_gcm_aad_##f##_dec, cryptodev_frame_dequeue);        \
+    }
 
   foreach_vnet_aead_crypto_conversion
 #undef _
-
-#define _(a, b, c, d) \
-  vnet_crypto_register_async_handler \
-    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \
-       cryptodev_enqueue_linked_alg_enc, \
-       cryptodev_frame_dequeue); \
-  vnet_crypto_register_async_handler \
-    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \
-       cryptodev_enqueue_linked_alg_dec, \
-       cryptodev_frame_dequeue);
+/* clang-format off */
+#define _(a, b, c, d, e)                                                      \
+  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
+  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
+  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
+  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
+  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
+      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
+    {                                                                         \
+      vnet_crypto_register_async_handler (                                    \
+       vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
+       cryptodev_enqueue_linked_alg_enc, cryptodev_frame_dequeue);           \
+      vnet_crypto_register_async_handler (                                    \
+       vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
+       cryptodev_enqueue_linked_alg_dec, cryptodev_frame_dequeue);           \
+    }
 
     foreach_cryptodev_link_async_alg
 #undef _
 
   vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
+  /* clang-format on */
+
+  /* this engine is only enabled when cryptodev device(s) are present in
+   * startup.conf; assume the user wants it used and turn on async mode
+   * here. */
+  vnet_crypto_request_async_mode (1);
+  ipsec_set_async_mode (1);
 
   return 0;