crypto-ipsecmb: bump intel-ipsec-mb version to 1.4
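This change ports the ipsecmb crypto engine to the intel-ipsec-mb 1.x API: the legacy MB_MGR / JOB_AES_HMAC types and bare enum names (STS_COMPLETED, NULL_CIPHER, ENCRYPT, ...) are replaced by their IMB_-prefixed equivalents, the HMAC and AES-CBC/CTR paths switch to the burst submission API when the library is 1.3 or newer, AES-CTR and (behind HAVE_IPSECMB_CHACHA_POLY) CHACHA20-POLY1305 handlers are added, and the /dev/urandom-based CBC IV initialisation is dropped.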
src/plugins/crypto_ipsecmb/ipsecmb.c (vpp.git)
index 3c5495a..9d87410 100644
 #include <vnet/crypto/crypto.h>
 #include <vppinfra/cpu.h>
 
-#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
+#define HMAC_MAX_BLOCK_SIZE  IMB_SHA_512_BLOCK_SIZE
 #define EXPANDED_KEY_N_BYTES (16 * 15)
 
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  MB_MGR *mgr;
-  __m128i cbc_iv;
+  IMB_MGR *mgr;
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+  IMB_JOB burst_jobs[IMB_MAX_BURST_SIZE];
+#endif
 } ipsecmb_per_thread_data_t;
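
In place of the per-thread cbc_iv seed (removed together with crypto_ipsecmb_iv_init further down), each worker now keeps IMB_MAX_BURST_SIZE pre-allocated IMB_JOB structures when building against intel-ipsec-mb >= 1.3; they are zeroed once at init time and reused by the burst submission paths below, so no per-operation IMB_GET_NEXT_JOB call is needed on that path.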
 
 typedef struct
@@ -56,27 +58,31 @@ typedef struct
 {
   u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
   u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
-} ipsecmb_aes_cbc_key_data_t;
+} ipsecmb_aes_key_data_t;
 
 static ipsecmb_main_t ipsecmb_main = { };
 
+/* clang-format off */
 /*
  * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
  */
 #define foreach_ipsecmb_hmac_op                                \
-  _(SHA1,   SHA1,    sha1,   64,  20, 20)                      \
+  _(SHA1,   SHA_1,   sha1,   64,  20, 20)                      \
   _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
   _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
   _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
   _(SHA512, SHA_512, sha512, 128, 64, 64)
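
For reference, the _ macro defined further down in this file expands each row of this table into a thin wrapper around ipsecmb_ops_hmac_inline; the SHA1 row, for instance, becomes roughly:

    /* illustrative expansion of _(SHA1, SHA_1, sha1, 64, 20, 20) */
    static_always_inline u32
    ipsecmb_ops_hmac_SHA1 (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)
    {
      return ipsecmb_ops_hmac_inline (vm, ops, n_ops, 64, 20, 20,
                                      IMB_AUTH_HMAC_SHA_1);
    }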
 
 /*
- * (Alg, key-len-bits)
+ * (Alg, key-len-bits, JOB_CIPHER_MODE)
  */
-#define foreach_ipsecmb_cbc_cipher_op                          \
-  _(AES_128_CBC, 128)                                          \
-  _(AES_192_CBC, 192)                                          \
-  _(AES_256_CBC, 256)
+#define foreach_ipsecmb_cipher_op                                             \
+  _ (AES_128_CBC, 128, CBC)                                                   \
+  _ (AES_192_CBC, 192, CBC)                                                   \
+  _ (AES_256_CBC, 256, CBC)                                                   \
+  _ (AES_128_CTR, 128, CNTR)                                                  \
+  _ (AES_192_CTR, 192, CNTR)                                                  \
+  _ (AES_256_CTR, 256, CNTR)
 
 /*
  * (Alg, key-len-bytes, iv-len-bytes)
@@ -85,16 +91,36 @@ static ipsecmb_main_t ipsecmb_main = { };
   _(AES_128_GCM, 128)                                          \
   _(AES_192_GCM, 192)                                          \
   _(AES_256_GCM, 256)
+/* clang-format on */
+static_always_inline vnet_crypto_op_status_t
+ipsecmb_status_job (IMB_STATUS status)
+{
+  switch (status)
+    {
+    case IMB_STATUS_COMPLETED:
+      return VNET_CRYPTO_OP_STATUS_COMPLETED;
+    case IMB_STATUS_BEING_PROCESSED:
+    case IMB_STATUS_COMPLETED_CIPHER:
+    case IMB_STATUS_COMPLETED_AUTH:
+      return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
+    case IMB_STATUS_INVALID_ARGS:
+    case IMB_STATUS_INTERNAL_ERROR:
+    case IMB_STATUS_ERROR:
+      return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
+    }
+  ASSERT (0);
+  return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
+}
 
 always_inline void
-ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
+ipsecmb_retire_hmac_job (IMB_JOB *job, u32 *n_fail, u32 digest_size)
 {
   vnet_crypto_op_t *op = job->user_data;
   u32 len = op->digest_len ? op->digest_len : digest_size;
 
-  if (STS_COMPLETED != job->status)
+  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
     {
-      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+      op->status = ipsecmb_status_job (job->status);
       *n_fail = *n_fail + 1;
       return;
     }
@@ -116,15 +142,71 @@ ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
   op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
 }
 
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+static_always_inline u32
+ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+                        u32 block_size, u32 hash_size, u32 digest_size,
+                        IMB_HASH_ALG alg)
+{
+  ipsecmb_main_t *imbm = &ipsecmb_main;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  IMB_JOB *job;
+  u32 i, n_fail = 0, ops_index = 0;
+  u8 scratch[n_ops][digest_size];
+  const u32 burst_sz =
+    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
+
+  while (n_ops)
+    {
+      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
+      /*
+       * configure all the jobs first ...
+       */
+      for (i = 0; i < n; i++, ops_index++)
+       {
+         vnet_crypto_op_t *op = ops[ops_index];
+         const u8 *kd = (u8 *) imbm->key_data[op->key_index];
+
+         job = &ptd->burst_jobs[i];
+
+         job->src = op->src;
+         job->hash_start_src_offset_in_bytes = 0;
+         job->msg_len_to_hash_in_bytes = op->len;
+         job->auth_tag_output_len_in_bytes = digest_size;
+         job->auth_tag_output = scratch[ops_index];
+
+         job->u.HMAC._hashed_auth_key_xor_ipad = kd;
+         job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
+         job->user_data = op;
+       }
+
+      /*
+       * submit all jobs to be processed and retire completed jobs
+       */
+      IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);
+
+      for (i = 0; i < n; i++)
+       {
+         job = &ptd->burst_jobs[i];
+         ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
+       }
+
+      n_ops -= n;
+    }
+
+  return ops_index - n_fail;
+}
+#else
 static_always_inline u32
-ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
-                        u32 n_ops, u32 block_size, u32 hash_size,
-                        u32 digest_size, JOB_HASH_ALG alg)
+ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+                        u32 block_size, u32 hash_size, u32 digest_size,
+                        JOB_HASH_ALG alg)
 {
   ipsecmb_main_t *imbm = &ipsecmb_main;
-  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
-                                                    vm->thread_index);
-  JOB_AES_HMAC *job;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  IMB_JOB *job;
   u32 i, n_fail = 0;
   u8 scratch[n_ops][digest_size];
 
@@ -145,9 +227,9 @@ ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
       job->auth_tag_output_len_in_bytes = digest_size;
       job->auth_tag_output = scratch[i];
 
-      job->cipher_mode = NULL_CIPHER;
-      job->cipher_direction = DECRYPT;
-      job->chain_order = HASH_CIPHER;
+      job->cipher_mode = IMB_CIPHER_NULL;
+      job->cipher_direction = IMB_DIR_DECRYPT;
+      job->chain_order = IMB_ORDER_HASH_CIPHER;
 
       job->u.HMAC._hashed_auth_key_xor_ipad = kd;
       job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
@@ -164,48 +246,108 @@ ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
 
   return n_ops - n_fail;
 }
+#endif
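
Note the difference between the two paths above: with intel-ipsec-mb >= 1.3 the ops are written into the per-thread burst_jobs array and handed over in batches of at most IMB_MAX_BURST_SIZE through IMB_SUBMIT_HASH_BURST_NOCHECK, after which every job in the batch is retired immediately; the pre-1.3 path keeps the original per-job IMB_GET_NEXT_JOB / IMB_SUBMIT_JOB / IMB_FLUSH_JOB flow. The _NOCHECK variants skip the library's own argument validation, which is why the jobs are fully initialised here before submission.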
 
+/* clang-format off */
 #define _(a, b, c, d, e, f)                                             \
 static_always_inline u32                                                \
 ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                       vnet_crypto_op_t * ops[],                         \
                       u32 n_ops)                                        \
-{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); }        \
+{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f,              \
+               IMB_AUTH_HMAC_##b); }                                   \
 
 foreach_ipsecmb_hmac_op;
 #undef _
+/* clang-format on */
 
 always_inline void
-ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
+ipsecmb_retire_cipher_job (IMB_JOB *job, u32 *n_fail)
 {
   vnet_crypto_op_t *op = job->user_data;
 
-  if (STS_COMPLETED != job->status)
+  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
     {
-      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+      op->status = ipsecmb_status_job (job->status);
       *n_fail = *n_fail + 1;
     }
   else
     op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
 }
 
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+static_always_inline u32
+ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+                              u32 n_ops, u32 key_len,
+                              IMB_CIPHER_DIRECTION direction,
+                              IMB_CIPHER_MODE cipher_mode)
+{
+  ipsecmb_main_t *imbm = &ipsecmb_main;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  IMB_JOB *job;
+  u32 i, n_fail = 0, ops_index = 0;
+  const u32 burst_sz =
+    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
+
+  while (n_ops)
+    {
+      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
+
+      for (i = 0; i < n; i++)
+       {
+         ipsecmb_aes_key_data_t *kd;
+         vnet_crypto_op_t *op = ops[ops_index++];
+         kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
+
+         job = &ptd->burst_jobs[i];
+
+         job->src = op->src;
+         job->dst = op->dst;
+         job->msg_len_to_cipher_in_bytes = op->len;
+         job->cipher_start_src_offset_in_bytes = 0;
+
+         job->hash_alg = IMB_AUTH_NULL;
+
+         job->enc_keys = kd->enc_key_exp;
+         job->dec_keys = kd->dec_key_exp;
+         job->iv = op->iv;
+         job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;
+
+         job->user_data = op;
+       }
+
+      IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
+                                      cipher_mode, direction, key_len / 8);
+      for (i = 0; i < n; i++)
+       {
+         job = &ptd->burst_jobs[i];
+         ipsecmb_retire_cipher_job (job, &n_fail);
+       }
+
+      n_ops -= n;
+    }
+
+  return ops_index - n_fail;
+}
+#else
 static_always_inline u32
-ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                               u32 n_ops, u32 key_len,
-                              JOB_CIPHER_DIRECTION direction)
+                              JOB_CIPHER_DIRECTION direction,
+                              JOB_CIPHER_MODE cipher_mode)
 {
   ipsecmb_main_t *imbm = &ipsecmb_main;
-  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
-                                                    vm->thread_index);
-  JOB_AES_HMAC *job;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  IMB_JOB *job;
   u32 i, n_fail = 0;
 
   for (i = 0; i < n_ops; i++)
     {
-      ipsecmb_aes_cbc_key_data_t *kd;
+      ipsecmb_aes_key_data_t *kd;
       vnet_crypto_op_t *op = ops[i];
-      kd = (ipsecmb_aes_cbc_key_data_t *) imbm->key_data[op->key_index];
-      __m128i iv;
+      kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
 
       job = IMB_GET_NEXT_JOB (ptd->mgr);
 
@@ -214,23 +356,18 @@ ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
       job->msg_len_to_cipher_in_bytes = op->len;
       job->cipher_start_src_offset_in_bytes = 0;
 
-      job->hash_alg = NULL_HASH;
-      job->cipher_mode = CBC;
+      job->hash_alg = IMB_AUTH_NULL;
+      job->cipher_mode = cipher_mode;
       job->cipher_direction = direction;
-      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
-
-      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
-       {
-         iv = ptd->cbc_iv;
-         _mm_storeu_si128 ((__m128i *) op->iv, iv);
-         ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
-       }
+      job->chain_order =
+       (direction == IMB_DIR_ENCRYPT ? IMB_ORDER_CIPHER_HASH :
+                                             IMB_ORDER_HASH_CIPHER);
 
       job->aes_key_len_in_bytes = key_len / 8;
-      job->aes_enc_key_expanded = kd->enc_key_exp;
-      job->aes_dec_key_expanded = kd->dec_key_exp;
+      job->enc_keys = kd->enc_key_exp;
+      job->dec_keys = kd->dec_key_exp;
       job->iv = op->iv;
-      job->iv_len_in_bytes = AES_BLOCK_SIZE;
+      job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;
 
       job->user_data = op;
 
@@ -245,21 +382,25 @@ ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
 
   return n_ops - n_fail;
 }
+#endif
+
+/* clang-format off */
+#define _(a, b, c)                                                            \
+  static_always_inline u32 ipsecmb_ops_cipher_enc_##a (                       \
+    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
+  {                                                                           \
+    return ipsecmb_ops_aes_cipher_inline (                                    \
+                    vm, ops, n_ops, b, IMB_DIR_ENCRYPT, IMB_CIPHER_##c);      \
+  }                                                                           \
+                                                                              \
+  static_always_inline u32 ipsecmb_ops_cipher_dec_##a (                       \
+    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
+  {                                                                           \
+    return ipsecmb_ops_aes_cipher_inline (                                    \
+                   vm, ops, n_ops, b, IMB_DIR_DECRYPT, IMB_CIPHER_##c);       \
+  }
 
-#define _(a, b)                                                              \
-static_always_inline u32                                                     \
-ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm,                            \
-                                vnet_crypto_op_t * ops[],                    \
-                                u32 n_ops)                                   \
-{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, ENCRYPT); }       \
-                                                                             \
-static_always_inline u32                                                     \
-ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm,                            \
-                                vnet_crypto_op_t * ops[],                    \
-                                u32 n_ops)                                   \
-{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, DECRYPT); }       \
-
-foreach_ipsecmb_cbc_cipher_op;
+foreach_ipsecmb_cipher_op;
 #undef _
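
As with the HMAC table, each row of foreach_ipsecmb_cipher_op expands into an encrypt and a decrypt wrapper; the AES_128_CTR row, for instance, produces roughly:

    /* illustrative expansion of _ (AES_128_CTR, 128, CNTR), encrypt side */
    static_always_inline u32
    ipsecmb_ops_cipher_enc_AES_128_CTR (vlib_main_t *vm,
                                        vnet_crypto_op_t *ops[], u32 n_ops)
    {
      return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, 128,
                                            IMB_DIR_ENCRYPT, IMB_CIPHER_CNTR);
    }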
 
 #define _(a, b)                                                              \
@@ -270,7 +411,7 @@ ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                  \
   ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                      vm->thread_index);      \
-  MB_MGR *m = ptd->mgr;                                                      \
+  IMB_MGR *m = ptd->mgr;                                                     \
   vnet_crypto_op_chunk_t *chp;                                               \
   u32 i, j;                                                                  \
                                                                              \
@@ -305,7 +446,7 @@ ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
   ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                      vm->thread_index);      \
-  MB_MGR *m = ptd->mgr;                                                      \
+  IMB_MGR *m = ptd->mgr;                                                     \
   u32 i;                                                                     \
                                                                              \
   for (i = 0; i < n_ops; i++)                                                \
@@ -331,7 +472,7 @@ ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                  \
   ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                      vm->thread_index);      \
-  MB_MGR *m = ptd->mgr;                                                      \
+  IMB_MGR *m = ptd->mgr;                                                     \
   vnet_crypto_op_chunk_t *chp;                                               \
   u32 i, j, n_failed = 0;                                                    \
                                                                              \
@@ -373,7 +514,7 @@ ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
   ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                      vm->thread_index);      \
-  MB_MGR *m = ptd->mgr;                                                      \
+  IMB_MGR *m = ptd->mgr;                                                     \
   u32 i, n_failed = 0;                                                       \
                                                                              \
   for (i = 0; i < n_ops; i++)                                                \
@@ -398,33 +539,227 @@ ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                                                              \
   return n_ops - n_failed;                                                   \
 }
-
+/* clang-format on */
 foreach_ipsecmb_gcm_cipher_op;
 #undef _
 
-clib_error_t *
-crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
+#ifdef HAVE_IPSECMB_CHACHA_POLY
+always_inline void
+ipsecmb_retire_aead_job (IMB_JOB *job, u32 *n_fail)
 {
-  ipsecmb_per_thread_data_t *ptd;
-  clib_error_t *err = 0;
-  int fd;
+  vnet_crypto_op_t *op = job->user_data;
+  u32 len = op->tag_len;
+
+  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
+    {
+      op->status = ipsecmb_status_job (job->status);
+      *n_fail = *n_fail + 1;
+      return;
+    }
+
+  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+    {
+      if (memcmp (op->tag, job->auth_tag_output, len))
+       {
+         *n_fail = *n_fail + 1;
+         op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+         return;
+       }
+    }
 
-  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
-    return clib_error_return_unix (0, "failed to open '/dev/urandom'");
+  clib_memcpy_fast (op->tag, job->auth_tag_output, len);
 
-  vec_foreach (ptd, imbm->per_thread_data)
-  {
-    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
-      {
-       err = clib_error_return_unix (0, "'/dev/urandom' read failure");
-       close (fd);
-       return (err);
-      }
-  }
+  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+}
 
-  close (fd);
-  return (NULL);
+static_always_inline u32
+ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+                        IMB_CIPHER_DIRECTION dir)
+{
+  ipsecmb_main_t *imbm = &ipsecmb_main;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  struct IMB_JOB *job;
+  IMB_MGR *m = ptd->mgr;
+  u32 i, n_fail = 0, last_key_index = ~0;
+  u8 scratch[VLIB_FRAME_SIZE][16];
+  u8 *key = 0;
+
+  for (i = 0; i < n_ops; i++)
+    {
+      vnet_crypto_op_t *op = ops[i];
+
+      job = IMB_GET_NEXT_JOB (m);
+      if (last_key_index != op->key_index)
+       {
+         vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
+
+         key = kd->data;
+         last_key_index = op->key_index;
+       }
+
+      job->cipher_direction = dir;
+      job->chain_order = IMB_ORDER_HASH_CIPHER;
+      job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
+      job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
+      job->enc_keys = job->dec_keys = key;
+      job->key_len_in_bytes = 32;
+
+      job->u.CHACHA20_POLY1305.aad = op->aad;
+      job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
+      job->src = op->src;
+      job->dst = op->dst;
+
+      job->iv = op->iv;
+      job->iv_len_in_bytes = 12;
+      job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
+       op->len;
+      job->cipher_start_src_offset_in_bytes =
+       job->hash_start_src_offset_in_bytes = 0;
+
+      job->auth_tag_output = scratch[i];
+      job->auth_tag_output_len_in_bytes = 16;
+
+      job->user_data = op;
+
+      job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
+      if (job)
+       ipsecmb_retire_aead_job (job, &n_fail);
+
+      op++;
+    }
+
+  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
+    ipsecmb_retire_aead_job (job, &n_fail);
+
+  return n_ops - n_fail;
+}
+
+static_always_inline u32
+ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+                            u32 n_ops)
+{
+  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
+}
+
+static_always_inline u32
+ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+                            u32 n_ops)
+{
+  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
+}
+
+static_always_inline u32
+ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+                                vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+                                IMB_CIPHER_DIRECTION dir)
+{
+  ipsecmb_main_t *imbm = &ipsecmb_main;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  IMB_MGR *m = ptd->mgr;
+  u32 i, n_fail = 0, last_key_index = ~0;
+  u8 *key = 0;
+
+  if (dir == IMB_DIR_ENCRYPT)
+    {
+      for (i = 0; i < n_ops; i++)
+       {
+         vnet_crypto_op_t *op = ops[i];
+         struct chacha20_poly1305_context_data ctx;
+         vnet_crypto_op_chunk_t *chp;
+         u32 j;
+
+         ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);
+
+         if (last_key_index != op->key_index)
+           {
+             vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
+
+             key = kd->data;
+             last_key_index = op->key_index;
+           }
+
+         IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
+                                     op->aad_len);
+
+         chp = chunks + op->chunk_index;
+         for (j = 0; j < op->n_chunks; j++)
+           {
+             IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
+                                               chp->src, chp->len);
+             chp += 1;
+           }
+
+         IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);
+
+         op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+       }
+    }
+  else /* dir == IMB_DIR_DECRYPT */
+    {
+      for (i = 0; i < n_ops; i++)
+       {
+         vnet_crypto_op_t *op = ops[i];
+         struct chacha20_poly1305_context_data ctx;
+         vnet_crypto_op_chunk_t *chp;
+         u8 scratch[16];
+         u32 j;
+
+         ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);
+
+         if (last_key_index != op->key_index)
+           {
+             vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
+
+             key = kd->data;
+             last_key_index = op->key_index;
+           }
+
+         IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
+                                     op->aad_len);
+
+         chp = chunks + op->chunk_index;
+         for (j = 0; j < op->n_chunks; j++)
+           {
+             IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
+                                               chp->src, chp->len);
+             chp += 1;
+           }
+
+         IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);
+
+         if (memcmp (op->tag, scratch, op->tag_len))
+           {
+             n_fail = n_fail + 1;
+             op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+           }
+         else
+           op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+       }
+    }
+
+  return n_ops - n_fail;
+}
+
+static_always_inline u32
+ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+                                     vnet_crypto_op_chunk_t *chunks,
+                                     u32 n_ops)
+{
+  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
+                                         IMB_DIR_ENCRYPT);
+}
+
+static_always_inline u32
+ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+                                     vnet_crypto_op_chunk_t *chunks,
+                                     u32 n_ops)
+{
+  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
+                                         IMB_DIR_DECRYPT);
 }
+#endif
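
Unlike the AES paths, the chained CHACHA20-POLY1305 variants bypass the job manager and drive the library's direct context API instead: IMB_CHACHA20_POLY1305_INIT once per op, *_ENC_UPDATE / *_DEC_UPDATE per chunk, then *_FINALIZE; on decrypt the computed tag is compared against op->tag before the op is marked completed.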
 
 static void
 crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
@@ -436,6 +771,10 @@ crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
   u32 i;
   void *kd;
 
+  /** TODO: add linked alg support **/
+  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+    return;
+
   if (kop == VNET_CRYPTO_KEY_OP_DEL)
     {
       if (idx >= vec_len (imbm->key_data))
@@ -459,18 +798,14 @@ crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
       clib_mem_free_s (imbm->key_data[idx]);
     }
 
-  /** TODO: add linked alg support **/
-  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
-    return;
-
   kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
                                                     CLIB_CACHE_LINE_BYTES);
 
   /* AES CBC key expansion */
   if (ad->keyexp)
     {
-      ad->keyexp (key->data, ((ipsecmb_aes_cbc_key_data_t *) kd)->enc_key_exp,
-                 ((ipsecmb_aes_cbc_key_data_t *) kd)->dec_key_exp);
+      ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
+                 ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
       return;
     }
 
@@ -512,8 +847,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
   ipsecmb_alg_data_t *ad;
   ipsecmb_per_thread_data_t *ptd;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
-  clib_error_t *error;
-  MB_MGR *m = 0;
+  IMB_MGR *m = 0;
   u32 eidx;
   u8 *name;
 
@@ -534,9 +868,13 @@ crypto_ipsecmb_init (vlib_main_t * vm)
   vec_foreach (ptd, imbm->per_thread_data)
     {
        ptd->mgr = alloc_mb_mgr (0);
-        if (clib_cpu_supports_avx512f ())
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+       clib_memset_u8 (ptd->burst_jobs, 0,
+                       sizeof (IMB_JOB) * IMB_MAX_BURST_SIZE);
+#endif
+       if (clib_cpu_supports_avx512f ())
          init_mb_mgr_avx512 (ptd->mgr);
-        else if (clib_cpu_supports_avx2 ())
+       else if (clib_cpu_supports_avx2 () && clib_cpu_supports_bmi2 ())
          init_mb_mgr_avx2 (ptd->mgr);
        else
          init_mb_mgr_sse (ptd->mgr);
@@ -546,9 +884,6 @@ crypto_ipsecmb_init (vlib_main_t * vm)
     }
   /* *INDENT-ON* */
 
-  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
-    return (error);
-
 #define _(a, b, c, d, e, f)                                              \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                     ipsecmb_ops_hmac_##a);               \
@@ -560,16 +895,16 @@ crypto_ipsecmb_init (vlib_main_t * vm)
 
   foreach_ipsecmb_hmac_op;
 #undef _
-#define _(a, b)                                                         \
-  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
-                                    ipsecmb_ops_cbc_cipher_enc_##a);    \
-  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
-                                    ipsecmb_ops_cbc_cipher_dec_##a);    \
-  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
-  ad->data_size = sizeof (ipsecmb_aes_cbc_key_data_t);                  \
-  ad->keyexp = m->keyexp_##b;                                           \
-
-  foreach_ipsecmb_cbc_cipher_op;
+#define _(a, b, c)                                                            \
+  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,       \
+                                   ipsecmb_ops_cipher_enc_##a);              \
+  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,       \
+                                   ipsecmb_ops_cipher_dec_##a);              \
+  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                                  \
+  ad->data_size = sizeof (ipsecmb_aes_key_data_t);                            \
+  ad->keyexp = m->keyexp_##b;
+
+  foreach_ipsecmb_cipher_op;
 #undef _
 #define _(a, b)                                                         \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
@@ -589,6 +924,23 @@ crypto_ipsecmb_init (vlib_main_t * vm)
   foreach_ipsecmb_gcm_cipher_op;
 #undef _
 
+#ifdef HAVE_IPSECMB_CHACHA_POLY
+  vnet_crypto_register_ops_handler (vm, eidx,
+                                   VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
+                                   ipsecmb_ops_chacha_poly_enc);
+  vnet_crypto_register_ops_handler (vm, eidx,
+                                   VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
+                                   ipsecmb_ops_chacha_poly_dec);
+  vnet_crypto_register_chained_ops_handler (
+    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
+    ipsec_mb_ops_chacha_poly_enc_chained);
+  vnet_crypto_register_chained_ops_handler (
+    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
+    ipsec_mb_ops_chacha_poly_dec_chained);
+  ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
+  ad->data_size = 0;
+#endif
+
   vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
   return (NULL);
 }
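
With crypto_ipsecmb_iv_init removed, the engine no longer seeds CBC IVs from /dev/urandom nor handles the old INIT_IV flag; whatever the caller places in op->iv is used as-is. Note also that the AVX2 manager is now only selected when BMI2 is available as well.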