Bump to intel-ipsec-mb version 0.52
[vpp.git] / src / plugins / crypto_ipsecmb / ipsecmb.c
index 309623e..6d4d913 100644 (file)
@@ -15,6 +15,8 @@
  * limitations under the License.
  */
 
+#include <fcntl.h>
+#include <unistd.h>
+
 #include <intel-ipsec-mb.h>
 
 #include <vnet/vnet.h>
@@ -26,6 +28,7 @@
 typedef struct
 {
   MB_MGR *mgr;
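+  /* per-thread IV state: seeded from /dev/urandom at init and advanced
+   * by one AES round each time the engine generates an IV */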
+  __m128i cbc_iv;
 } ipsecmb_per_thread_data_t;
 
 typedef struct ipsecmb_main_t_
@@ -33,6 +36,26 @@ typedef struct ipsecmb_main_t_
   ipsecmb_per_thread_data_t *per_thread_data;
 } ipsecmb_main_t;
 
+/**
+ * AES GCM key-expansion VFT
+ */
+typedef void (*ase_gcm_pre_t) (const void *key,
+                              struct gcm_key_data * key_data);
+
+typedef struct ipsecmb_gcm_pre_vft_t_
+{
+  ase_gcm_pre_t ase_gcm_pre_128;
+  ase_gcm_pre_t ase_gcm_pre_192;
+  ase_gcm_pre_t ase_gcm_pre_256;
+} ipsecmb_gcm_pre_vft_t;
+
+static ipsecmb_gcm_pre_vft_t ipsecmb_gcm_pre_vft;
+
+#define INIT_IPSEC_MB_GCM_PRE(_arch)                                    \
+  ipsecmb_gcm_pre_vft.ase_gcm_pre_128 = aes_gcm_pre_128_##_arch;        \
+  ipsecmb_gcm_pre_vft.ase_gcm_pre_192 = aes_gcm_pre_192_##_arch;        \
+  ipsecmb_gcm_pre_vft.ase_gcm_pre_256 = aes_gcm_pre_256_##_arch;
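+
+/*
+ * For example, INIT_IPSEC_MB_GCM_PRE (avx_gen4) expands to:
+ *
+ *   ipsecmb_gcm_pre_vft.ase_gcm_pre_128 = aes_gcm_pre_128_avx_gen4;
+ *   ipsecmb_gcm_pre_vft.ase_gcm_pre_192 = aes_gcm_pre_192_avx_gen4;
+ *   ipsecmb_gcm_pre_vft.ase_gcm_pre_256 = aes_gcm_pre_256_avx_gen4;
+ *
+ * i.e. the arch-specific key-expansion routines are picked once at
+ * init time and reached through a single indirection thereafter.
+ */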
+
 static ipsecmb_main_t ipsecmb_main;
 
 #define foreach_ipsecmb_hmac_op                                \
@@ -41,10 +64,21 @@ static ipsecmb_main_t ipsecmb_main;
   _(SHA384, SHA_384, sha384)                                   \
   _(SHA512, SHA_512, sha512)
 
-#define foreach_ipsecmb_cipher_op                              \
-  _(AES_128_CBC, 128)                                          \
-  _(AES_192_CBC, 192)                                          \
-  _(AES_256_CBC, 256)
+/*
+ * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
+ */
+#define foreach_ipsecmb_cbc_cipher_op                          \
+  _(AES_128_CBC, 128, 16, 16)                                  \
+  _(AES_192_CBC, 192, 24, 16)                                  \
+  _(AES_256_CBC, 256, 32, 16)
+
+/*
+ * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
+ */
+#define foreach_ipsecmb_gcm_cipher_op                          \
+  _(AES_128_GCM, 128, 16, 12)                                  \
+  _(AES_192_GCM, 192, 24, 12)                                  \
+  _(AES_256_GCM, 256, 32, 12)
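+
+/*
+ * GCM's 12-byte IV is the 4-byte salt from the SA followed by the
+ * 8-byte per-packet IV; the two are glued together at job-submission
+ * time (see the nonce assembly below).
+ */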
 
 always_inline void
 hash_expand_keys (const MB_MGR * mgr,
@@ -199,11 +233,11 @@ ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
 }
 
 static_always_inline u32
-ipsecmb_ops_cipher_inline (vlib_main_t * vm,
-                          const ipsecmb_per_thread_data_t * ptd,
-                          vnet_crypto_op_t * ops[],
-                          u32 n_ops,
-                          keyexp_t fn, JOB_CIPHER_DIRECTION direction)
+ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
+                              ipsecmb_per_thread_data_t * ptd,
+                              vnet_crypto_op_t * ops[],
+                              u32 n_ops, u32 key_len, u32 iv_len,
+                              keyexp_t fn, JOB_CIPHER_DIRECTION direction)
 {
   JOB_AES_HMAC *job;
   u32 i, n_fail = 0;
@@ -216,6 +250,7 @@ ipsecmb_ops_cipher_inline (vlib_main_t * vm,
       u8 aes_enc_key_expanded[EXPANDED_KEY_N_BYTES];
       u8 aes_dec_key_expanded[EXPANDED_KEY_N_BYTES];
       vnet_crypto_op_t *op = ops[i];
+      __m128i iv;
 
       fn (op->key, aes_enc_key_expanded, aes_dec_key_expanded);
 
@@ -231,11 +266,18 @@ ipsecmb_ops_cipher_inline (vlib_main_t * vm,
       job->cipher_direction = direction;
       job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
 
-      job->aes_key_len_in_bytes = op->key_len;
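+      /*
+       * If the caller asked the engine to generate the IV
+       * (VNET_CRYPTO_OP_FLAG_INIT_IV), hand out the current per-thread
+       * value and advance the state with one AES round so the next
+       * generated IV differs from this one.
+       */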
+      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
+       {
+         iv = ptd->cbc_iv;
+         _mm_storeu_si128 ((__m128i *) op->iv, iv);
+         ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
+       }
+
+      job->aes_key_len_in_bytes = key_len;
       job->aes_enc_key_expanded = aes_enc_key_expanded;
       job->aes_dec_key_expanded = aes_dec_key_expanded;
       job->iv = op->iv;
-      job->iv_len_in_bytes = op->iv_len;
+      job->iv_len_in_bytes = iv_len;
 
       job->user_data = op;
 
@@ -257,11 +299,11 @@ ipsecmb_ops_cipher_inline (vlib_main_t * vm,
   return n_ops - n_fail;
 }
 
-#define _(a, b)                                                         \
+#define _(a, b, c, d)                                                   \
 static_always_inline u32                                                \
-ipsecmb_ops_cipher_enc_##a (vlib_main_t * vm,                           \
-                            vnet_crypto_op_t * ops[],                   \
-                            u32 n_ops)                                  \
+ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm,                       \
+                                vnet_crypto_op_t * ops[],               \
+                                u32 n_ops)                              \
 {                                                                       \
   ipsecmb_per_thread_data_t *ptd;                                       \
   ipsecmb_main_t *imbm;                                                 \
@@ -269,18 +311,18 @@ ipsecmb_ops_cipher_enc_##a (vlib_main_t * vm,                           \
   imbm = &ipsecmb_main;                                                 \
   ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                         \
-  return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops,                \
-                                    ptd->mgr->keyexp_##b,               \
-                                    ENCRYPT);                           \
+  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
+                                        ptd->mgr->keyexp_##b,           \
+                                        ENCRYPT);                       \
   }
-foreach_ipsecmb_cipher_op;
+foreach_ipsecmb_cbc_cipher_op;
 #undef _
 
-#define _(a, b)                                                         \
+#define _(a, b, c, d)                                                   \
 static_always_inline u32                                                \
-ipsecmb_ops_cipher_dec_##a (vlib_main_t * vm,                           \
-                            vnet_crypto_op_t * ops[],                   \
-                            u32 n_ops)                                  \
+ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm,                       \
+                                vnet_crypto_op_t * ops[],               \
+                                u32 n_ops)                              \
 {                                                                       \
   ipsecmb_per_thread_data_t *ptd;                                       \
   ipsecmb_main_t *imbm;                                                 \
@@ -288,13 +330,187 @@ ipsecmb_ops_cipher_dec_##a (vlib_main_t * vm,                           \
   imbm = &ipsecmb_main;                                                 \
   ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                         \
-  return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops,                \
-                                    ptd->mgr->keyexp_##b,               \
-                                    DECRYPT);                           \
+  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
+                                        ptd->mgr->keyexp_##b,           \
+                                        DECRYPT);                       \
   }
-foreach_ipsecmb_cipher_op;
+foreach_ipsecmb_cbc_cipher_op;
 #undef _
 
+always_inline void
+ipsecmb_retire_gcm_cipher_job (JOB_AES_HMAC * job,
+                              u32 * n_fail, JOB_CIPHER_DIRECTION direction)
+{
+  vnet_crypto_op_t *op = job->user_data;
+
+  if (STS_COMPLETED != job->status)
+    {
+      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+      *n_fail = *n_fail + 1;
+      return;
+    }
+  else
+    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
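+  /*
+   * On decrypt the computed tag was written into per-op scratch;
+   * check it against the tag that arrived with the packet.
+   */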
+  if (DECRYPT == direction)
+    {
+      if ((memcmp (op->tag, job->auth_tag_output, op->tag_len)))
+       {
+         *n_fail = *n_fail + 1;
+         op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+       }
+    }
+}
+
+static_always_inline u32
+ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
+                              ipsecmb_per_thread_data_t * ptd,
+                              vnet_crypto_op_t * ops[],
+                              u32 n_ops, u32 key_len, u32 iv_len,
+                              ase_gcm_pre_t fn,
+                              JOB_CIPHER_DIRECTION direction)
+{
+  JOB_AES_HMAC *job;
+  u32 i, n_fail = 0;
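+  /* computed auth tags for decrypt jobs are parked here until they can
+   * be checked against the received tag at retire time */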
+  u8 scratch[n_ops][64];
+
+  /*
+   * queue all the jobs first ...
+   */
+  for (i = 0; i < n_ops; i++)
+    {
+      struct gcm_key_data key_data;
+      vnet_crypto_op_t *op = ops[i];
+      u32 nonce[3];
+      __m128i iv;
+
+      fn (op->key, &key_data);
+
+      job = IMB_GET_NEXT_JOB (ptd->mgr);
+
+      job->src = op->src;
+      job->dst = op->dst;
+      job->msg_len_to_cipher_in_bytes = op->len;
+      job->cipher_start_src_offset_in_bytes = 0;
+
+      job->hash_alg = AES_GMAC;
+      job->cipher_mode = GCM;
+      job->cipher_direction = direction;
+      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
+
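+      /*
+       * Build the 12-byte GCM IV: nonce = [ salt (4B) | explicit IV (8B) ]
+       */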
+      if (direction == ENCRYPT)
+       {
+         if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
+           {
+             iv = ptd->cbc_iv;
+             // only use 8 bytes of the IV
+             clib_memcpy_fast (op->iv, &iv, 8);
+             ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
+           }
+         nonce[0] = op->salt;
+         clib_memcpy_fast (nonce + 1, op->iv, 8);
+         job->iv = (u8 *) nonce;
+       }
+      else
+       {
+         nonce[0] = op->salt;
+         clib_memcpy_fast (nonce + 1, op->iv, 8);
+         job->iv = (u8 *) nonce;
+       }
+
+      job->aes_key_len_in_bytes = key_len;
+      job->aes_enc_key_expanded = &key_data;
+      job->aes_dec_key_expanded = &key_data;
+      job->iv_len_in_bytes = iv_len;
+
+      job->u.GCM.aad = op->aad;
+      job->u.GCM.aad_len_in_bytes = op->aad_len;
+      job->auth_tag_output_len_in_bytes = op->tag_len;
+      if (DECRYPT == direction)
+       job->auth_tag_output = scratch[i];
+      else
+       job->auth_tag_output = op->tag;
+      job->user_data = op;
+
+      job = IMB_SUBMIT_JOB (ptd->mgr);
+
+      if (job)
+       ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
+    }
+
+  /*
+   * ... then flush (i.e. force completion of) any jobs the manager is
+   * still holding: submission alone need not fill the multi-buffer
+   * width, so the remaining jobs only complete here.
+   */
+  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
+    {
+      ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
+    }
+
+  return n_ops - n_fail;
+}
+
+#define _(a, b, c, d)                                                        \
+static_always_inline u32                                                     \
+ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm,                            \
+                                vnet_crypto_op_t * ops[],                    \
+                                u32 n_ops)                                   \
+{                                                                            \
+  ipsecmb_per_thread_data_t *ptd;                                            \
+  ipsecmb_main_t *imbm;                                                      \
+                                                                             \
+  imbm = &ipsecmb_main;                                                      \
+  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);          \
+                                                                             \
+  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d,           \
+                                        ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
+                                        ENCRYPT);                            \
+  }
+foreach_ipsecmb_gcm_cipher_op;
+#undef _
+
+#define _(a, b, c, d)                                                        \
+static_always_inline u32                                                     \
+ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm,                            \
+                                vnet_crypto_op_t * ops[],                    \
+                                u32 n_ops)                                   \
+{                                                                            \
+  ipsecmb_per_thread_data_t *ptd;                                            \
+  ipsecmb_main_t *imbm;                                                      \
+                                                                             \
+  imbm = &ipsecmb_main;                                                      \
+  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);          \
+                                                                             \
+  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d,           \
+                                        ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
+                                        DECRYPT);                            \
+  }
+foreach_ipsecmb_gcm_cipher_op;
+#undef _
+
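+/**
+ * Seed each worker thread's IV state from /dev/urandom; the cipher
+ * paths above then advance it by one AES round per generated IV.
+ */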
+clib_error_t *
+crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
+{
+  ipsecmb_per_thread_data_t *ptd;
+  clib_error_t *err = 0;
+  int fd;
+
+  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
+    return clib_error_return_unix (0, "failed to open '/dev/urandom'");
+
+  vec_foreach (ptd, imbm->per_thread_data)
+  {
+    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
+      {
+       err = clib_error_return_unix (0, "'/dev/urandom' read failure");
+       close (fd);
+       return (err);
+      }
+  }
+
+  close (fd);
+  return (NULL);
+}
+
 static clib_error_t *
 crypto_ipsecmb_init (vlib_main_t * vm)
 {
@@ -303,6 +519,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   clib_error_t *error;
   u32 eidx;
+  u8 *name;
 
   if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
     return error;
@@ -310,8 +527,9 @@ crypto_ipsecmb_init (vlib_main_t * vm)
   /*
    * A priority that is better than OpenSSL but worse than VPP native
    */
-  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80,
-                                     "Intel IPSEC multi-buffer");
+  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
+                IMB_VERSION_STR, 0);
+  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);
 
   vec_validate (imbm->per_thread_data, tm->n_vlib_mains - 1);
 
@@ -321,6 +539,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
       {
        ptd->mgr = alloc_mb_mgr (0);
        init_mb_mgr_avx512 (ptd->mgr);
+       INIT_IPSEC_MB_GCM_PRE (avx_gen4);
       }
     }
   else if (clib_cpu_supports_avx2 ())
@@ -329,6 +548,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
       {
        ptd->mgr = alloc_mb_mgr (0);
        init_mb_mgr_avx2 (ptd->mgr);
+       INIT_IPSEC_MB_GCM_PRE (avx_gen2);
       }
     }
   else
@@ -337,29 +557,46 @@ crypto_ipsecmb_init (vlib_main_t * vm)
       {
        ptd->mgr = alloc_mb_mgr (0);
        init_mb_mgr_sse (ptd->mgr);
+       INIT_IPSEC_MB_GCM_PRE (sse);
       }
     }
 
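+  /* IV generation relies on AES-NI (_mm_aesenc_si128), hence the gate
+   * on AES instruction support */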
+  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
+    return (error);
+
 #define _(a, b, c)                                                       \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                     ipsecmb_ops_hmac_##a);               \
 
   foreach_ipsecmb_hmac_op;
 #undef _
-#define _(a, b)                                                         \
+#define _(a, b, c, d)                                                   \
+  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
+                                    ipsecmb_ops_cbc_cipher_enc_##a);    \
+
+  foreach_ipsecmb_cbc_cipher_op;
+#undef _
+#define _(a, b, c, d)                                                   \
+  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
+                                    ipsecmb_ops_cbc_cipher_dec_##a);    \
+
+  foreach_ipsecmb_cbc_cipher_op;
+#undef _
+#define _(a, b, c, d)                                                   \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
-                                    ipsecmb_ops_cipher_enc_##a);        \
+                                    ipsecmb_ops_gcm_cipher_enc_##a);    \
 
-  foreach_ipsecmb_cipher_op;
+  foreach_ipsecmb_gcm_cipher_op;
 #undef _
-#define _(a, b)                                                         \
+#define _(a, b, c, d)                                                   \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
-                                    ipsecmb_ops_cipher_dec_##a);        \
+                                    ipsecmb_ops_gcm_cipher_dec_##a);    \
 
-  foreach_ipsecmb_cipher_op;
+  foreach_ipsecmb_gcm_cipher_op;
 #undef _
 
-  return 0;
+  return (NULL);
 }
 
 VLIB_INIT_FUNCTION (crypto_ipsecmb_init);