_(SHA512, SHA_512, sha512, 128, 64, 64)
/*
- * (Alg, key-len-bits, iv-len-bytes)
+ * (Alg, key-len-bits)
*/
#define foreach_ipsecmb_cbc_cipher_op \
- _(AES_128_CBC, 128, 16) \
- _(AES_192_CBC, 192, 16) \
- _(AES_256_CBC, 256, 16)
+ _(AES_128_CBC, 128) \
+ _(AES_192_CBC, 192) \
+ _(AES_256_CBC, 256)
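+/* each entry is expanded by the `_' macro defined below into a pair of
+ * handlers, e.g. ipsecmb_ops_cbc_cipher_enc_AES_128_CBC and
+ * ipsecmb_ops_cbc_cipher_dec_AES_128_CBC */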
/*
- * (Alg, key-len-bytes, iv-len-bytes)
+ * (Alg, key-len-bits)
*/
#define foreach_ipsecmb_gcm_cipher_op \
- _(AES_128_GCM, 128, 12) \
- _(AES_192_GCM, 192, 12) \
- _(AES_256_GCM, 256, 12)
+ _(AES_128_GCM, 128) \
+ _(AES_192_GCM, 192) \
+ _(AES_256_GCM, 256)
always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
}
static_always_inline u32
-ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
- vnet_crypto_op_t * ops[],
- u32 n_ops, u32 key_len, u32 iv_len,
+ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ u32 n_ops, u32 key_len,
JOB_CIPHER_DIRECTION direction)
{
ipsecmb_main_t *imbm = &ipsecmb_main;
JOB_AES_HMAC *job;
u32 i, n_fail = 0;
- /*
- * queue all the jobs first ...
- */
for (i = 0; i < n_ops; i++)
{
ipsecmb_aes_cbc_key_data_t *kd;
job->aes_enc_key_expanded = kd->enc_key_exp;
job->aes_dec_key_expanded = kd->dec_key_exp;
job->iv = op->iv;
- job->iv_len_in_bytes = iv_len;
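+ /* CBC uses an IV of one AES block regardless of the key size */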
+ job->iv_len_in_bytes = AES_BLOCK_SIZE;
job->user_data = op;
ipsecmb_retire_cipher_job (job, &n_fail);
}
- /*
- * .. then flush (i.e. complete) them
- * We will have queued enough to satisfy the 'multi' buffer
- */
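+ /* flush the multi-buffer manager to complete any still-queued jobs */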
while ((job = IMB_FLUSH_JOB (ptd->mgr)))
- {
- ipsecmb_retire_cipher_job (job, &n_fail);
- }
+ ipsecmb_retire_cipher_job (job, &n_fail);
return n_ops - n_fail;
}
-#define _(a, b, c) \
+#define _(a, b) \
static_always_inline u32 \
ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm, \
vnet_crypto_op_t * ops[], \
u32 n_ops) \
-{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, c, ENCRYPT); } \
-
-foreach_ipsecmb_cbc_cipher_op;
-#undef _
-
-#define _(a, b, c) \
+{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, ENCRYPT); } \
+ \
static_always_inline u32 \
ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm, \
vnet_crypto_op_t * ops[], \
u32 n_ops) \
-{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, c, DECRYPT); } \
+{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, DECRYPT); } \
foreach_ipsecmb_cbc_cipher_op;
#undef _
{
op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
*n_fail = *n_fail + 1;
+ return;
}
else
op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}
static_always_inline u32
-ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
- vnet_crypto_op_t * ops[],
- u32 n_ops, u32 key_len, u32 iv_len,
+ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ u32 n_ops, u32 key_len,
JOB_CIPHER_DIRECTION direction)
{
ipsecmb_main_t *imbm = &ipsecmb_main;
struct gcm_key_data *kd;
vnet_crypto_op_t *op = ops[i];
kd = (struct gcm_key_data *) imbm->key_data[op->key_index];
- u32 nonce[3];
- __m128i iv;
job = IMB_GET_NEXT_JOB (ptd->mgr);
job->cipher_direction = direction;
job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
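+ /* encrypt before tagging on the way out, verify the tag before
+ * decrypting on the way in */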
- if (direction == ENCRYPT)
- {
- if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
- {
- iv = ptd->cbc_iv;
- // only use 8 bytes of the IV
- clib_memcpy_fast (op->iv, &iv, 8);
- ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
- }
- nonce[0] = op->salt;
- clib_memcpy_fast (nonce + 1, op->iv, 8);
- job->iv = (u8 *) nonce;
- }
- else
- {
- nonce[0] = op->salt;
- clib_memcpy_fast (nonce + 1, op->iv, 8);
- job->iv = op->iv;
- }
-
+ job->iv = op->iv;
job->aes_key_len_in_bytes = key_len / 8;
job->aes_enc_key_expanded = kd;
job->aes_dec_key_expanded = kd;
- job->iv_len_in_bytes = iv_len;
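+ /* GCM runs with the standard 96-bit (12 byte) IV */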
+ job->iv_len_in_bytes = 12;
job->u.GCM.aad = op->aad;
job->u.GCM.aad_len_in_bytes = op->aad_len;
ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
}
- /*
- * .. then flush (i.e. complete) them
- * We will have queued enough to satisfy the 'multi' buffer
- */
while ((job = IMB_FLUSH_JOB (ptd->mgr)))
- {
- ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
- }
+ ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
return n_ops - n_fail;
}
-#define _(a, b, c) \
+#define _(a, b) \
static_always_inline u32 \
-ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, \
- vnet_crypto_op_t * ops[], \
+ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
u32 n_ops) \
-{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, c, ENCRYPT); } \
-
-foreach_ipsecmb_gcm_cipher_op;
-#undef _
-
-#define _(a, b, c) \
+{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, ENCRYPT); } \
+ \
static_always_inline u32 \
-ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, \
- vnet_crypto_op_t * ops[], \
+ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
u32 n_ops) \
-{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, c, DECRYPT); } \
+{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, DECRYPT); } \
foreach_ipsecmb_gcm_cipher_op;
#undef _
u32 eidx;
u8 *name;
- if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
- return error;
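+ /* this engine requires AES-NI; without it, register nothing */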
+ if (!clib_cpu_supports_aes ())
+ return 0;
/*
- * A priority that is better than OpenSSL but worse than VPP natvie
+ * A priority that is better than OpenSSL but worse than VPP native
foreach_ipsecmb_hmac_op;
#undef _
-#define _(a, b, c) \
+#define _(a, b) \
vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
ipsecmb_ops_cbc_cipher_enc_##a); \
vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
foreach_ipsecmb_cbc_cipher_op;
#undef _
-#define _(a, b, c) \
+#define _(a, b) \
vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
ipsecmb_ops_gcm_cipher_enc_##a); \
vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
return (NULL);
}
-VLIB_INIT_FUNCTION (crypto_ipsecmb_init);
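+/* registration needs vnet/crypto, so run after vnet_crypto_init */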
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
+{
+ .runs_after = VLIB_INITS ("vnet_crypto_init"),
+};
+/* *INDENT-ON* */
/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
.version = VPP_BUILD_VER,
- .description = "Intel IPSEC multi-buffer",
+ .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
/* *INDENT-ON* */