diff --git a/src/plugins/crypto_ipsecmb/ipsecmb.c b/src/plugins/crypto_ipsecmb/ipsecmb.c
index 17cc90b544b..11e52322775 100644
--- a/src/plugins/crypto_ipsecmb/ipsecmb.c
+++ b/src/plugins/crypto_ipsecmb/ipsecmb.c
@@ -70,20 +70,20 @@ static ipsecmb_main_t ipsecmb_main = { };
   _(SHA512, SHA_512, sha512, 128, 64, 64)
 
 /*
- * (Alg, key-len-bits, iv-len-bytes)
+ * (Alg, key-len-bits)
  */
 #define foreach_ipsecmb_cbc_cipher_op \
-  _(AES_128_CBC, 128, 16) \
-  _(AES_192_CBC, 192, 16) \
-  _(AES_256_CBC, 256, 16)
+  _(AES_128_CBC, 128) \
+  _(AES_192_CBC, 192) \
+  _(AES_256_CBC, 256)
 
 /*
- * (Alg, key-len-bytes, iv-len-bytes)
+ * (Alg, key-len-bits)
  */
 #define foreach_ipsecmb_gcm_cipher_op \
-  _(AES_128_GCM, 128, 12) \
-  _(AES_192_GCM, 192, 12) \
-  _(AES_256_GCM, 256, 12)
+  _(AES_128_GCM, 128) \
+  _(AES_192_GCM, 192) \
+  _(AES_256_GCM, 256)
 
 always_inline void
 ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
@@ -189,9 +189,8 @@ ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
 }
 
 static_always_inline u32
-ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
-                               vnet_crypto_op_t * ops[],
-                               u32 n_ops, u32 key_len, u32 iv_len,
+ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+                               u32 n_ops, u32 key_len,
                                JOB_CIPHER_DIRECTION direction)
 {
   ipsecmb_main_t *imbm = &ipsecmb_main;
@@ -200,9 +199,6 @@ ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
   JOB_AES_HMAC *job;
   u32 i, n_fail = 0;
 
-  /*
-   * queue all the jobs first ...
-   */
   for (i = 0; i < n_ops; i++)
     {
       ipsecmb_aes_cbc_key_data_t *kd;
@@ -233,7 +229,7 @@ ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
       job->aes_enc_key_expanded = kd->enc_key_exp;
       job->aes_dec_key_expanded = kd->dec_key_exp;
       job->iv = op->iv;
-      job->iv_len_in_bytes = iv_len;
+      job->iv_len_in_bytes = AES_BLOCK_SIZE;
 
       job->user_data = op;
 
@@ -243,166 +239,164 @@ ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
           ipsecmb_retire_cipher_job (job, &n_fail);
         }
 
-  /*
-   * .. then flush (i.e. complete) them
-   * We will have queued enough to satisfy the 'multi' buffer
-   */
   while ((job = IMB_FLUSH_JOB (ptd->mgr)))
-    {
-      ipsecmb_retire_cipher_job (job, &n_fail);
-    }
+    ipsecmb_retire_cipher_job (job, &n_fail);
 
   return n_ops - n_fail;
 }
 
-#define _(a, b, c) \
+#define _(a, b) \
 static_always_inline u32 \
 ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm, \
                                 vnet_crypto_op_t * ops[], \
                                 u32 n_ops) \
-{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, c, ENCRYPT); } \
-
-foreach_ipsecmb_cbc_cipher_op;
-#undef _
-
-#define _(a, b, c) \
+{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, ENCRYPT); } \
+ \
 static_always_inline u32 \
 ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm, \
                                 vnet_crypto_op_t * ops[], \
                                 u32 n_ops) \
-{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, c, DECRYPT); } \
+{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, DECRYPT); } \
 
 foreach_ipsecmb_cbc_cipher_op;
 #undef _
 
-always_inline void
-ipsecmb_retire_gcm_cipher_job (JOB_AES_HMAC * job,
-                               u32 * n_fail, JOB_CIPHER_DIRECTION direction)
-{
-  vnet_crypto_op_t *op = job->user_data;
-
-  if (STS_COMPLETED != job->status)
-    {
-      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
-      *n_fail = *n_fail + 1;
-    }
-  else
-    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
-
-  if (DECRYPT == direction)
-    {
-      if ((memcmp (op->tag, job->auth_tag_output, op->tag_len)))
-        {
-          *n_fail = *n_fail + 1;
-          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
-        }
-    }
-}
-
-static_always_inline u32
-ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
-                               vnet_crypto_op_t * ops[],
-                               u32 n_ops, u32 key_len, u32 iv_len,
-                               JOB_CIPHER_DIRECTION direction)
-{
-  ipsecmb_main_t *imbm = &ipsecmb_main;
-  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
-                                                     vm->thread_index);
-  JOB_AES_HMAC *job;
-  u32 i, n_fail = 0;
-  u8 scratch[n_ops][64];
-
-  /*
-   * queue all the jobs first ...
-   */
-  for (i = 0; i < n_ops; i++)
-    {
-      struct gcm_key_data *kd;
-      vnet_crypto_op_t *op = ops[i];
-      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];
-      u32 nonce[3];
-      __m128i iv;
-
-      job = IMB_GET_NEXT_JOB (ptd->mgr);
-
-      job->src = op->src;
-      job->dst = op->dst;
-      job->msg_len_to_cipher_in_bytes = op->len;
-      job->cipher_start_src_offset_in_bytes = 0;
-
-      job->hash_alg = AES_GMAC;
-      job->cipher_mode = GCM;
-      job->cipher_direction = direction;
-      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
-
-      if (direction == ENCRYPT)
-        {
-          if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
-            {
-              iv = ptd->cbc_iv;
-              // only use 8 bytes of the IV
-              clib_memcpy_fast (op->iv, &iv, 8);
-              ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
-            }
-          nonce[0] = op->salt;
-          clib_memcpy_fast (nonce + 1, op->iv, 8);
-          job->iv = (u8 *) nonce;
-        }
-      else
-        {
-          nonce[0] = op->salt;
-          clib_memcpy_fast (nonce + 1, op->iv, 8);
-          job->iv = op->iv;
-        }
-
-      job->aes_key_len_in_bytes = key_len / 8;
-      job->aes_enc_key_expanded = kd;
-      job->aes_dec_key_expanded = kd;
-      job->iv_len_in_bytes = iv_len;
-
-      job->u.GCM.aad = op->aad;
-      job->u.GCM.aad_len_in_bytes = op->aad_len;
-      job->auth_tag_output_len_in_bytes = op->tag_len;
-      if (DECRYPT == direction)
-        job->auth_tag_output = scratch[i];
-      else
-        job->auth_tag_output = op->tag;
-      job->user_data = op;
-
-      job = IMB_SUBMIT_JOB (ptd->mgr);
-
-      if (job)
-        ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
-    }
-
-  /*
-   * .. then flush (i.e. complete) them
-   * We will have queued enough to satisfy the 'multi' buffer
-   */
-  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
-    {
-      ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
-    }
-
-  return n_ops - n_fail;
-}
-
-#define _(a, b, c) \
+#define _(a, b) \
 static_always_inline u32 \
-ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, \
-                                vnet_crypto_op_t * ops[], \
-                                u32 n_ops) \
-{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, c, ENCRYPT); } \
-
-foreach_ipsecmb_gcm_cipher_op;
-#undef _
-
-#define _(a, b, c) \
+ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm, \
+    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops) \
+{ \
+  ipsecmb_main_t *imbm = &ipsecmb_main; \
+  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
+                                                     vm->thread_index); \
+  MB_MGR *m = ptd->mgr; \
+  vnet_crypto_op_chunk_t *chp; \
+  u32 i, j; \
+ \
+  for (i = 0; i < n_ops; i++) \
+    { \
+      struct gcm_key_data *kd; \
+      struct gcm_context_data ctx; \
+      vnet_crypto_op_t *op = ops[i]; \
+ \
+      kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
+      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \
+      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len); \
+      chp = chunks + op->chunk_index; \
+      for (j = 0; j < op->n_chunks; j++) \
+        { \
+          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \
+                                       chp->len); \
+          chp += 1; \
+        } \
+      IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len); \
+ \
+      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
+    } \
+ \
+  return n_ops; \
+} \
+ \
 static_always_inline u32 \
-ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, \
-                                vnet_crypto_op_t * ops[], \
+ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
                                 u32 n_ops) \
-{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, c, DECRYPT); } \
+{ \
+  ipsecmb_main_t *imbm = &ipsecmb_main; \
+  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
+                                                     vm->thread_index); \
+  MB_MGR *m = ptd->mgr; \
+  u32 i; \
+ \
+  for (i = 0; i < n_ops; i++) \
+    { \
+      struct gcm_key_data *kd; \
+      struct gcm_context_data ctx; \
+      vnet_crypto_op_t *op = ops[i]; \
+ \
+      kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
+      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \
+                            op->aad, op->aad_len, op->tag, op->tag_len); \
+ \
+      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
+    } \
+ \
+  return n_ops; \
+} \
+ \
+static_always_inline u32 \
+ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm, \
+    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops) \
+{ \
+  ipsecmb_main_t *imbm = &ipsecmb_main; \
+  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
+                                                     vm->thread_index); \
+  MB_MGR *m = ptd->mgr; \
+  vnet_crypto_op_chunk_t *chp; \
+  u32 i, j, n_failed = 0; \
+ \
+  for (i = 0; i < n_ops; i++) \
+    { \
+      struct gcm_key_data *kd; \
+      struct gcm_context_data ctx; \
+      vnet_crypto_op_t *op = ops[i]; \
+      u8 scratch[64]; \
+ \
+      kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
+      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \
+      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len); \
+      chp = chunks + op->chunk_index; \
+      for (j = 0; j < op->n_chunks; j++) \
+        { \
+          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \
+                                       chp->len); \
+          chp += 1; \
+        } \
+      IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len); \
+ \
+      if ((memcmp (op->tag, scratch, op->tag_len))) \
+        { \
+          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \
+          n_failed++; \
+        } \
+      else \
+        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
+    } \
+ \
+  return n_ops - n_failed; \
+} \
+ \
+static_always_inline u32 \
+ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
+                                u32 n_ops) \
+{ \
+  ipsecmb_main_t *imbm = &ipsecmb_main; \
+  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
+                                                     vm->thread_index); \
+  MB_MGR *m = ptd->mgr; \
+  u32 i, n_failed = 0; \
+ \
+  for (i = 0; i < n_ops; i++) \
+    { \
+      struct gcm_key_data *kd; \
+      struct gcm_context_data ctx; \
+      vnet_crypto_op_t *op = ops[i]; \
+      u8 scratch[64]; \
+ \
+      kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
+      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \
+                            op->aad, op->aad_len, scratch, op->tag_len); \
+ \
+      if ((memcmp (op->tag, scratch, op->tag_len))) \
+        { \
+          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \
+          n_failed++; \
+        } \
+      else \
+        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
+    } \
+ \
+  return n_ops - n_failed; \
+}
 
 foreach_ipsecmb_gcm_cipher_op;
 #undef _
@@ -449,9 +443,7 @@ crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
       if (imbm->key_data[idx] == 0)
         return;
 
-      clib_memset_u8 (imbm->key_data[idx], 0,
-                      clib_mem_size (imbm->key_data[idx]));
-      clib_mem_free (imbm->key_data[idx]);
+      clib_mem_free_s (imbm->key_data[idx]);
       imbm->key_data[idx] = 0;
       return;
     }
@@ -463,9 +455,7 @@ crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
 
   if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
     {
-      clib_memset_u8 (imbm->key_data[idx], 0,
-                      clib_mem_size (imbm->key_data[idx]));
-      clib_mem_free (imbm->key_data[idx]);
+      clib_mem_free_s (imbm->key_data[idx]);
     }
 
   kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
@@ -522,8 +512,8 @@ crypto_ipsecmb_init (vlib_main_t * vm)
   u32 eidx;
   u8 *name;
 
-  if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
-    return error;
+  if (!clib_cpu_supports_aes ())
+    return 0;
 
   /*
-   * A priority that is better than OpenSSL but worse than VPP natvie
+   * A priority that is better than OpenSSL but worse than VPP native
@@ -564,7 +554,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
 foreach_ipsecmb_hmac_op;
 #undef _
 
-#define _(a, b, c) \
+#define _(a, b) \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                     ipsecmb_ops_cbc_cipher_enc_##a); \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
@@ -575,11 +565,17 @@ crypto_ipsecmb_init (vlib_main_t * vm)
 foreach_ipsecmb_cbc_cipher_op;
 #undef _
 
-#define _(a, b, c) \
+#define _(a, b) \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                     ipsecmb_ops_gcm_cipher_enc_##a); \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                     ipsecmb_ops_gcm_cipher_dec_##a); \
+  vnet_crypto_register_chained_ops_handler \
+    (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
+     ipsecmb_ops_gcm_cipher_enc_##a##_chained); \
+  vnet_crypto_register_chained_ops_handler \
+    (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
+     ipsecmb_ops_gcm_cipher_dec_##a##_chained); \
   ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \
   ad->data_size = sizeof (struct gcm_key_data); \
   ad->aes_gcm_pre = m->gcm##b##_pre; \
@@ -591,13 +587,18 @@ crypto_ipsecmb_init (vlib_main_t * vm)
 
   return (NULL);
 }
 
-VLIB_INIT_FUNCTION (crypto_ipsecmb_init);
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
+{
+  .runs_after = VLIB_INITS ("vnet_crypto_init"),
+};
+/* *INDENT-ON* */
 
 /* *INDENT-OFF* */
 VLIB_PLUGIN_REGISTER () =
 {
   .version = VPP_BUILD_VER,
-  .description = "Intel IPSEC multi-buffer",
+  .description = "Intel IPSEC Multi-buffer Crypto Engine",
 };
 /* *INDENT-ON* */
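
The chained-buffer handlers added above drive the intel-ipsec-mb direct GCM
API (INIT, one ENC/DEC_UPDATE per chunk, FINALIZE) instead of the multi-buffer
job manager, since a scatter-gather operation cannot be expressed as a single
job. Below is a minimal standalone sketch of the same call sequence, assuming
intel-ipsec-mb >= 0.52 and an AVX2-capable CPU; the helper name
gcm128_encrypt_chunks is hypothetical and not part of the plugin:

  /* Sketch: AES-128-GCM over a chain of buffers via the direct API. */
  #include <stdint.h>
  #include <intel-ipsec-mb.h>

  static int
  gcm128_encrypt_chunks (const uint8_t key[16], const uint8_t iv[12],
                         const uint8_t *aad, uint64_t aad_len,
                         uint8_t **src, uint8_t **dst, uint64_t *len,
                         uint32_t n_chunks, uint8_t tag[16])
  {
    MB_MGR *m = alloc_mb_mgr (0);
    /* the expanded key schedule must be 16-byte aligned */
    DECLARE_ALIGNED (struct gcm_key_data kd, 16);
    struct gcm_context_data ctx;
    uint32_t i;

    if (!m)
      return -1;
    init_mb_mgr_avx2 (m);              /* pick the variant for the CPU */

    IMB_AES128_GCM_PRE (m, key, &kd);  /* expand the key once per key */
    IMB_AES128_GCM_INIT (m, &kd, &ctx, iv, aad, aad_len);
    for (i = 0; i < n_chunks; i++)     /* one UPDATE per chunk */
      IMB_AES128_GCM_ENC_UPDATE (m, &kd, &ctx, dst[i], src[i], len[i]);
    IMB_AES128_GCM_ENC_FINALIZE (m, &kd, &ctx, tag, 16);

    free_mb_mgr (m);
    return 0;
  }

In the plugin itself the MB_MGR is per thread (ptd->mgr) and the expanded key
comes from imbm->key_data, so only the INIT/UPDATE/FINALIZE sequence appears
in the macro bodies.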
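On the decrypt side, both new handlers finalize the computed ICV into a local
scratch buffer and compare it against op->tag, so the caller's tag is never
overwritten; a mismatch is reported as VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC,
the existing bad-authentication status code, even though GCM carries an ICV
rather than an HMAC. A minimal sketch of that verify pattern under the same
assumptions (the helper name gcm128_decrypt_verify is hypothetical; the
caller supplies an initialized MB_MGR and a key expanded with
IMB_AES128_GCM_PRE):

  #include <string.h>
  #include <stdint.h>
  #include <intel-ipsec-mb.h>

  static int
  gcm128_decrypt_verify (MB_MGR *m, const struct gcm_key_data *kd,
                         const uint8_t iv[12], const uint8_t *aad,
                         uint64_t aad_len, const uint8_t *src, uint8_t *dst,
                         uint64_t len, const uint8_t *tag, uint64_t tag_len)
  {
    struct gcm_context_data ctx;
    uint8_t scratch[16];               /* a GCM tag is at most 16 bytes */

    /* single-shot decrypt; the computed tag lands in scratch */
    IMB_AES128_GCM_DEC (m, kd, &ctx, dst, src, len, iv,
                        aad, aad_len, scratch, tag_len);
    /* accept the payload only if the tags match */
    return memcmp (tag, scratch, tag_len) == 0 ? 0 : -1;
  }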