X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fcrypto_ipsecmb%2Fipsecmb.c;h=11e52322775556d23999d275deea3e1e9abbd164;hb=2fc4091319bdbbac25ce1132cfb73b5077426b75;hp=d579c0d74a65f6e9547ba812cccd6ab022cd07b6;hpb=d0d6b942e954b5e26d04ba830ec1fe5fa1f4d4b6;p=vpp.git diff --git a/src/plugins/crypto_ipsecmb/ipsecmb.c b/src/plugins/crypto_ipsecmb/ipsecmb.c index d579c0d74a6..11e52322775 100644 --- a/src/plugins/crypto_ipsecmb/ipsecmb.c +++ b/src/plugins/crypto_ipsecmb/ipsecmb.c @@ -25,97 +25,107 @@ #include #include +#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE +#define EXPANDED_KEY_N_BYTES (16 * 15) + typedef struct { MB_MGR *mgr; __m128i cbc_iv; } ipsecmb_per_thread_data_t; +typedef struct +{ + u16 data_size; + u8 block_size; + aes_gcm_pre_t aes_gcm_pre; + keyexp_t keyexp; + hash_one_block_t hash_one_block; + hash_fn_t hash_fn; +} ipsecmb_alg_data_t; + typedef struct ipsecmb_main_t_ { ipsecmb_per_thread_data_t *per_thread_data; + ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS]; + void **key_data; } ipsecmb_main_t; -static ipsecmb_main_t ipsecmb_main; - -#define foreach_ipsecmb_hmac_op \ - _(SHA1, SHA1, sha1) \ - _(SHA256, SHA_256, sha256) \ - _(SHA384, SHA_384, sha384) \ - _(SHA512, SHA_512, sha512) - -#define foreach_ipsecmb_cipher_op \ - _(AES_128_CBC, 128, 16, 16) \ - _(AES_192_CBC, 192, 24, 16) \ - _(AES_256_CBC, 256, 32, 16) - -always_inline void -hash_expand_keys (const MB_MGR * mgr, - const u8 * key, - u32 length, - u8 block_size, - u8 ipad[256], u8 opad[256], hash_one_block_t fn) +typedef struct { - u8 buf[block_size]; - int i = 0; + u8 enc_key_exp[EXPANDED_KEY_N_BYTES]; + u8 dec_key_exp[EXPANDED_KEY_N_BYTES]; +} ipsecmb_aes_cbc_key_data_t; - if (length > block_size) - { - return; - } +static ipsecmb_main_t ipsecmb_main = { }; - memset (buf, 0x36, sizeof (buf)); - for (i = 0; i < length; i++) - { - buf[i] ^= key[i]; - } - fn (buf, ipad); +/* + * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes) + */ +#define foreach_ipsecmb_hmac_op \ + _(SHA1, SHA1, sha1, 64, 20, 20) \ + _(SHA224, SHA_224, sha224, 64, 32, 28) \ + _(SHA256, SHA_256, sha256, 64, 32, 32) \ + _(SHA384, SHA_384, sha384, 128, 64, 48) \ + _(SHA512, SHA_512, sha512, 128, 64, 64) - memset (buf, 0x5c, sizeof (buf)); +/* + * (Alg, key-len-bits) + */ +#define foreach_ipsecmb_cbc_cipher_op \ + _(AES_128_CBC, 128) \ + _(AES_192_CBC, 192) \ + _(AES_256_CBC, 256) - for (i = 0; i < length; i++) - { - buf[i] ^= key[i]; - } - fn (buf, opad); -} +/* + * (Alg, key-len-bytes, iv-len-bytes) + */ +#define foreach_ipsecmb_gcm_cipher_op \ + _(AES_128_GCM, 128) \ + _(AES_192_GCM, 192) \ + _(AES_256_GCM, 256) always_inline void -ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail) +ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size) { vnet_crypto_op_t *op = job->user_data; + u32 len = op->digest_len ? 
op->digest_len : digest_size; if (STS_COMPLETED != job->status) { op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; *n_fail = *n_fail + 1; + return; } - else - op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK) { - if ((memcmp (op->digest, job->auth_tag_output, op->digest_len))) + if ((memcmp (op->digest, job->auth_tag_output, len))) { *n_fail = *n_fail + 1; op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; + return; } } + else if (len == digest_size) + clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size); else - clib_memcpy_fast (op->digest, job->auth_tag_output, op->digest_len); + clib_memcpy_fast (op->digest, job->auth_tag_output, len); + + op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; } static_always_inline u32 -ipsecmb_ops_hmac_inline (vlib_main_t * vm, - const ipsecmb_per_thread_data_t * ptd, - vnet_crypto_op_t * ops[], - u32 n_ops, - u32 block_size, - hash_one_block_t fn, JOB_HASH_ALG alg) +ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[], + u32 n_ops, u32 block_size, u32 hash_size, + u32 digest_size, JOB_HASH_ALG alg) { + ipsecmb_main_t *imbm = &ipsecmb_main; + ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, + vm->thread_index); JOB_AES_HMAC *job; u32 i, n_fail = 0; - u8 scratch[n_ops][64]; + u8 scratch[n_ops][digest_size]; /* * queue all the jobs first ... @@ -123,10 +133,7 @@ ipsecmb_ops_hmac_inline (vlib_main_t * vm, for (i = 0; i < n_ops; i++) { vnet_crypto_op_t *op = ops[i]; - u8 ipad[256], opad[256]; - - hash_expand_keys (ptd->mgr, op->key, op->key_len, - block_size, ipad, opad, fn); + u8 *kd = (u8 *) imbm->key_data[op->key_index]; job = IMB_GET_NEXT_JOB (ptd->mgr); @@ -134,59 +141,39 @@ ipsecmb_ops_hmac_inline (vlib_main_t * vm, job->hash_start_src_offset_in_bytes = 0; job->msg_len_to_hash_in_bytes = op->len; job->hash_alg = alg; - job->auth_tag_output_len_in_bytes = op->digest_len; + job->auth_tag_output_len_in_bytes = digest_size; job->auth_tag_output = scratch[i]; job->cipher_mode = NULL_CIPHER; job->cipher_direction = DECRYPT; job->chain_order = HASH_CIPHER; - job->aes_key_len_in_bytes = op->key_len; - - job->u.HMAC._hashed_auth_key_xor_ipad = ipad; - job->u.HMAC._hashed_auth_key_xor_opad = opad; + job->u.HMAC._hashed_auth_key_xor_ipad = kd; + job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size; job->user_data = op; job = IMB_SUBMIT_JOB (ptd->mgr); if (job) - ipsecmb_retire_hmac_job (job, &n_fail); + ipsecmb_retire_hmac_job (job, &n_fail, digest_size); } - /* - * .. then flush (i.e. 
complete) them - * We will have queued enough to satisfy the 'multi' buffer - */ while ((job = IMB_FLUSH_JOB (ptd->mgr))) - { - ipsecmb_retire_hmac_job (job, &n_fail); - } + ipsecmb_retire_hmac_job (job, &n_fail, digest_size); return n_ops - n_fail; } -#define _(a, b, c) \ +#define _(a, b, c, d, e, f) \ static_always_inline u32 \ ipsecmb_ops_hmac_##a (vlib_main_t * vm, \ vnet_crypto_op_t * ops[], \ u32 n_ops) \ -{ \ - ipsecmb_per_thread_data_t *ptd; \ - ipsecmb_main_t *imbm; \ - \ - imbm = &ipsecmb_main; \ - ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \ - \ - return ipsecmb_ops_hmac_inline (vm, ptd, ops, n_ops, \ - b##_BLOCK_SIZE, \ - ptd->mgr->c##_one_block, \ - b); \ - } +{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); } \ + foreach_ipsecmb_hmac_op; #undef _ -#define EXPANDED_KEY_N_BYTES (16 * 15) - always_inline void ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail) { @@ -202,27 +189,23 @@ ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail) } static_always_inline u32 -ipsecmb_ops_cipher_inline (vlib_main_t * vm, - ipsecmb_per_thread_data_t * ptd, - vnet_crypto_op_t * ops[], - u32 n_ops, u32 key_len, u32 iv_len, - keyexp_t fn, JOB_CIPHER_DIRECTION direction) +ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[], + u32 n_ops, u32 key_len, + JOB_CIPHER_DIRECTION direction) { + ipsecmb_main_t *imbm = &ipsecmb_main; + ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, + vm->thread_index); JOB_AES_HMAC *job; u32 i, n_fail = 0; - /* - * queue all the jobs first ... - */ for (i = 0; i < n_ops; i++) { - u8 aes_enc_key_expanded[EXPANDED_KEY_N_BYTES]; - u8 aes_dec_key_expanded[EXPANDED_KEY_N_BYTES]; + ipsecmb_aes_cbc_key_data_t *kd; vnet_crypto_op_t *op = ops[i]; + kd = (ipsecmb_aes_cbc_key_data_t *) imbm->key_data[op->key_index]; __m128i iv; - fn (op->key, aes_enc_key_expanded, aes_dec_key_expanded); - job = IMB_GET_NEXT_JOB (ptd->mgr); job->src = op->src; @@ -242,11 +225,11 @@ ipsecmb_ops_cipher_inline (vlib_main_t * vm, ptd->cbc_iv = _mm_aesenc_si128 (iv, iv); } - job->aes_key_len_in_bytes = key_len; - job->aes_enc_key_expanded = aes_enc_key_expanded; - job->aes_dec_key_expanded = aes_dec_key_expanded; + job->aes_key_len_in_bytes = key_len / 8; + job->aes_enc_key_expanded = kd->enc_key_exp; + job->aes_dec_key_expanded = kd->dec_key_exp; job->iv = op->iv; - job->iv_len_in_bytes = iv_len; + job->iv_len_in_bytes = AES_BLOCK_SIZE; job->user_data = op; @@ -256,54 +239,166 @@ ipsecmb_ops_cipher_inline (vlib_main_t * vm, ipsecmb_retire_cipher_job (job, &n_fail); } - /* - * .. then flush (i.e. 
complete) them - * We will have queued enough to satisfy the 'multi' buffer - */ while ((job = IMB_FLUSH_JOB (ptd->mgr))) - { - ipsecmb_retire_cipher_job (job, &n_fail); - } + ipsecmb_retire_cipher_job (job, &n_fail); return n_ops - n_fail; } -#define _(a, b, c, d) \ -static_always_inline u32 \ -ipsecmb_ops_cipher_enc_##a (vlib_main_t * vm, \ - vnet_crypto_op_t * ops[], \ - u32 n_ops) \ -{ \ - ipsecmb_per_thread_data_t *ptd; \ - ipsecmb_main_t *imbm; \ - \ - imbm = &ipsecmb_main; \ - ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \ - \ - return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops, c, d, \ - ptd->mgr->keyexp_##b, \ - ENCRYPT); \ - } -foreach_ipsecmb_cipher_op; +#define _(a, b) \ +static_always_inline u32 \ +ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm, \ + vnet_crypto_op_t * ops[], \ + u32 n_ops) \ +{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, ENCRYPT); } \ + \ +static_always_inline u32 \ +ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm, \ + vnet_crypto_op_t * ops[], \ + u32 n_ops) \ +{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, DECRYPT); } \ + +foreach_ipsecmb_cbc_cipher_op; #undef _ -#define _(a, b, c, d) \ -static_always_inline u32 \ -ipsecmb_ops_cipher_dec_##a (vlib_main_t * vm, \ - vnet_crypto_op_t * ops[], \ - u32 n_ops) \ -{ \ - ipsecmb_per_thread_data_t *ptd; \ - ipsecmb_main_t *imbm; \ - \ - imbm = &ipsecmb_main; \ - ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \ - \ - return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops, c, d, \ - ptd->mgr->keyexp_##b, \ - DECRYPT); \ - } -foreach_ipsecmb_cipher_op; +#define _(a, b) \ +static_always_inline u32 \ +ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm, \ + vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops) \ +{ \ + ipsecmb_main_t *imbm = &ipsecmb_main; \ + ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \ + vm->thread_index); \ + MB_MGR *m = ptd->mgr; \ + vnet_crypto_op_chunk_t *chp; \ + u32 i, j; \ + \ + for (i = 0; i < n_ops; i++) \ + { \ + struct gcm_key_data *kd; \ + struct gcm_context_data ctx; \ + vnet_crypto_op_t *op = ops[i]; \ + \ + kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \ + ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \ + IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len); \ + chp = chunks + op->chunk_index; \ + for (j = 0; j < op->n_chunks; j++) \ + { \ + IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \ + chp->len); \ + chp += 1; \ + } \ + IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len); \ + \ + op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \ + } \ + \ + return n_ops; \ +} \ + \ +static_always_inline u32 \ +ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \ + u32 n_ops) \ +{ \ + ipsecmb_main_t *imbm = &ipsecmb_main; \ + ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \ + vm->thread_index); \ + MB_MGR *m = ptd->mgr; \ + u32 i; \ + \ + for (i = 0; i < n_ops; i++) \ + { \ + struct gcm_key_data *kd; \ + struct gcm_context_data ctx; \ + vnet_crypto_op_t *op = ops[i]; \ + \ + kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \ + IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \ + op->aad, op->aad_len, op->tag, op->tag_len); \ + \ + op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \ + } \ + \ + return n_ops; \ +} \ + \ +static_always_inline u32 \ +ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm, \ + vnet_crypto_op_t * ops[], 
vnet_crypto_op_chunk_t *chunks, u32 n_ops) \ +{ \ + ipsecmb_main_t *imbm = &ipsecmb_main; \ + ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \ + vm->thread_index); \ + MB_MGR *m = ptd->mgr; \ + vnet_crypto_op_chunk_t *chp; \ + u32 i, j, n_failed = 0; \ + \ + for (i = 0; i < n_ops; i++) \ + { \ + struct gcm_key_data *kd; \ + struct gcm_context_data ctx; \ + vnet_crypto_op_t *op = ops[i]; \ + u8 scratch[64]; \ + \ + kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \ + ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \ + IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len); \ + chp = chunks + op->chunk_index; \ + for (j = 0; j < op->n_chunks; j++) \ + { \ + IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \ + chp->len); \ + chp += 1; \ + } \ + IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len); \ + \ + if ((memcmp (op->tag, scratch, op->tag_len))) \ + { \ + op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \ + n_failed++; \ + } \ + else \ + op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \ + } \ + \ + return n_ops - n_failed; \ +} \ + \ +static_always_inline u32 \ +ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \ + u32 n_ops) \ +{ \ + ipsecmb_main_t *imbm = &ipsecmb_main; \ + ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \ + vm->thread_index); \ + MB_MGR *m = ptd->mgr; \ + u32 i, n_failed = 0; \ + \ + for (i = 0; i < n_ops; i++) \ + { \ + struct gcm_key_data *kd; \ + struct gcm_context_data ctx; \ + vnet_crypto_op_t *op = ops[i]; \ + u8 scratch[64]; \ + \ + kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \ + IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \ + op->aad, op->aad_len, scratch, op->tag_len); \ + \ + if ((memcmp (op->tag, scratch, op->tag_len))) \ + { \ + op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \ + n_failed++; \ + } \ + else \ + op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \ + } \ + \ + return n_ops - n_failed; \ +} + +foreach_ipsecmb_gcm_cipher_op; #undef _ clib_error_t * @@ -330,84 +425,180 @@ crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm) return (NULL); } +static void +crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop, + vnet_crypto_key_index_t idx) +{ + ipsecmb_main_t *imbm = &ipsecmb_main; + vnet_crypto_key_t *key = vnet_crypto_get_key (idx); + ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg; + u32 i; + void *kd; + + if (kop == VNET_CRYPTO_KEY_OP_DEL) + { + if (idx >= vec_len (imbm->key_data)) + return; + + if (imbm->key_data[idx] == 0) + return; + + clib_mem_free_s (imbm->key_data[idx]); + imbm->key_data[idx] = 0; + return; + } + + if (ad->data_size == 0) + return; + + vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES); + + if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx]) + { + clib_mem_free_s (imbm->key_data[idx]); + } + + kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size, + CLIB_CACHE_LINE_BYTES); + + /* AES CBC key expansion */ + if (ad->keyexp) + { + ad->keyexp (key->data, ((ipsecmb_aes_cbc_key_data_t *) kd)->enc_key_exp, + ((ipsecmb_aes_cbc_key_data_t *) kd)->dec_key_exp); + return; + } + + /* AES GCM */ + if (ad->aes_gcm_pre) + { + ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd); + return; + } + + /* HMAC */ + if (ad->hash_one_block) + { + const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64); + u64 pad[block_qw], key_hash[block_qw]; + + clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE); + if (vec_len (key->data) 
<= ad->block_size) + clib_memcpy_fast (key_hash, key->data, vec_len (key->data)); + else + ad->hash_fn (key->data, vec_len (key->data), key_hash); + + for (i = 0; i < block_qw; i++) + pad[i] = key_hash[i] ^ 0x3636363636363636; + ad->hash_one_block (pad, kd); + + for (i = 0; i < block_qw; i++) + pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c; + ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2)); + + return; + } +} + static clib_error_t * crypto_ipsecmb_init (vlib_main_t * vm) { ipsecmb_main_t *imbm = &ipsecmb_main; + ipsecmb_alg_data_t *ad; ipsecmb_per_thread_data_t *ptd; vlib_thread_main_t *tm = vlib_get_thread_main (); clib_error_t *error; + MB_MGR *m = 0; u32 eidx; + u8 *name; - if ((error = vlib_call_init_function (vm, vnet_crypto_init))) - return error; + if (!clib_cpu_supports_aes ()) + return 0; /* * A priority that is better than OpenSSL but worse than VPP natvie */ - eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, - "Intel IPSEC multi-buffer"); + name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c", + IMB_VERSION_STR, 0); + eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name); vec_validate (imbm->per_thread_data, tm->n_vlib_mains - 1); - if (clib_cpu_supports_avx512f ()) - { - vec_foreach (ptd, imbm->per_thread_data) - { - ptd->mgr = alloc_mb_mgr (0); - init_mb_mgr_avx512 (ptd->mgr); - } - } - else if (clib_cpu_supports_avx2 ()) - { - vec_foreach (ptd, imbm->per_thread_data) - { - ptd->mgr = alloc_mb_mgr (0); - init_mb_mgr_avx2 (ptd->mgr); - } - } - else + /* *INDENT-OFF* */ + vec_foreach (ptd, imbm->per_thread_data) { - vec_foreach (ptd, imbm->per_thread_data) - { ptd->mgr = alloc_mb_mgr (0); - init_mb_mgr_sse (ptd->mgr); - } + if (clib_cpu_supports_avx512f ()) + init_mb_mgr_avx512 (ptd->mgr); + else if (clib_cpu_supports_avx2 ()) + init_mb_mgr_avx2 (ptd->mgr); + else + init_mb_mgr_sse (ptd->mgr); + + if (ptd == imbm->per_thread_data) + m = ptd->mgr; } + /* *INDENT-ON* */ if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm))) return (error); - -#define _(a, b, c) \ +#define _(a, b, c, d, e, f) \ vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \ ipsecmb_ops_hmac_##a); \ + ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a; \ + ad->block_size = d; \ + ad->data_size = e * 2; \ + ad->hash_one_block = m-> c##_one_block; \ + ad->hash_fn = m-> c; \ foreach_ipsecmb_hmac_op; #undef _ -#define _(a, b, c, d) \ +#define _(a, b) \ vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \ - ipsecmb_ops_cipher_enc_##a); \ + ipsecmb_ops_cbc_cipher_enc_##a); \ + vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \ + ipsecmb_ops_cbc_cipher_dec_##a); \ + ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \ + ad->data_size = sizeof (ipsecmb_aes_cbc_key_data_t); \ + ad->keyexp = m->keyexp_##b; \ - foreach_ipsecmb_cipher_op; + foreach_ipsecmb_cbc_cipher_op; #undef _ -#define _(a, b, c, d) \ +#define _(a, b) \ + vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \ + ipsecmb_ops_gcm_cipher_enc_##a); \ vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \ - ipsecmb_ops_cipher_dec_##a); \ - - foreach_ipsecmb_cipher_op; + ipsecmb_ops_gcm_cipher_dec_##a); \ + vnet_crypto_register_chained_ops_handler \ + (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \ + ipsecmb_ops_gcm_cipher_enc_##a##_chained); \ + vnet_crypto_register_chained_ops_handler \ + (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \ + ipsecmb_ops_gcm_cipher_dec_##a##_chained); \ + ad = imbm->alg_data + 
VNET_CRYPTO_ALG_##a; \ + ad->data_size = sizeof (struct gcm_key_data); \ + ad->aes_gcm_pre = m->gcm##b##_pre; \ + + foreach_ipsecmb_gcm_cipher_op; #undef _ + vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler); return (NULL); } -VLIB_INIT_FUNCTION (crypto_ipsecmb_init); +/* *INDENT-OFF* */ +VLIB_INIT_FUNCTION (crypto_ipsecmb_init) = +{ + .runs_after = VLIB_INITS ("vnet_crypto_init"), +}; +/* *INDENT-ON* */ /* *INDENT-OFF* */ VLIB_PLUGIN_REGISTER () = { .version = VPP_BUILD_VER, - .description = "Intel IPSEC multi-buffer", + .description = "Intel IPSEC Multi-buffer Crypto Engine", }; /* *INDENT-ON* */
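
For reference: crypto_ipsecmb_key_handler() above replaces the old per-operation
hash_expand_keys() with a one-time, per-key precomputation of the HMAC inner/outer
pad states, stored in imbm->key_data[]. Below is a minimal standalone sketch of
that precomputation for SHA-256, using the same intel-ipsec-mb primitives the
patch registers (m->sha256 as hash_fn, m->sha256_one_block as hash_one_block);
the function hmac_sha256_precompute() and its parameter names are invented for
the illustration and are not part of the patch.

#include <string.h>
#include <stdint.h>
#include <intel-ipsec-mb.h>

#define SHA256_BLOCK_SZ 64	/* block size in bytes */
#define SHA256_HASH_SZ	32	/* intermediate state size in bytes */

static void
hmac_sha256_precompute (MB_MGR * m, const uint8_t * key, uint32_t key_len,
			uint8_t ipad_hash[SHA256_HASH_SZ],
			uint8_t opad_hash[SHA256_HASH_SZ])
{
  uint8_t key_block[SHA256_BLOCK_SZ] = { 0 };
  uint8_t pad[SHA256_BLOCK_SZ];
  int i;

  /* RFC 2104: keys longer than one block are hashed down first,
   * shorter keys are zero-padded to the block size */
  if (key_len <= SHA256_BLOCK_SZ)
    memcpy (key_block, key, key_len);
  else
    m->sha256 (key, key_len, key_block);

  /* hash (key ^ ipad) and (key ^ opad) exactly once per key */
  for (i = 0; i < SHA256_BLOCK_SZ; i++)
    pad[i] = key_block[i] ^ 0x36;
  m->sha256_one_block (pad, ipad_hash);

  for (i = 0; i < SHA256_BLOCK_SZ; i++)
    pad[i] = key_block[i] ^ 0x5c;
  m->sha256_one_block (pad, opad_hash);
}

The two 32-byte states correspond to what ipsecmb_ops_hmac_inline() feeds to
job->u.HMAC._hashed_auth_key_xor_ipad and ..._xor_opad, so the per-packet path
no longer touches the raw key at all.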