_ (AES_256_GCM_TAG16_AAD8, 256, 1, 8) \
_ (AES_256_GCM_TAG16_AAD12, 256, 1, 12)
+#define foreach_ipsecmb_linked_cipher_auth_op \
+ _ (AES_128_CBC_SHA1_TAG12, 128, SHA_1, CBC, 12) \
+ _ (AES_192_CBC_SHA1_TAG12, 192, SHA_1, CBC, 12) \
+ _ (AES_256_CBC_SHA1_TAG12, 256, SHA_1, CBC, 12) \
+ _ (AES_128_CTR_SHA1_TAG12, 128, SHA_1, CNTR, 12) \
+ _ (AES_192_CTR_SHA1_TAG12, 192, SHA_1, CNTR, 12) \
+ _ (AES_256_CTR_SHA1_TAG12, 256, SHA_1, CNTR, 12) \
+ _ (AES_128_CTR_SHA256_TAG16, 128, SHA_256, CNTR, 16) \
+ _ (AES_192_CTR_SHA256_TAG16, 192, SHA_256, CNTR, 16) \
+ _ (AES_256_CTR_SHA256_TAG16, 256, SHA_256, CNTR, 16)
+
#define foreach_chacha_poly_fixed_aad_lengths _ (0) _ (8) _ (12)
static_always_inline vnet_crypto_op_status_t
foreach_ipsecmb_gcm_cipher_op;
#undef _
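+/* shared handler for the linked cipher + HMAC ops: one IMB_JOB per op is
+ * filled in and submitted to the per-thread multi-buffer manager, completed
+ * jobs are retired as the manager hands them back and the manager is
+ * flushed at the end */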
+static_always_inline u32
+ipsecmb_ops_cipher_auth_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops, u32 key_len, u32 tag_len,
+ IMB_CIPHER_MODE cipher_mode,
+ IMB_HASH_ALG hash_alg,
+ IMB_CIPHER_DIRECTION direction)
+{
+ ipsecmb_main_t *imbm = &ipsecmb_main;
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + vm->thread_index;
+ IMB_JOB *job;
+ u32 i, n_fail = 0;
+
+ for (i = 0; i < n_ops; i++)
+ {
+ ipsecmb_aes_key_data_t *kd;
+ vnet_crypto_op_t *op = ops[i];
+ kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
+
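+ /* grab the next free job from the per-thread multi-buffer manager */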
+ job = IMB_GET_NEXT_JOB (ptd->mgr);
+
+ job->src = op->src;
+ job->dst = op->dst;
+ job->msg_len_to_cipher_in_bytes = op->len;
+ job->cipher_start_src_offset_in_bytes = 0;
+
+ job->cipher_mode = cipher_mode;
+ job->cipher_direction = direction;
+ job->hash_alg = hash_alg;
+
+ job->key_len_in_bytes = key_len / 8;
+ job->enc_keys = kd->enc_key_exp;
+ job->dec_keys = kd->dec_key_exp;
+ job->iv = op->iv;
+ job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;
+
+ job->auth_tag_output = op->digest;
+ job->auth_tag_output_len_in_bytes = tag_len;
+
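+ /* cipher before hash when encrypting, hash before cipher when decrypting */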
+ job->chain_order = (direction == IMB_DIR_ENCRYPT) ?
+ IMB_ORDER_CIPHER_HASH :
+ IMB_ORDER_HASH_CIPHER;
+
+ job->user_data = op;
+
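+ /* submitting may return an earlier job that has already completed */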
+ job = IMB_SUBMIT_JOB (ptd->mgr);
+ if (job)
+ ipsecmb_retire_cipher_job (job, &n_fail);
+ }
+
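+ /* drain any jobs still held by the multi-buffer manager */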
+ while ((job = IMB_FLUSH_JOB (ptd->mgr)))
+ ipsecmb_retire_cipher_job (job, &n_fail);
+
+ return n_ops - n_fail;
+}
+
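+/* expand per-variant encrypt/decrypt wrappers around the shared handler */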
+#define _(n, k, h, c, t) \
+ static_always_inline u32 ipsecmb_ops_enc_##n ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return ipsecmb_ops_cipher_auth_inline (vm, ops, n_ops, k, t, \
+ IMB_CIPHER_##c, IMB_AUTH_HMAC_##h, \
+ IMB_DIR_ENCRYPT); \
+ } \
+ \
+ static_always_inline u32 ipsecmb_ops_dec_##n ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return ipsecmb_ops_cipher_auth_inline (vm, ops, n_ops, k, t, \
+ IMB_CIPHER_##c, IMB_AUTH_HMAC_##h, \
+ IMB_DIR_DECRYPT); \
+ }
+foreach_ipsecmb_linked_cipher_auth_op
+#undef _
+
#ifdef HAVE_IPSECMB_CHACHA_POLY
-always_inline void
-ipsecmb_retire_aead_job (IMB_JOB *job, u32 *n_fail)
+ always_inline void
+ ipsecmb_retire_aead_job (IMB_JOB *job, u32 *n_fail)
{
vnet_crypto_op_t *op = job->user_data;
u32 len = op->tag_len;
foreach_chacha_poly_fixed_aad_lengths
#undef _
#endif
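+/* register encrypt/decrypt handlers for every linked cipher + auth op */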
+#define _(n, k, h, c, t) \
+ { .opt = VNET_CRYPTO_OP_##n##_ENC, .fn = ipsecmb_ops_enc_##n }, \
+ { .opt = VNET_CRYPTO_OP_##n##_DEC, .fn = ipsecmb_ops_dec_##n },
+
+ foreach_ipsecmb_linked_cipher_auth_op
+#undef _
+
{}
};
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
-#include <vnet/crypto/crypto.h>
-#include <native/crypto_native.h>
#include <vppinfra/crypto/aes_cbc.h>
+#include <native/sha2.h>
#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize ("O3")
foreach_aes_cbc_handler_type;
#undef _
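+/* linked AES-CBC + HMAC-SHA2 variants: AES key bits, SHA2 digest bits,
+ * clib SHA2 type, truncated tag length in bytes */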
+#define foreach_crypto_native_cbc_hmac_op \
+ _ (128, 224, CLIB_SHA2_224, 14) \
+ _ (192, 224, CLIB_SHA2_224, 14) \
+ _ (256, 224, CLIB_SHA2_224, 14) \
+ _ (128, 256, CLIB_SHA2_256, 16) \
+ _ (192, 256, CLIB_SHA2_256, 16) \
+ _ (256, 256, CLIB_SHA2_256, 16)
+
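+/* each wrapper chains the existing single-algorithm handlers: encrypt runs
+ * AES-CBC first and then computes the HMAC, decrypt runs the HMAC pass
+ * (verification when requested) before deciphering */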
+#define _(k, b, clib_sha2, t) \
+ static u32 crypto_native_ops_enc_aes_##k##_cbc_hmac_sha##b##_tag##t ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##k); \
+ crypto_native_ops_hmac_sha2 (vm, ops, n_ops, 0, clib_sha2); \
+ return n_ops; \
+ } \
+ \
+ static u32 crypto_native_ops_dec_aes_##k##_cbc_hmac_sha##b##_tag##t ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ /* report HMAC verification failures through the return value */ \
+ u32 n_ok = crypto_native_ops_hmac_sha2 (vm, ops, n_ops, 0, clib_sha2); \
+ aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##k); \
+ return n_ok; \
+ } \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##k##_cbc_hmac_sha##b##_tag##t##_enc) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##k##_CBC_SHA##b##_TAG##t##_ENC, \
+ .fn = crypto_native_ops_enc_aes_##k##_cbc_hmac_sha##b##_tag##t, \
+ .probe = aes_cbc_cpu_probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##k##_cbc_hmac_sha##b##_tag##t##_dec) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##k##_CBC_SHA##b##_TAG##t##_DEC, \
+ .fn = crypto_native_ops_dec_aes_##k##_cbc_hmac_sha##b##_tag##t, \
+ .probe = aes_cbc_cpu_probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_KEY_HANDLER (aes_##k##_cbc_hmac_sha##b##_tag##t) = { \
+ .alg_id = VNET_CRYPTO_ALG_AES_##k##_CBC_SHA##b##_TAG##t, \
+ .key_fn = aes_cbc_key_exp_##k, \
+ .probe = aes_cbc_cpu_probe, \
+ };
+
+foreach_crypto_native_cbc_hmac_op
+#undef _
\ No newline at end of file
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
-#include <vnet/crypto/crypto.h>
-#include <native/crypto_native.h>
#include <vppinfra/crypto/aes_ctr.h>
+#include <native/sha2.h>
#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize("O3")
.probe = probe, \
};
-_ (128) _ (192) _ (256)
+_ (128)
+_ (192)
+_ (256)
+#undef _
+
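+/* linked AES-CTR + HMAC-SHA2 variants: AES key bits, SHA2 digest bits,
+ * clib SHA2 type, truncated tag length in bytes; the wrappers below chain
+ * the existing AES-CTR and HMAC-SHA2 handlers */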
+#define foreach_crypto_native_ctr_hmac_op \
+ _ (128, 256, CLIB_SHA2_256, 16) \
+ _ (192, 256, CLIB_SHA2_256, 16) \
+ _ (256, 256, CLIB_SHA2_256, 16)
+
+#define _(k, b, clib_sha2, t) \
+ static u32 crypto_native_ops_enc_aes_##k##_ctr_hmac_sha##b##_tag##t ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ aes_ops_aes_ctr (vm, ops, n_ops, 0, AES_KEY_##k, 0); \
+ crypto_native_ops_hmac_sha2 (vm, ops, n_ops, 0, clib_sha2); \
+ return n_ops; \
+ } \
+ \
+ static u32 crypto_native_ops_dec_aes_##k##_ctr_hmac_sha##b##_tag##t ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ /* report HMAC verification failures through the return value */ \
+ u32 n_ok = crypto_native_ops_hmac_sha2 (vm, ops, n_ops, 0, clib_sha2); \
+ aes_ops_aes_ctr (vm, ops, n_ops, 0, AES_KEY_##k, 0); \
+ return n_ok; \
+ } \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##k##_ctr_hmac_sha##b##_tag##t##_enc) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##k##_CTR_SHA##b##_TAG##t##_ENC, \
+ .fn = crypto_native_ops_enc_aes_##k##_ctr_hmac_sha##b##_tag##t, \
+ .probe = probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##k##_ctr_hmac_sha##b##_tag##t##_dec) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##k##_CTR_SHA##b##_TAG##t##_DEC, \
+ .fn = crypto_native_ops_dec_aes_##k##_ctr_hmac_sha##b##_tag##t, \
+ .probe = probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_KEY_HANDLER (aes_##k##_ctr_hmac_sha##b##_tag##t) = { \
+ .alg_id = VNET_CRYPTO_ALG_AES_##k##_CTR_SHA##b##_TAG##t, \
+ .key_fn = aes_ctr_key_exp_##k, \
+ .probe = probe, \
+ };
+
+foreach_crypto_native_ctr_hmac_op
#undef _
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
-#include <vnet/crypto/crypto.h>
-#include <native/crypto_native.h>
-#include <vppinfra/crypto/sha2.h>
+#include <native/sha2.h>
static_always_inline u32
crypto_native_ops_hash_sha2 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
return n_ops;
}
-static_always_inline u32
-crypto_native_ops_hmac_sha2 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
- u32 n_ops, vnet_crypto_op_chunk_t *chunks,
- clib_sha2_type_t type)
-{
- crypto_native_main_t *cm = &crypto_native_main;
- vnet_crypto_op_t *op = ops[0];
- u32 n_left = n_ops;
- clib_sha2_hmac_ctx_t ctx;
- u8 buffer[64];
- u32 sz, n_fail = 0;
-
- for (; n_left; n_left--, op++)
- {
- clib_sha2_hmac_init (
- &ctx, type, (clib_sha2_hmac_key_data_t *) cm->key_data[op->key_index]);
- if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
- {
- vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
- for (int j = 0; j < op->n_chunks; j++, chp++)
- clib_sha2_hmac_update (&ctx, chp->src, chp->len);
- }
- else
- clib_sha2_hmac_update (&ctx, op->src, op->len);
-
- clib_sha2_hmac_final (&ctx, buffer);
-
- if (op->digest_len)
- {
- sz = op->digest_len;
- if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
- {
- if ((memcmp (op->digest, buffer, sz)))
- {
- n_fail++;
- op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
- continue;
- }
- }
- else
- clib_memcpy_fast (op->digest, buffer, sz);
- }
- else
- {
- sz = clib_sha2_variants[type].digest_size;
- if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
- {
- if ((memcmp (op->digest, buffer, sz)))
- {
- n_fail++;
- op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
- continue;
- }
- }
- else
- clib_memcpy_fast (op->digest, buffer, sz);
- }
-
- op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
- }
-
- return n_ops - n_fail;
-}
-
static void *
sha2_key_add (vnet_crypto_key_t *key, clib_sha2_type_t type)
{
--- /dev/null
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2025 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __sha2_h__
+#define __sha2_h__
+#include <vppinfra/crypto/sha2.h>
+#include <vnet/crypto/crypto.h>
+#include <native/crypto_native.h>
+
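+/* compute HMAC-SHA2 over each op, following chained buffers when present;
+ * with VNET_CRYPTO_OP_FLAG_HMAC_CHECK the computed digest is compared
+ * against op->digest instead of being copied out; returns the number of
+ * successful ops */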
+static_always_inline u32
+crypto_native_ops_hmac_sha2 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops, vnet_crypto_op_chunk_t *chunks,
+ clib_sha2_type_t type)
+{
+ crypto_native_main_t *cm = &crypto_native_main;
+ vnet_crypto_op_t *op = ops[0];
+ u32 n_left = n_ops;
+ clib_sha2_hmac_ctx_t ctx;
+ u8 buffer[64];
+ u32 sz, n_fail = 0;
+
+ for (; n_left; n_left--, op++)
+ {
+ clib_sha2_hmac_init (
+ &ctx, type, (clib_sha2_hmac_key_data_t *) cm->key_data[op->key_index]);
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
+ for (int j = 0; j < op->n_chunks; j++, chp++)
+ clib_sha2_hmac_update (&ctx, chp->src, chp->len);
+ }
+ else
+ clib_sha2_hmac_update (&ctx, op->src, op->len);
+
+ clib_sha2_hmac_final (&ctx, buffer);
+
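+ /* use the caller-supplied truncated digest length if set, otherwise
+ * the full digest size of the selected SHA2 variant */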
+ if (op->digest_len)
+ {
+ sz = op->digest_len;
+ if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+ {
+ if ((memcmp (op->digest, buffer, sz)))
+ {
+ n_fail++;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ continue;
+ }
+ }
+ else
+ clib_memcpy_fast (op->digest, buffer, sz);
+ }
+ else
+ {
+ sz = clib_sha2_variants[type].digest_size;
+ if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+ {
+ if ((memcmp (op->digest, buffer, sz)))
+ {
+ n_fail++;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ continue;
+ }
+ }
+ else
+ clib_memcpy_fast (op->digest, buffer, sz);
+ }
+
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+
+ return n_ops - n_fail;
+}
+#endif /* __sha2_h__ */
\ No newline at end of file
_ (AES_192_CBC_MD5_TAG12, EVP_aes_192_cbc, EVP_md5, 12) \
_ (AES_256_CBC_MD5_TAG12, EVP_aes_256_cbc, EVP_md5, 12)
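+/* linked AES-CTR + HMAC variants: crypto alg, EVP cipher, EVP digest,
+ * truncated tag length in bytes */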
+#define foreach_openssl_linked_ctr_hmac_op \
+ _ (AES_128_CTR_SHA1_TAG12, EVP_aes_128_ctr, EVP_sha1, 12) \
+ _ (AES_192_CTR_SHA1_TAG12, EVP_aes_192_ctr, EVP_sha1, 12) \
+ _ (AES_256_CTR_SHA1_TAG12, EVP_aes_256_ctr, EVP_sha1, 12) \
+ _ (AES_128_CTR_SHA256_TAG16, EVP_aes_128_ctr, EVP_sha256, 16) \
+ _ (AES_192_CTR_SHA256_TAG16, EVP_aes_192_ctr, EVP_sha256, 16) \
+ _ (AES_256_CTR_SHA256_TAG16, EVP_aes_256_ctr, EVP_sha256, 16) \
+ _ (AES_128_CTR_SHA384_TAG24, EVP_aes_128_ctr, EVP_sha384, 24) \
+ _ (AES_192_CTR_SHA384_TAG24, EVP_aes_192_ctr, EVP_sha384, 24) \
+ _ (AES_256_CTR_SHA384_TAG24, EVP_aes_256_ctr, EVP_sha384, 24) \
+ _ (AES_128_CTR_SHA512_TAG32, EVP_aes_128_ctr, EVP_sha512, 32) \
+ _ (AES_192_CTR_SHA512_TAG32, EVP_aes_192_ctr, EVP_sha512, 32) \
+ _ (AES_256_CTR_SHA512_TAG32, EVP_aes_256_ctr, EVP_sha512, 32)
+
#define foreach_openssl_chacha20_evp_op \
_ (chacha20_poly1305, CHACHA20_POLY1305, EVP_chacha20_poly1305, 0, 0) \
_ (chacha20_poly1305, CHACHA20_POLY1305_TAG16_AAD0, EVP_chacha20_poly1305, \
openssl_ctx_hmac (key, kop, idx, m ()); \
return NULL; \
}
-foreach_openssl_linked_cbc_hmac_op
+foreach_openssl_linked_cbc_hmac_op foreach_openssl_linked_ctr_hmac_op
#undef _
#define _(a, b) \
#undef _
#define _(n, c, m, t) cm->ctx_fn[VNET_CRYPTO_ALG_##n] = openssl_ctx_##n;
- foreach_openssl_linked_cbc_hmac_op
+ foreach_openssl_linked_cbc_hmac_op foreach_openssl_linked_ctr_hmac_op
#undef _
#define _(a, b) cm->ctx_fn[VNET_CRYPTO_ALG_HMAC_##a] = openssl_ctx_hmac_##a;
{ .opt = VNET_CRYPTO_OP_##n##_DEC, \
.fn = openssl_ops_dec_##n, \
.cfn = openssl_ops_dec_chained_##n },
- foreach_openssl_linked_cbc_hmac_op
+ foreach_openssl_linked_cbc_hmac_op foreach_openssl_linked_ctr_hmac_op
#undef _
#define _(a, b) \
{ .opt = VNET_CRYPTO_OP_##a##_HMAC, \