typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- MB_MGR *mgr;
__m128i cbc_iv;
+ MB_MGR *mgr;
+ /* pre-allocated per-thread job array reused for every burst submission */
+ JOB_AES_HMAC burst_jobs[IMB_MAX_BURST_SIZE];
} ipsecmb_per_thread_data_t;
typedef struct
{
u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
-} ipsecmb_aes_cbc_key_data_t;
+/* expanded AES round keys; renamed since it now serves CBC and CTR modes */
+} ipsecmb_aes_key_data_t;
static ipsecmb_main_t ipsecmb_main = { };
_(SHA512, SHA_512, sha512, 128, 64, 64)
/*
- * (Alg, key-len-bits)
+ * (Alg, key-len-bits, JOB_CIPHER_MODE)
*/
-#define foreach_ipsecmb_cbc_cipher_op \
- _(AES_128_CBC, 128) \
- _(AES_192_CBC, 192) \
- _(AES_256_CBC, 256)
+#define foreach_ipsecmb_cipher_op \
+ _ (AES_128_CBC, 128, CBC) \
+ _ (AES_192_CBC, 192, CBC) \
+ _ (AES_256_CBC, 256, CBC) \
+ _ (AES_128_CTR, 128, CNTR) \
+ _ (AES_192_CTR, 192, CNTR) \
+ _ (AES_256_CTR, 256, CNTR)
/*
* (Alg, key-len-bytes, iv-len-bytes)
_(AES_192_GCM, 192) \
_(AES_256_GCM, 256)
+/*
+ * Translate an intel-ipsec-mb job completion status (JOB_STS) into the
+ * corresponding vnet_crypto op status so per-op failures are reported
+ * with the real engine error rather than a generic bad-HMAC code.
+ */
+static_always_inline vnet_crypto_op_status_t
+ipsecmb_status_job (JOB_STS status)
+{
+  switch (status)
+    {
+    case STS_COMPLETED:
+      return VNET_CRYPTO_OP_STATUS_COMPLETED;
+    case STS_BEING_PROCESSED:
+    case STS_COMPLETED_AES:
+    case STS_COMPLETED_HMAC:
+      return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
+    case STS_INVALID_ARGS:
+    case STS_INTERNAL_ERROR:
+    case STS_ERROR:
+      return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
+    }
+  /* every JOB_STS value is handled above; reaching here is a bug */
+  ASSERT (0);
+  return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
+}
+
always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
{
vnet_crypto_op_t *op = job->user_data;
u32 len = op->digest_len ? op->digest_len : digest_size;
- if (STS_COMPLETED != job->status)
+ if (PREDICT_FALSE (STS_COMPLETED != job->status))
{
- op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ op->status = ipsecmb_status_job (job->status);
*n_fail = *n_fail + 1;
return;
}
ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
vm->thread_index);
JOB_AES_HMAC *job;
- u32 i, n_fail = 0;
+ u32 i, n_fail = 0, ops_index = 0;
u8 scratch[n_ops][digest_size];
+ const u32 burst_sz =
+ (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
- /*
- * queue all the jobs first ...
- */
- for (i = 0; i < n_ops; i++)
+ while (n_ops)
{
- vnet_crypto_op_t *op = ops[i];
- u8 *kd = (u8 *) imbm->key_data[op->key_index];
+ const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
+ /*
+ * configure all the jobs first ...
+ */
+ for (i = 0; i < n; i++, ops_index++)
+ {
+ vnet_crypto_op_t *op = ops[ops_index];
+ const u8 *kd = (u8 *) imbm->key_data[op->key_index];
- job = IMB_GET_NEXT_JOB (ptd->mgr);
+ job = &ptd->burst_jobs[i];
- job->src = op->src;
- job->hash_start_src_offset_in_bytes = 0;
- job->msg_len_to_hash_in_bytes = op->len;
- job->hash_alg = alg;
- job->auth_tag_output_len_in_bytes = digest_size;
- job->auth_tag_output = scratch[i];
+ job->src = op->src;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->msg_len_to_hash_in_bytes = op->len;
+ job->auth_tag_output_len_in_bytes = digest_size;
+ job->auth_tag_output = scratch[ops_index];
- job->cipher_mode = NULL_CIPHER;
- job->cipher_direction = DECRYPT;
- job->chain_order = HASH_CIPHER;
+ job->u.HMAC._hashed_auth_key_xor_ipad = kd;
+ job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
+ job->user_data = op;
+ }
- job->u.HMAC._hashed_auth_key_xor_ipad = kd;
- job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
- job->user_data = op;
+ /*
+ * submit all jobs to be processed and retire completed jobs
+ */
+ IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);
- job = IMB_SUBMIT_JOB (ptd->mgr);
+ for (i = 0; i < n; i++)
+ {
+ job = &ptd->burst_jobs[i];
+ ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
+ }
- if (job)
- ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
+ n_ops -= n;
}
- while ((job = IMB_FLUSH_JOB (ptd->mgr)))
- ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
-
- return n_ops - n_fail;
+ return ops_index - n_fail;
}
#define _(a, b, c, d, e, f) \
{
vnet_crypto_op_t *op = job->user_data;
- if (STS_COMPLETED != job->status)
+ if (PREDICT_FALSE (STS_COMPLETED != job->status))
{
- op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ op->status = ipsecmb_status_job (job->status);
*n_fail = *n_fail + 1;
}
else
}
+/*
+ * Shared AES-CBC / AES-CTR handler: ops are grouped into bursts of at
+ * most IMB_MAX_BURST_SIZE jobs, submitted with the burst API and then
+ * retired in order.  Returns the number of successfully completed ops.
+ */
static_always_inline u32
-ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
			       u32 n_ops, u32 key_len,
-			       JOB_CIPHER_DIRECTION direction)
+			       JOB_CIPHER_DIRECTION direction,
+			       JOB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
						     vm->thread_index);
  JOB_AES_HMAC *job;
-  u32 i, n_fail = 0;
+  u32 i, n_fail = 0, ops_index = 0;
+  const u32 burst_sz =
+    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
-  for (i = 0; i < n_ops; i++)
+  while (n_ops)
    {
-      ipsecmb_aes_cbc_key_data_t *kd;
-      vnet_crypto_op_t *op = ops[i];
-      kd = (ipsecmb_aes_cbc_key_data_t *) imbm->key_data[op->key_index];
-      __m128i iv;
+      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
-      job = IMB_GET_NEXT_JOB (ptd->mgr);
+      /* configure up to n jobs in the per-thread burst array first */
+      for (i = 0; i < n; i++)
+	{
+	  ipsecmb_aes_key_data_t *kd;
+	  vnet_crypto_op_t *op = ops[ops_index++];
+	  kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
-      job->src = op->src;
-      job->dst = op->dst;
-      job->msg_len_to_cipher_in_bytes = op->len;
-      job->cipher_start_src_offset_in_bytes = 0;
+	  job = &ptd->burst_jobs[i];
-      job->hash_alg = NULL_HASH;
-      job->cipher_mode = CBC;
-      job->cipher_direction = direction;
-      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
+	  job->src = op->src;
+	  job->dst = op->dst;
+	  job->msg_len_to_cipher_in_bytes = op->len;
+	  job->cipher_start_src_offset_in_bytes = 0;
-      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
-	{
-	  iv = ptd->cbc_iv;
-	  _mm_storeu_si128 ((__m128i *) op->iv, iv);
-	  ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
-	}
+	  job->hash_alg = NULL_HASH;
-      job->aes_key_len_in_bytes = key_len / 8;
-      job->aes_enc_key_expanded = kd->enc_key_exp;
-      job->aes_dec_key_expanded = kd->dec_key_exp;
-      job->iv = op->iv;
-      job->iv_len_in_bytes = AES_BLOCK_SIZE;
+	  /* on encrypt, optionally generate a fresh IV from the
+	   * per-thread AES-based IV generator */
+	  if ((direction == ENCRYPT) &&
+	      (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
+	    {
+	      const __m128i iv = ptd->cbc_iv;
+	      _mm_storeu_si128 ((__m128i *) op->iv, iv);
+	      ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
+	    }
-      job->user_data = op;
+	  job->aes_enc_key_expanded = kd->enc_key_exp;
+	  job->aes_dec_key_expanded = kd->dec_key_exp;
+	  job->iv = op->iv;
+	  job->iv_len_in_bytes = AES_BLOCK_SIZE;
-      job = IMB_SUBMIT_JOB (ptd->mgr);
+	  job->user_data = op;
+	}
-      if (job)
-	ipsecmb_retire_cipher_job (job, &n_fail);
-    }
+      /* submit the whole burst at once, then retire completed jobs */
+      IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
+				       cipher_mode, direction, key_len / 8);
+      for (i = 0; i < n; i++)
+	{
+	  job = &ptd->burst_jobs[i];
+	  ipsecmb_retire_cipher_job (job, &n_fail);
+	}
-      while ((job = IMB_FLUSH_JOB (ptd->mgr)))
-	ipsecmb_retire_cipher_job (job, &n_fail);
+      n_ops -= n;
+    }
-  return n_ops - n_fail;
+  return ops_index - n_fail;
}
-#define _(a, b) \
-static_always_inline u32 \
-ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm, \
- vnet_crypto_op_t * ops[], \
- u32 n_ops) \
-{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, ENCRYPT); } \
- \
-static_always_inline u32 \
-ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm, \
- vnet_crypto_op_t * ops[], \
- u32 n_ops) \
-{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, DECRYPT); } \
+#define _(a, b, c) \
+ static_always_inline u32 ipsecmb_ops_cipher_enc_##a ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, ENCRYPT, c); \
+ } \
+ \
+ static_always_inline u32 ipsecmb_ops_cipher_dec_##a ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, DECRYPT, c); \
+ }
-foreach_ipsecmb_cbc_cipher_op;
+foreach_ipsecmb_cipher_op;
#undef _
#define _(a, b) \
foreach_ipsecmb_gcm_cipher_op;
#undef _
+#ifdef HAVE_IPSECMB_CHACHA_POLY
+/*
+ * Finalize one completed AEAD (chacha20-poly1305) job: propagate engine
+ * errors, verify the computed tag against op->tag on check-mode
+ * (decrypt) ops, or copy the freshly computed tag out on encrypt ops.
+ * Increments *n_fail on any failure.
+ */
+always_inline void
+ipsecmb_retire_aead_job (JOB_AES_HMAC *job, u32 *n_fail)
+{
+  vnet_crypto_op_t *op = job->user_data;
+  u32 len = op->tag_len;
+
+  if (PREDICT_FALSE (STS_COMPLETED != job->status))
+    {
+      op->status = ipsecmb_status_job (job->status);
+      *n_fail = *n_fail + 1;
+      return;
+    }
+
+  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+    {
+      if (memcmp (op->tag, job->auth_tag_output, len))
+	{
+	  *n_fail = *n_fail + 1;
+	  op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+	  return;
+	}
+      /* tag verified equal; copying it back over itself is a no-op */
+      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+      return;
+    }
+
+  clib_memcpy_fast (op->tag, job->auth_tag_output, len);
+
+  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+}
+
+/*
+ * Handle a batch of CHACHA20-POLY1305 ops through the multi-buffer job
+ * API.  Tags are computed into per-op scratch space and copied /
+ * verified by ipsecmb_retire_aead_job.  Returns the number of ops that
+ * completed successfully.
+ */
+static_always_inline u32
+ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+			 IMB_CIPHER_DIRECTION dir)
+{
+  ipsecmb_main_t *imbm = &ipsecmb_main;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  struct JOB_AES_HMAC *job;
+  MB_MGR *m = ptd->mgr;
+  u32 i, n_fail = 0, last_key_index = ~0;
+  /* per-op tag scratch; assumes n_ops <= VLIB_FRAME_SIZE -- TODO confirm */
+  u8 scratch[VLIB_FRAME_SIZE][16];
+  u8 iv_data[16];
+  u8 *key = 0;
+
+  for (i = 0; i < n_ops; i++)
+    {
+      vnet_crypto_op_t *op = ops[i];
+      __m128i iv;
+
+      job = IMB_GET_NEXT_JOB (m);
+      /* look the key up only when the key index changes between ops */
+      if (last_key_index != op->key_index)
+	{
+	  vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
+
+	  key = kd->data;
+	  last_key_index = op->key_index;
+	}
+
+      job->cipher_direction = dir;
+      job->chain_order = IMB_ORDER_HASH_CIPHER;
+      job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
+      job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
+      job->enc_keys = job->dec_keys = key;
+      job->key_len_in_bytes = 32;
+
+      job->u.CHACHA20_POLY1305.aad = op->aad;
+      job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
+      job->src = op->src;
+      job->dst = op->dst;
+
+      /* on encrypt, optionally generate a fresh 96-bit IV from the
+       * per-thread AES-based IV generator */
+      if ((dir == IMB_DIR_ENCRYPT) &&
+	  (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
+	{
+	  iv = ptd->cbc_iv;
+	  _mm_storeu_si128 ((__m128i *) iv_data, iv);
+	  clib_memcpy_fast (op->iv, iv_data, 12);
+	  ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
+	}
+
+      job->iv = op->iv;
+      job->iv_len_in_bytes = 12;
+      job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
+	op->len;
+      job->cipher_start_src_offset_in_bytes =
+	job->hash_start_src_offset_in_bytes = 0;
+
+      job->auth_tag_output = scratch[i];
+      job->auth_tag_output_len_in_bytes = 16;
+
+      job->user_data = op;
+
+      /* note: the dead `op++` that followed here was removed -- `op` is
+       * re-read from ops[i] at the top of every iteration */
+      job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
+      if (job)
+	ipsecmb_retire_aead_job (job, &n_fail);
+    }
+
+  /* drain any jobs still queued inside the multi-buffer manager */
+  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
+    ipsecmb_retire_aead_job (job, &n_fail);
+
+  return n_ops - n_fail;
+}
+
+/* vnet_crypto ops handler: CHACHA20-POLY1305 encrypt (single-buffer) */
+static_always_inline u32
+ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+			     u32 n_ops)
+{
+  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
+}
+
+/* vnet_crypto ops handler: CHACHA20-POLY1305 decrypt (single-buffer) */
+static_always_inline u32
+ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+			     u32 n_ops)
+{
+  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
+}
+
+/*
+ * Chained-buffer CHACHA20-POLY1305 using the synchronous direct API
+ * (init / update-per-chunk / finalize).  Encrypt writes the tag into
+ * op->tag; decrypt computes the tag into scratch and compares it
+ * against op->tag, failing the op on mismatch.  Returns the number of
+ * successfully completed ops.
+ */
+static_always_inline u32
+ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+				 vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+				 IMB_CIPHER_DIRECTION dir)
+{
+  ipsecmb_main_t *imbm = &ipsecmb_main;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  MB_MGR *m = ptd->mgr;
+  u32 i, n_fail = 0, last_key_index = ~0;
+  u8 iv_data[16];
+  u8 *key = 0;
+
+  if (dir == IMB_DIR_ENCRYPT)
+    {
+      for (i = 0; i < n_ops; i++)
+	{
+	  vnet_crypto_op_t *op = ops[i];
+	  struct chacha20_poly1305_context_data ctx;
+	  vnet_crypto_op_chunk_t *chp;
+	  __m128i iv;
+	  u32 j;
+
+	  ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);
+
+	  /* look the key up only when the key index changes */
+	  if (last_key_index != op->key_index)
+	    {
+	      vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
+
+	      key = kd->data;
+	      last_key_index = op->key_index;
+	    }
+
+	  /* optionally generate a fresh 96-bit IV from the per-thread
+	   * AES-based IV generator */
+	  if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
+	    {
+	      iv = ptd->cbc_iv;
+	      _mm_storeu_si128 ((__m128i *) iv_data, iv);
+	      clib_memcpy_fast (op->iv, iv_data, 12);
+	      ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
+	    }
+
+	  IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
+				      op->aad_len);
+
+	  /* stream each chunk through the incremental update API */
+	  chp = chunks + op->chunk_index;
+	  for (j = 0; j < op->n_chunks; j++)
+	    {
+	      IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
+						chp->src, chp->len);
+	      chp += 1;
+	    }
+
+	  IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);
+
+	  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+	}
+    }
+  else /* dir == IMB_DIR_DECRYPT */
+    {
+      for (i = 0; i < n_ops; i++)
+	{
+	  vnet_crypto_op_t *op = ops[i];
+	  struct chacha20_poly1305_context_data ctx;
+	  vnet_crypto_op_chunk_t *chp;
+	  u8 scratch[16];
+	  u32 j;
+
+	  ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);
+
+	  /* look the key up only when the key index changes */
+	  if (last_key_index != op->key_index)
+	    {
+	      vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
+
+	      key = kd->data;
+	      last_key_index = op->key_index;
+	    }
+
+	  IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
+				      op->aad_len);
+
+	  chp = chunks + op->chunk_index;
+	  for (j = 0; j < op->n_chunks; j++)
+	    {
+	      IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
+						chp->src, chp->len);
+	      chp += 1;
+	    }
+
+	  /* compute the tag into scratch and verify against op->tag */
+	  IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);
+
+	  if (memcmp (op->tag, scratch, op->tag_len))
+	    {
+	      n_fail = n_fail + 1;
+	      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+	    }
+	  else
+	    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+	}
+    }
+
+  return n_ops - n_fail;
+}
+
+/* chained-buffer CHACHA20-POLY1305 encrypt entry point */
+static_always_inline u32
+ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+				      vnet_crypto_op_chunk_t *chunks,
+				      u32 n_ops)
+{
+  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
+					  IMB_DIR_ENCRYPT);
+}
+
+/* chained-buffer CHACHA20-POLY1305 decrypt entry point */
+static_always_inline u32
+ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+				      vnet_crypto_op_chunk_t *chunks,
+				      u32 n_ops)
+{
+  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
+					  IMB_DIR_DECRYPT);
+}
+#endif
+
clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
u32 i;
void *kd;
+  /* TODO: add support for linked (cipher + integrity) keys */
+ if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ return;
+
if (kop == VNET_CRYPTO_KEY_OP_DEL)
{
if (idx >= vec_len (imbm->key_data))
/* AES CBC key expansion */
if (ad->keyexp)
{
- ad->keyexp (key->data, ((ipsecmb_aes_cbc_key_data_t *) kd)->enc_key_exp,
- ((ipsecmb_aes_cbc_key_data_t *) kd)->dec_key_exp);
+ ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
+ ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
return;
}
MB_MGR *m = 0;
u32 eidx;
u8 *name;
+ const u32 burst_jobs_sz = sizeof (JOB_AES_HMAC) * IMB_MAX_BURST_SIZE;
if (!clib_cpu_supports_aes ())
return 0;
vec_foreach (ptd, imbm->per_thread_data)
{
ptd->mgr = alloc_mb_mgr (0);
- if (clib_cpu_supports_avx512f ())
+ memset (ptd->burst_jobs, 0, burst_jobs_sz);
+
+ if (clib_cpu_supports_avx512f ())
init_mb_mgr_avx512 (ptd->mgr);
else if (clib_cpu_supports_avx2 ())
init_mb_mgr_avx2 (ptd->mgr);
foreach_ipsecmb_hmac_op;
#undef _
-#define _(a, b) \
- vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
- ipsecmb_ops_cbc_cipher_enc_##a); \
- vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
- ipsecmb_ops_cbc_cipher_dec_##a); \
- ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \
- ad->data_size = sizeof (ipsecmb_aes_cbc_key_data_t); \
- ad->keyexp = m->keyexp_##b; \
-
- foreach_ipsecmb_cbc_cipher_op;
+#define _(a, b, c) \
+ vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
+ ipsecmb_ops_cipher_enc_##a); \
+ vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
+ ipsecmb_ops_cipher_dec_##a); \
+ ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \
+ ad->data_size = sizeof (ipsecmb_aes_key_data_t); \
+ ad->keyexp = m->keyexp_##b;
+
+ foreach_ipsecmb_cipher_op;
#undef _
#define _(a, b) \
vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
foreach_ipsecmb_gcm_cipher_op;
#undef _
+#ifdef HAVE_IPSECMB_CHACHA_POLY
+ vnet_crypto_register_ops_handler (vm, eidx,
+ VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
+ ipsecmb_ops_chacha_poly_enc);
+ vnet_crypto_register_ops_handler (vm, eidx,
+ VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
+ ipsecmb_ops_chacha_poly_dec);
+ vnet_crypto_register_chained_ops_handler (
+ vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
+ ipsec_mb_ops_chacha_poly_enc_chained);
+ vnet_crypto_register_chained_ops_handler (
+ vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
+ ipsec_mb_ops_chacha_poly_dec_chained);
+ ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
+ ad->data_size = 0;
+#endif
+
vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
return (NULL);
}