2 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
4 * Copyright (c) 2019 Cisco Systems
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
20 #include <intel-ipsec-mb.h>
22 #include <vnet/vnet.h>
23 #include <vnet/plugin/plugin.h>
24 #include <vpp/app/version.h>
25 #include <vnet/crypto/crypto.h>
26 #include <vppinfra/cpu.h>
/* Largest HMAC block size among supported algorithms (SHA-512: 128 bytes);
 * used to size ipad/opad scratch buffers in the key handler. */
28 #define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
/* Bytes needed for an expanded AES key schedule: 15 round keys x 16 bytes
 * (enough for AES-256, the largest supported key size). */
29 #define EXPANDED_KEY_N_BYTES (16 * 15)
/* Per-worker-thread state: one multi-buffer manager per VPP thread and,
 * with library >= 1.3, a preallocated job array for the burst API.
 * NOTE(review): the struct opener is outside this view — confirm layout. */
33 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
35 #if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
/* jobs reused across bursts; zeroed once at init */
36 JOB_AES_HMAC burst_jobs[IMB_MAX_BURST_SIZE];
38 } ipsecmb_per_thread_data_t;
/* Per-algorithm function pointers, filled in at init from the MB manager:
 * GCM key precompute and single-block hash used for HMAC ipad/opad prep.
 * NOTE(review): surrounding struct members are outside this view. */
44 aes_gcm_pre_t aes_gcm_pre;
46 hash_one_block_t hash_one_block;
/* Engine-wide state: per-thread MB managers plus per-algorithm metadata
 * indexed by the vnet crypto algorithm id. */
50 typedef struct ipsecmb_main_t_
/* vector indexed by vm->thread_index */
52 ipsecmb_per_thread_data_t *per_thread_data;
53 ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
/* Cached AES key schedules, expanded once per key by the key handler so the
 * data path never re-expands keys. */
59 u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
60 u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
61 } ipsecmb_aes_key_data_t;
/* single global engine instance */
63 static ipsecmb_main_t ipsecmb_main = { };
66 * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
/* HMAC algorithm table; each row expands via the _() macros below. */
68 #define foreach_ipsecmb_hmac_op \
69 _(SHA1, SHA1, sha1, 64, 20, 20) \
70 _(SHA224, SHA_224, sha224, 64, 32, 28) \
71 _(SHA256, SHA_256, sha256, 64, 32, 32) \
72 _(SHA384, SHA_384, sha384, 128, 64, 48) \
73 _(SHA512, SHA_512, sha512, 128, 64, 64)
76 * (Alg, key-len-bits, JOB_CIPHER_MODE)
/* AES cipher table: CBC and CTR ("CNTR" in IPsec-MB) for 128/192/256-bit keys. */
78 #define foreach_ipsecmb_cipher_op \
79 _ (AES_128_CBC, 128, CBC) \
80 _ (AES_192_CBC, 192, CBC) \
81 _ (AES_256_CBC, 256, CBC) \
82 _ (AES_128_CTR, 128, CNTR) \
83 _ (AES_192_CTR, 192, CNTR) \
84 _ (AES_256_CTR, 256, CNTR)
87 * (Alg, key-len-bytes, iv-len-bytes)
89 #define foreach_ipsecmb_gcm_cipher_op \
/* Translate an IPsec-MB job status into the corresponding vnet crypto op
 * status.  NOTE(review): the switch (status) header is elided from this
 * view; the case labels below belong to it. */
94 static_always_inline vnet_crypto_op_status_t
95 ipsecmb_status_job (JOB_STS status)
100 return VNET_CRYPTO_OP_STATUS_COMPLETED;
101 case STS_BEING_PROCESSED:
102 case STS_COMPLETED_AES:
103 case STS_COMPLETED_HMAC:
/* half-completed states map to "work in progress" */
104 return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
105 case STS_INVALID_ARGS:
106 case STS_INTERNAL_ERROR:
108 return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
/* any unrecognized status is reported as an engine error */
111 return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
/* Retire one finished HMAC job: record failure status, or verify/copy the
 * computed tag into the op.  Increments *n_fail on any failure. */
115 ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
117 vnet_crypto_op_t *op = job->user_data;
/* op->digest_len == 0 means "use the algorithm's full digest size" */
118 u32 len = op->digest_len ? op->digest_len : digest_size;
120 if (PREDICT_FALSE (STS_COMPLETED != job->status))
122 op->status = ipsecmb_status_job (job->status);
123 *n_fail = *n_fail + 1;
/* verify mode: compare computed tag against the digest supplied in the op */
127 if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
129 if ((memcmp (op->digest, job->auth_tag_output, len)))
131 *n_fail = *n_fail + 1;
132 op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
/* generate mode: copy full digest, or only the truncated length requested */
136 else if (len == digest_size)
137 clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
139 clib_memcpy_fast (op->digest, job->auth_tag_output, len);
141 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
144 #if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
/* HMAC handler for library >= 1.3: submit ops in bursts of up to
 * IMB_MAX_BURST_SIZE via the hash burst API, then retire each job.
 * Returns the number of ops that completed successfully. */
145 static_always_inline u32
146 ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
147 u32 block_size, u32 hash_size, u32 digest_size,
150 ipsecmb_main_t *imbm = &ipsecmb_main;
151 ipsecmb_per_thread_data_t *ptd =
152 vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
154 u32 i, n_fail = 0, ops_index = 0;
/* VLA scratch: one tag buffer per op; jobs write tags here, the retire
 * step verifies/copies them into the ops */
155 u8 scratch[n_ops][digest_size];
157 (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
161 const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
163 * configure all the jobs first ...
165 for (i = 0; i < n; i++, ops_index++)
167 vnet_crypto_op_t *op = ops[ops_index];
/* kd = precomputed ipad hash followed by opad hash (see key handler) */
168 const u8 *kd = (u8 *) imbm->key_data[op->key_index];
170 job = &ptd->burst_jobs[i];
173 job->hash_start_src_offset_in_bytes = 0;
174 job->msg_len_to_hash_in_bytes = op->len;
175 job->auth_tag_output_len_in_bytes = digest_size;
176 job->auth_tag_output = scratch[ops_index];
178 job->u.HMAC._hashed_auth_key_xor_ipad = kd;
179 job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
184 * submit all jobs to be processed and retire completed jobs
186 IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);
188 for (i = 0; i < n; i++)
190 job = &ptd->burst_jobs[i];
191 ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
197 return ops_index - n_fail;
/* HMAC handler for pre-1.3 libraries: classic one-job-at-a-time
 * GET_NEXT_JOB / SUBMIT_JOB loop followed by a FLUSH_JOB drain.
 * Returns the number of ops that completed successfully. */
200 static_always_inline u32
201 ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
202 u32 block_size, u32 hash_size, u32 digest_size,
205 ipsecmb_main_t *imbm = &ipsecmb_main;
206 ipsecmb_per_thread_data_t *ptd =
207 vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
/* VLA scratch: one tag buffer per op */
210 u8 scratch[n_ops][digest_size];
213 * queue all the jobs first ...
215 for (i = 0; i < n_ops; i++)
217 vnet_crypto_op_t *op = ops[i];
/* kd = precomputed ipad hash followed by opad hash (see key handler) */
218 u8 *kd = (u8 *) imbm->key_data[op->key_index];
220 job = IMB_GET_NEXT_JOB (ptd->mgr);
223 job->hash_start_src_offset_in_bytes = 0;
224 job->msg_len_to_hash_in_bytes = op->len;
226 job->auth_tag_output_len_in_bytes = digest_size;
227 job->auth_tag_output = scratch[i];
/* hash-only job: null cipher, hash before (no-op) cipher */
229 job->cipher_mode = NULL_CIPHER;
230 job->cipher_direction = DECRYPT;
231 job->chain_order = HASH_CIPHER;
233 job->u.HMAC._hashed_auth_key_xor_ipad = kd;
234 job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
/* SUBMIT may return an earlier job that just completed (or NULL) */
237 job = IMB_SUBMIT_JOB (ptd->mgr);
240 ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
/* drain any jobs still queued inside the manager */
243 while ((job = IMB_FLUSH_JOB (ptd->mgr)))
244 ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
246 return n_ops - n_fail;
/* Stamp out one ipsecmb_ops_hmac_<ALG>() wrapper per table row:
 * _(Alg, JOB_HASH_ALG, fn, block-size, hash-size, digest-size). */
250 #define _(a, b, c, d, e, f) \
251 static_always_inline u32 \
252 ipsecmb_ops_hmac_##a (vlib_main_t * vm, \
253 vnet_crypto_op_t * ops[], \
255 { return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); } \
257 foreach_ipsecmb_hmac_op;
/* Retire one finished cipher job: propagate failure status (bumping
 * *n_fail) or mark the op completed. */
261 ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
263 vnet_crypto_op_t *op = job->user_data;
265 if (PREDICT_FALSE (STS_COMPLETED != job->status))
267 op->status = ipsecmb_status_job (job->status);
268 *n_fail = *n_fail + 1;
271 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
274 #if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
/* AES CBC/CTR handler for library >= 1.3: cipher-only jobs submitted
 * through the cipher burst API.  Returns # of successful ops. */
275 static_always_inline u32
276 ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
277 u32 n_ops, u32 key_len,
278 JOB_CIPHER_DIRECTION direction,
279 JOB_CIPHER_MODE cipher_mode)
281 ipsecmb_main_t *imbm = &ipsecmb_main;
282 ipsecmb_per_thread_data_t *ptd =
283 vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
285 u32 i, n_fail = 0, ops_index = 0;
287 (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
291 const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
293 for (i = 0; i < n; i++)
295 ipsecmb_aes_key_data_t *kd;
296 vnet_crypto_op_t *op = ops[ops_index++];
297 kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
299 job = &ptd->burst_jobs[i];
303 job->msg_len_to_cipher_in_bytes = op->len;
304 job->cipher_start_src_offset_in_bytes = 0;
/* cipher only, no authentication */
306 job->hash_alg = NULL_HASH;
/* key schedules were pre-expanded by the key handler */
308 job->aes_enc_key_expanded = kd->enc_key_exp;
309 job->aes_dec_key_expanded = kd->dec_key_exp;
311 job->iv_len_in_bytes = AES_BLOCK_SIZE;
/* burst submit takes mode/direction/key-length once for all n jobs */
316 IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
317 cipher_mode, direction, key_len / 8);
318 for (i = 0; i < n; i++)
320 job = &ptd->burst_jobs[i];
321 ipsecmb_retire_cipher_job (job, &n_fail);
327 return ops_index - n_fail;
/* AES CBC/CTR handler for pre-1.3 libraries: one job at a time through
 * GET_NEXT_JOB / SUBMIT_JOB, then FLUSH_JOB to drain.  Returns # of
 * successful ops. */
330 static_always_inline u32
331 ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
332 u32 n_ops, u32 key_len,
333 JOB_CIPHER_DIRECTION direction,
334 JOB_CIPHER_MODE cipher_mode)
336 ipsecmb_main_t *imbm = &ipsecmb_main;
337 ipsecmb_per_thread_data_t *ptd =
338 vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
342 for (i = 0; i < n_ops; i++)
344 ipsecmb_aes_key_data_t *kd;
345 vnet_crypto_op_t *op = ops[i];
346 kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
348 job = IMB_GET_NEXT_JOB (ptd->mgr);
352 job->msg_len_to_cipher_in_bytes = op->len;
353 job->cipher_start_src_offset_in_bytes = 0;
/* cipher only, no authentication */
355 job->hash_alg = NULL_HASH;
356 job->cipher_mode = cipher_mode;
357 job->cipher_direction = direction;
/* encrypt-then-(null)hash on encrypt; reverse order on decrypt */
358 job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
360 job->aes_key_len_in_bytes = key_len / 8;
/* key schedules were pre-expanded by the key handler */
361 job->aes_enc_key_expanded = kd->enc_key_exp;
362 job->aes_dec_key_expanded = kd->dec_key_exp;
364 job->iv_len_in_bytes = AES_BLOCK_SIZE;
/* SUBMIT may return an earlier job that just completed (or NULL) */
368 job = IMB_SUBMIT_JOB (ptd->mgr);
371 ipsecmb_retire_cipher_job (job, &n_fail);
/* drain remaining queued jobs */
374 while ((job = IMB_FLUSH_JOB (ptd->mgr)))
375 ipsecmb_retire_cipher_job (job, &n_fail);
377 return n_ops - n_fail;
/* Expand per-algorithm enc/dec wrappers: _(Alg, key-bits, mode).
   NOTE(review): the #define _ opener is elided from this view. */ \
382 static_always_inline u32 ipsecmb_ops_cipher_enc_##a ( \
383 vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
385 return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, ENCRYPT, c); \
/* matching decrypt wrapper */ \
388 static_always_inline u32 ipsecmb_ops_cipher_dec_##a ( \
389 vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
391 return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, DECRYPT, c); \
394 foreach_ipsecmb_cipher_op;
/* GCM handlers expanded per key size (a = alg suffix, b = key bits).
   First: chained (multi-chunk) encrypt using INIT/ENC_UPDATE/FINALIZE.
   NOTE(review): the #define _ opener is elided from this view. */ \
398 static_always_inline u32 \
399 ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm, \
400 vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops) \
402 ipsecmb_main_t *imbm = &ipsecmb_main; \
403 ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
405 MB_MGR *m = ptd->mgr; \
406 vnet_crypto_op_chunk_t *chp; \
409 for (i = 0; i < n_ops; i++) \
411 struct gcm_key_data *kd; \
412 struct gcm_context_data ctx; \
413 vnet_crypto_op_t *op = ops[i]; \
415 kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
416 ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \
417 IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len); \
418 chp = chunks + op->chunk_index; \
419 for (j = 0; j < op->n_chunks; j++) \
421 IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \
425 IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len); \
427 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
/* single-buffer encrypt: one-shot IMB_AES*_GCM_ENC call */ \
433 static_always_inline u32 \
434 ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
437 ipsecmb_main_t *imbm = &ipsecmb_main; \
438 ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
440 MB_MGR *m = ptd->mgr; \
443 for (i = 0; i < n_ops; i++) \
445 struct gcm_key_data *kd; \
446 struct gcm_context_data ctx; \
447 vnet_crypto_op_t *op = ops[i]; \
449 kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
450 IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \
451 op->aad, op->aad_len, op->tag, op->tag_len); \
453 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
/* chained decrypt: tag computed into scratch, then compared to op->tag */ \
459 static_always_inline u32 \
460 ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm, \
461 vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops) \
463 ipsecmb_main_t *imbm = &ipsecmb_main; \
464 ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
466 MB_MGR *m = ptd->mgr; \
467 vnet_crypto_op_chunk_t *chp; \
468 u32 i, j, n_failed = 0; \
470 for (i = 0; i < n_ops; i++) \
472 struct gcm_key_data *kd; \
473 struct gcm_context_data ctx; \
474 vnet_crypto_op_t *op = ops[i]; \
477 kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
478 ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \
479 IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len); \
480 chp = chunks + op->chunk_index; \
481 for (j = 0; j < op->n_chunks; j++) \
483 IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \
487 IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len); \
489 if ((memcmp (op->tag, scratch, op->tag_len))) \
491 op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \
495 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
498 return n_ops - n_failed; \
/* single-buffer decrypt with the same scratch-tag verification */ \
501 static_always_inline u32 \
502 ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
505 ipsecmb_main_t *imbm = &ipsecmb_main; \
506 ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
508 MB_MGR *m = ptd->mgr; \
509 u32 i, n_failed = 0; \
511 for (i = 0; i < n_ops; i++) \
513 struct gcm_key_data *kd; \
514 struct gcm_context_data ctx; \
515 vnet_crypto_op_t *op = ops[i]; \
518 kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
519 IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \
520 op->aad, op->aad_len, scratch, op->tag_len); \
522 if ((memcmp (op->tag, scratch, op->tag_len))) \
524 op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \
528 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
531 return n_ops - n_failed; \
534 foreach_ipsecmb_gcm_cipher_op;
537 #ifdef HAVE_IPSECMB_CHACHA_POLY
/* Retire one finished chacha20-poly1305 job: propagate failure, or verify
 * (decrypt path) / copy out (encrypt path) the poly1305 tag. */
539 ipsecmb_retire_aead_job (JOB_AES_HMAC *job, u32 *n_fail)
541 vnet_crypto_op_t *op = job->user_data;
542 u32 len = op->tag_len;
544 if (PREDICT_FALSE (STS_COMPLETED != job->status))
546 op->status = ipsecmb_status_job (job->status);
547 *n_fail = *n_fail + 1;
/* verify mode: constant computed tag vs the tag supplied in the op */
551 if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
553 if (memcmp (op->tag, job->auth_tag_output, len))
555 *n_fail = *n_fail + 1;
556 op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
/* generate mode: hand the computed tag back to the op */
561 clib_memcpy_fast (op->tag, job->auth_tag_output, len);
563 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
/* Chacha20-poly1305 AEAD handler (single-buffer): one job per op through
 * the MB manager; key lookup is cached across consecutive ops that share
 * a key index.  Returns # of successful ops. */
566 static_always_inline u32
567 ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
568 IMB_CIPHER_DIRECTION dir)
570 ipsecmb_main_t *imbm = &ipsecmb_main;
571 ipsecmb_per_thread_data_t *ptd =
572 vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
574 MB_MGR *m = ptd->mgr;
575 u32 i, n_fail = 0, last_key_index = ~0;
/* 16-byte poly1305 tag per op; VLIB_FRAME_SIZE bounds n_ops */
576 u8 scratch[VLIB_FRAME_SIZE][16];
579 for (i = 0; i < n_ops; i++)
581 vnet_crypto_op_t *op = ops[i];
583 job = IMB_GET_NEXT_JOB (m);
/* only re-fetch key material when the key index changes */
584 if (last_key_index != op->key_index)
586 vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
589 last_key_index = op->key_index;
592 job->cipher_direction = dir;
593 job->chain_order = IMB_ORDER_HASH_CIPHER;
594 job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
595 job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
/* chacha uses the raw key directly for both directions */
596 job->enc_keys = job->dec_keys = key;
597 job->key_len_in_bytes = 32;
599 job->u.CHACHA20_POLY1305.aad = op->aad;
600 job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
/* chacha20-poly1305 nonce is 96 bits */
605 job->iv_len_in_bytes = 12;
606 job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
608 job->cipher_start_src_offset_in_bytes =
609 job->hash_start_src_offset_in_bytes = 0;
611 job->auth_tag_output = scratch[i];
612 job->auth_tag_output_len_in_bytes = 16;
616 job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
618 ipsecmb_retire_aead_job (job, &n_fail);
/* drain jobs still queued inside the manager */
623 while ((job = IMB_FLUSH_JOB (ptd->mgr)))
624 ipsecmb_retire_aead_job (job, &n_fail);
626 return n_ops - n_fail;
/* Direction-specific wrappers registered with the vnet crypto layer. */
629 static_always_inline u32
630 ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
633 return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
636 static_always_inline u32
637 ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
640 return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
/* Chacha20-poly1305 AEAD handler for chained (multi-chunk) buffers, using
 * the direct INIT/UPDATE/FINALIZE API rather than the job queue.  Encrypt
 * emits the tag into op->tag; decrypt recomputes it and compares.
 * Returns # of successful ops. */
643 static_always_inline u32
644 ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
645 vnet_crypto_op_chunk_t *chunks, u32 n_ops,
646 IMB_CIPHER_DIRECTION dir)
648 ipsecmb_main_t *imbm = &ipsecmb_main;
649 ipsecmb_per_thread_data_t *ptd =
650 vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
651 MB_MGR *m = ptd->mgr;
652 u32 i, n_fail = 0, last_key_index = ~0;
655 if (dir == IMB_DIR_ENCRYPT)
657 for (i = 0; i < n_ops; i++)
659 vnet_crypto_op_t *op = ops[i];
660 struct chacha20_poly1305_context_data ctx;
661 vnet_crypto_op_chunk_t *chp;
664 ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);
/* only re-fetch key material when the key index changes */
666 if (last_key_index != op->key_index)
668 vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
671 last_key_index = op->key_index;
674 IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
/* walk the op's chunk list, encrypting each piece in order */
677 chp = chunks + op->chunk_index;
678 for (j = 0; j < op->n_chunks; j++)
680 IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
685 IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);
687 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
690 else /* dir == IMB_DIR_DECRYPT */
692 for (i = 0; i < n_ops; i++)
694 vnet_crypto_op_t *op = ops[i];
695 struct chacha20_poly1305_context_data ctx;
696 vnet_crypto_op_chunk_t *chp;
700 ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);
702 if (last_key_index != op->key_index)
704 vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
707 last_key_index = op->key_index;
710 IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
713 chp = chunks + op->chunk_index;
714 for (j = 0; j < op->n_chunks; j++)
716 IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
/* recompute tag into scratch and verify against the received tag */
721 IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);
723 if (memcmp (op->tag, scratch, op->tag_len))
726 op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
729 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
733 return n_ops - n_fail;
/* Direction-specific chained wrappers registered with the vnet crypto
 * layer. */
736 static_always_inline u32
737 ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
738 vnet_crypto_op_chunk_t *chunks,
741 return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
745 static_always_inline u32
746 ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
747 vnet_crypto_op_chunk_t *chunks,
750 return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
/* vnet crypto key lifecycle callback.  On ADD/MODIFY it allocates a
 * per-key blob sized by the algorithm (ad->data_size) and precomputes
 * whatever the data path needs: AES round keys, GCM hash keys, or HMAC
 * ipad/opad partial hashes.  On DEL it scrubs and frees the blob. */
756 crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
757 vnet_crypto_key_index_t idx)
759 ipsecmb_main_t *imbm = &ipsecmb_main;
760 vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
761 ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
765 /** TODO: add linked alg support **/
766 if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
769 if (kop == VNET_CRYPTO_KEY_OP_DEL)
771 if (idx >= vec_len (imbm->key_data))
774 if (imbm->key_data[idx] == 0)
/* clib_mem_free_s zeroizes before freeing — key material is secret */
777 clib_mem_free_s (imbm->key_data[idx]);
778 imbm->key_data[idx] = 0;
/* data_size == 0 => algorithm not handled by this engine */
782 if (ad->data_size == 0)
785 vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);
/* MODIFY: discard old derived material before recomputing */
787 if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
789 clib_mem_free_s (imbm->key_data[idx]);
792 kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
793 CLIB_CACHE_LINE_BYTES);
795 /* AES CBC key expansion */
798 ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
799 ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
/* GCM: precompute hash subkeys from the raw key */
806 ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
/* HMAC: precompute H(key XOR ipad) and H(key XOR opad) one-block states */
811 if (ad->hash_one_block)
813 const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
814 u64 pad[block_qw], key_hash[block_qw];
816 clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
/* keys longer than the block size are first hashed down (RFC 2104) */
817 if (vec_len (key->data) <= ad->block_size)
818 clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
820 ad->hash_fn (key->data, vec_len (key->data), key_hash);
822 for (i = 0; i < block_qw; i++)
823 pad[i] = key_hash[i] ^ 0x3636363636363636;
824 ad->hash_one_block (pad, kd);
826 for (i = 0; i < block_qw; i++)
827 pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
/* opad state stored in the second half of the per-key blob */
828 ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));
/* Plugin init: allocate and initialize one MB manager per VPP thread
 * (picking the best SIMD variant the CPU supports), then register this
 * engine and all of its op / key handlers with the vnet crypto layer.
 * Bails out early if the CPU lacks AES-NI. */
834 static clib_error_t *
835 crypto_ipsecmb_init (vlib_main_t * vm)
837 ipsecmb_main_t *imbm = &ipsecmb_main;
838 ipsecmb_alg_data_t *ad;
839 ipsecmb_per_thread_data_t *ptd;
840 vlib_thread_main_t *tm = vlib_get_thread_main ();
/* hard requirement: the library relies on AES-NI */
845 if (!clib_cpu_supports_aes ())
849 * A priority that is better than OpenSSL but worse than VPP native
851 name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
853 eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);
855 vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
856 CLIB_CACHE_LINE_BYTES);
859 vec_foreach (ptd, imbm->per_thread_data)
861 ptd->mgr = alloc_mb_mgr (0);
862 #if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
/* burst jobs are reused across calls; zero them once up front */
863 clib_memset_u8 (ptd->burst_jobs, 0,
864 sizeof (JOB_AES_HMAC) * IMB_MAX_BURST_SIZE);
/* pick the widest SIMD implementation this CPU supports */
866 if (clib_cpu_supports_avx512f ())
867 init_mb_mgr_avx512 (ptd->mgr);
868 else if (clib_cpu_supports_avx2 () && clib_cpu_supports_bmi2 ())
869 init_mb_mgr_avx2 (ptd->mgr);
871 init_mb_mgr_sse (ptd->mgr);
873 if (ptd == imbm->per_thread_data)
/* register HMAC handlers and record per-alg metadata
 * (block size, 2x hash-size key blob, one-block hash fns) */
878 #define _(a, b, c, d, e, f) \
879 vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
880 ipsecmb_ops_hmac_##a); \
881 ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a; \
882 ad->block_size = d; \
883 ad->data_size = e * 2; \
884 ad->hash_one_block = m-> c##_one_block; \
885 ad->hash_fn = m-> c; \
887 foreach_ipsecmb_hmac_op;
/* register AES CBC/CTR handlers and the matching key-expansion fn */ \
890 vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
891 ipsecmb_ops_cipher_enc_##a); \
892 vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
893 ipsecmb_ops_cipher_dec_##a); \
894 ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \
895 ad->data_size = sizeof (ipsecmb_aes_key_data_t); \
896 ad->keyexp = m->keyexp_##b;
898 foreach_ipsecmb_cipher_op;
/* register GCM handlers (plain + chained) and key precompute fn */ \
901 vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
902 ipsecmb_ops_gcm_cipher_enc_##a); \
903 vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
904 ipsecmb_ops_gcm_cipher_dec_##a); \
905 vnet_crypto_register_chained_ops_handler \
906 (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
907 ipsecmb_ops_gcm_cipher_enc_##a##_chained); \
908 vnet_crypto_register_chained_ops_handler \
909 (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
910 ipsecmb_ops_gcm_cipher_dec_##a##_chained); \
911 ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \
912 ad->data_size = sizeof (struct gcm_key_data); \
913 ad->aes_gcm_pre = m->gcm##b##_pre; \
915 foreach_ipsecmb_gcm_cipher_op;
/* chacha20-poly1305 support depends on the library build */
918 #ifdef HAVE_IPSECMB_CHACHA_POLY
919 vnet_crypto_register_ops_handler (vm, eidx,
920 VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
921 ipsecmb_ops_chacha_poly_enc);
922 vnet_crypto_register_ops_handler (vm, eidx,
923 VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
924 ipsecmb_ops_chacha_poly_dec);
925 vnet_crypto_register_chained_ops_handler (
926 vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
927 ipsec_mb_ops_chacha_poly_enc_chained);
928 vnet_crypto_register_chained_ops_handler (
929 vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
930 ipsec_mb_ops_chacha_poly_dec_chained);
931 ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
935 vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
/* Run after the vnet crypto subsystem is initialized so engine
 * registration has somewhere to land. */
940 VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
942 .runs_after = VLIB_INITS ("vnet_crypto_init"),
/* VPP plugin descriptor */
947 VLIB_PLUGIN_REGISTER () =
949 .version = VPP_BUILD_VER,
950 .description = "Intel IPSEC Multi-buffer Crypto Engine",
955 * fd.io coding-style-patch-verification: ON
958 * eval: (c-set-style "gnu")