/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* open (), read (), close () are used below for /dev/urandom seeding */
#include <fcntl.h>
#include <unistd.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
#define EXPANDED_KEY_N_BYTES (16 * 15)
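
/*
 * EXPANDED_KEY_N_BYTES is 240 bytes: AES-256 has 14 rounds and hence 15
 * round keys (including the initial AddRoundKey) of 16 bytes each, so a
 * buffer this size holds the expanded key schedule for any AES key length.
 */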

typedef struct
{
  /* per-thread multi-buffer manager and CBC IV generator state;
     field types inferred from their use below */
  MB_MGR *mgr;
  __m128i cbc_iv;
} ipsecmb_per_thread_data_t;

typedef struct
{
  u16 data_size;		/* size of the per-key data blob */
  u8 block_size;		/* hash block size, HMAC only */
  aes_gcm_pre_t aes_gcm_pre;	/* GCM key pre-processing */
  keyexp_t keyexp;		/* AES key expansion */
  hash_one_block_t hash_one_block;	/* single-block hash, for HMAC pads */
  hash_fn_t hash_fn;		/* full hash, for long HMAC keys */
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;		/* per-key data blobs, indexed by key index */
} ipsecmb_main_t;

typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_cbc_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/*
 * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1,   SHA1,    sha1,   64,  20, 20)                      \
  _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
  _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
  _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
  _(SHA512, SHA_512, sha512, 128, 64, 64)

/*
 * (Alg, key-len-bits, iv-len-bytes)
 */
#define foreach_ipsecmb_cbc_cipher_op                          \
  _(AES_128_CBC, 128, 16)                                      \
  _(AES_192_CBC, 192, 16)                                      \
  _(AES_256_CBC, 256, 16)

/*
 * (Alg, key-len-bits, iv-len-bytes)
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128, 12)                                      \
  _(AES_192_GCM, 192, 12)                                      \
  _(AES_256_GCM, 256, 12)
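
/*
 * All the op handlers below follow the intel-ipsec-mb JOB API pattern:
 * IMB_GET_NEXT_JOB () hands out the manager's next free job slot,
 * IMB_SUBMIT_JOB () queues it and returns either NULL or some earlier job
 * that completed as a side effect, and IMB_FLUSH_JOB () drains whatever is
 * still in flight once all ops have been submitted.  The retire_* helpers
 * map a finished job's status back onto the vnet_crypto_op_t stashed in
 * job->user_data.
 */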

static_always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
	{
	  *n_fail = *n_fail + 1;
	  op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
	  return;
	}
    }
  else if (len == digest_size)
    clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}
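
/*
 * The full digest is always computed into per-op scratch space; ops that
 * request a truncated digest (op->digest_len set, e.g. HMAC-SHA-256-128)
 * only compare or copy the first digest_len bytes of it.
 */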

static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
			 u32 n_ops, u32 block_size, u32 hash_size,
			 u32 digest_size, JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
						     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][digest_size];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 *kd = (u8 *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = digest_size;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = NULL_CIPHER;
      job->cipher_direction = DECRYPT;
      job->chain_order = HASH_CIPHER;

      job->u.HMAC._hashed_auth_key_xor_ipad = kd;
      job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
	ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
    }

  /*
   * ... then flush (i.e. complete) the rest
   */
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_hmac_job (job, &n_fail, digest_size);

  return n_ops - n_fail;
}

#define _(a, b, c, d, e, f)                                             \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
		      vnet_crypto_op_t * ops[],                         \
		      u32 n_ops)                                        \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); }        \

foreach_ipsecmb_hmac_op;
#undef _
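
/*
 * For instance, the SHA1 entry of foreach_ipsecmb_hmac_op expands (modulo
 * whitespace) to:
 *
 *   static_always_inline u32
 *   ipsecmb_ops_hmac_SHA1 (vlib_main_t * vm, vnet_crypto_op_t * ops[],
 *                          u32 n_ops)
 *   { return ipsecmb_ops_hmac_inline (vm, ops, n_ops, 64, 20, 20, SHA1); }
 */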

static_always_inline void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}
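
/*
 * For encrypt ops flagged VNET_CRYPTO_OP_FLAG_INIT_IV, the IV comes from a
 * per-thread 128-bit state that is seeded from /dev/urandom at init time
 * (crypto_ipsecmb_iv_init below) and advanced by one AESENC round of
 * itself on every use; this yields a fresh, hard-to-predict IV per packet
 * without a syscall on the data path.
 */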

static_always_inline u32
ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
			       vnet_crypto_op_t * ops[],
			       u32 n_ops, u32 key_len, u32 iv_len,
			       JOB_CIPHER_DIRECTION direction)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
						     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      ipsecmb_aes_cbc_key_data_t *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (ipsecmb_aes_cbc_key_data_t *) imbm->key_data[op->key_index];
      __m128i iv;

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = NULL_HASH;
      job->cipher_mode = CBC;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
	{
	  iv = ptd->cbc_iv;
	  _mm_storeu_si128 ((__m128i *) op->iv, iv);
	  ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
	}

      job->aes_key_len_in_bytes = key_len / 8;
      job->aes_enc_key_expanded = kd->enc_key_exp;
      job->aes_dec_key_expanded = kd->dec_key_exp;
      job->iv = op->iv;
      job->iv_len_in_bytes = iv_len;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
	ipsecmb_retire_cipher_job (job, &n_fail);
    }

  /*
   * ... then flush (i.e. complete) them
   * We will have queued enough to satisfy the 'multi' buffer
   */
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_cipher_job (job, &n_fail);

  return n_ops - n_fail;
}

#define _(a, b, c)                                                        \
static_always_inline u32                                                  \
ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm,                         \
				vnet_crypto_op_t * ops[],                 \
				u32 n_ops)                                \
{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, c, ENCRYPT); } \

foreach_ipsecmb_cbc_cipher_op;
#undef _

#define _(a, b, c)                                                        \
static_always_inline u32                                                  \
ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm,                         \
				vnet_crypto_op_t * ops[],                 \
				u32 n_ops)                                \
{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, c, DECRYPT); } \

foreach_ipsecmb_cbc_cipher_op;
#undef _
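
/*
 * GCM jobs are built the same way, but with a 12-byte nonce laid out as
 * in RFC 4106 ESP: a 4-byte salt followed by the 8-byte per-packet IV.
 * (The salt source shown below is an assumption; only the 8-byte IV copy
 * is certain from the surviving code.)  On decrypt the tag is computed
 * into scratch space and compared against the received op->tag; only
 * encrypt writes the tag out.
 */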

static_always_inline void
ipsecmb_retire_gcm_cipher_job (JOB_AES_HMAC * job,
			       u32 * n_fail, JOB_CIPHER_DIRECTION direction)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
      return;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (DECRYPT == direction)
    {
      if ((memcmp (op->tag, job->auth_tag_output, op->tag_len)))
	{
	  *n_fail = *n_fail + 1;
	  op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
	}
    }
}

static_always_inline u32
ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
			       vnet_crypto_op_t * ops[],
			       u32 n_ops, u32 key_len, u32 iv_len,
			       JOB_CIPHER_DIRECTION direction)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
						     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][64];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      struct gcm_key_data *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];
      u32 nonce[3];		/* 4-byte salt + 8-byte IV */
      __m128i iv;

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = AES_GMAC;
      job->cipher_mode = GCM;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      if (direction == ENCRYPT)
	{
	  if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
	    {
	      iv = ptd->cbc_iv;
	      // only use 8 bytes of the IV
	      clib_memcpy_fast (op->iv, &iv, 8);
	      ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
	    }
	  nonce[0] = op->salt;	/* assumed source of the 4-byte salt */
	  clib_memcpy_fast (nonce + 1, op->iv, 8);
	  job->iv = (u8 *) nonce;
	}
      else
	{
	  nonce[0] = op->salt;	/* assumed source of the 4-byte salt */
	  clib_memcpy_fast (nonce + 1, op->iv, 8);
	  job->iv = (u8 *) nonce;
	}

      job->aes_key_len_in_bytes = key_len / 8;
      job->aes_enc_key_expanded = kd;
      job->aes_dec_key_expanded = kd;
      job->iv_len_in_bytes = iv_len;

      job->u.GCM.aad = op->aad;
      job->u.GCM.aad_len_in_bytes = op->aad_len;
      job->auth_tag_output_len_in_bytes = op->tag_len;
      if (DECRYPT == direction)
	job->auth_tag_output = scratch[i];
      else
	job->auth_tag_output = op->tag;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
	ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
    }

  /*
   * ... then flush (i.e. complete) them
   * We will have queued enough to satisfy the 'multi' buffer
   */
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);

  return n_ops - n_fail;
}

#define _(a, b, c)                                                        \
static_always_inline u32                                                  \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm,                         \
				vnet_crypto_op_t * ops[],                 \
				u32 n_ops)                                \
{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, c, ENCRYPT); } \

foreach_ipsecmb_gcm_cipher_op;
#undef _

#define _(a, b, c)                                                        \
static_always_inline u32                                                  \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm,                         \
				vnet_crypto_op_t * ops[],                 \
				u32 n_ops)                                \
{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, c, DECRYPT); } \

foreach_ipsecmb_gcm_cipher_op;
#undef _

static clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
  ipsecmb_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, imbm->per_thread_data)
  {
    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
      {
	err = clib_error_return_unix (0, "'/dev/urandom' read failure");
	close (fd);
	return err;
      }
  }

  close (fd);
  return NULL;
}
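
/*
 * HMAC (K, m) = H ((K ^ opad) || H ((K ^ ipad) || m)).  K ^ ipad and
 * K ^ opad are constant per key, so the key handler below pre-hashes one
 * block of each and stores the two intermediate hash states back to back
 * (ipad state first, opad state at data_size / 2), saving two compression
 * function calls on every packet.  Keys longer than the hash block size
 * are first reduced with hash_fn, as the HMAC definition requires.
 */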

static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
			    vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
	return;

      if (imbm->key_data[idx] == 0)
	return;

      clib_memset_u8 (imbm->key_data[idx], 0,
		      clib_mem_size (imbm->key_data[idx]));
      clib_mem_free (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_memset_u8 (imbm->key_data[idx], 0,
		      clib_mem_size (imbm->key_data[idx]));
      clib_mem_free (imbm->key_data[idx]);
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
						     CLIB_CACHE_LINE_BYTES);

  /* AES CBC key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_cbc_key_data_t *) kd)->enc_key_exp,
		  ((ipsecmb_aes_cbc_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM key pre-processing */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

  /* HMAC ipad/opad pre-computation */
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
	clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
	ad->hash_fn (key->data, vec_len (key->data), key_hash);

      for (i = 0; i < block_qw; i++)
	pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      for (i = 0; i < block_qw; i++)
	pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));
    }
}
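
/*
 * Init allocates one MB_MGR per VPP thread and initializes it with the
 * widest SIMD code path the CPU supports at runtime (AVX-512, then AVX2,
 * then SSE as the fallback).  The first thread's manager doubles as the
 * source of the per-algorithm function pointers registered below.
 */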

static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  MB_MGR *m = 0;
  u32 eidx;
  u8 *name;

  if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
    return error;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
		 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate (imbm->per_thread_data, tm->n_vlib_mains - 1);

  vec_foreach (ptd, imbm->per_thread_data)
  {
    ptd->mgr = alloc_mb_mgr (0);
    if (clib_cpu_supports_avx512f ())
      init_mb_mgr_avx512 (ptd->mgr);
    else if (clib_cpu_supports_avx2 ())
      init_mb_mgr_avx2 (ptd->mgr);
    else
      init_mb_mgr_sse (ptd->mgr);

    if (ptd == imbm->per_thread_data)
      m = ptd->mgr;
  }

  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
    return error;

#define _(a, b, c, d, e, f)                                              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
				    ipsecmb_ops_hmac_##a);               \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a;                        \
  ad->block_size = d;                                                    \
  ad->data_size = e * 2;                                                 \
  ad->hash_one_block = m-> c##_one_block;                                \
  ad->hash_fn = m-> c;                                                   \

  foreach_ipsecmb_hmac_op;
#undef _

#define _(a, b, c)                                                       \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,  \
				    ipsecmb_ops_cbc_cipher_enc_##a);     \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,  \
				    ipsecmb_ops_cbc_cipher_dec_##a);     \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                             \
  ad->data_size = sizeof (ipsecmb_aes_cbc_key_data_t);                   \
  ad->keyexp = m->keyexp_##b;                                            \

  foreach_ipsecmb_cbc_cipher_op;
#undef _

#define _(a, b, c)                                                       \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,  \
				    ipsecmb_ops_gcm_cipher_enc_##a);     \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,  \
				    ipsecmb_ops_gcm_cipher_dec_##a);     \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                             \
  ad->data_size = sizeof (struct gcm_key_data);                          \
  ad->aes_gcm_pre = m->gcm##b##_pre;                                     \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);

  return 0;
}

VLIB_INIT_FUNCTION (crypto_ipsecmb_init);

VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC multi-buffer",
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */