/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <x86intrin.h>
#include <crypto_ia32/crypto_ia32.h>
#include <crypto_ia32/aesni.h>
#include <crypto_ia32/ghash.h>
#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize ("O3")
#endif
typedef struct
{
  /* pre-calculated hash key values */
  const __m128i Hi[8];
  /* extracted AES key */
  const __m128i Ke[15];
} aes_gcm_key_data_t;
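/* layout note: judging from the usage below, Hi[i] holds the (i + 1)-th
 * power of the hash key H (Hi[0] = H^1, ... Hi[7] = H^8), as produced by
 * ghash_precompute () in aesni_gcm_key_exp (). This ordering lets an
 * n-block GHASH batch be evaluated as
 *   T' = (X1 ^ T) * H^n  ^  X2 * H^(n-1)  ^ ... ^  Xn * H^1
 * with a single reduction at the end */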
static const __m128i last_byte_one = { 0, 1ULL << 56 };
static const __m128i zero = { 0, 0 };
static const u8x16 bswap_mask = {
  15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
};
static const u8x16 byte_mask_scale = {
  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
static_always_inline __m128i
aesni_gcm_bswap (__m128i x)
{
  return _mm_shuffle_epi8 (x, (__m128i) bswap_mask);
}
static_always_inline __m128i
aesni_gcm_byte_mask (__m128i x, u8 n_bytes)
{
  u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);

  return _mm_blendv_epi8 (zero, x, (__m128i) mask);
}
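/* e.g. for n_bytes = 3, u8x16_splat (3) compared against byte_mask_scale
 * { 0, 1, 2, 3, ... } yields 0xff in byte lanes 0-2 and 0x00 elsewhere,
 * so the blend above keeps the first 3 bytes of x and zeroes the rest */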
static_always_inline __m128i
aesni_gcm_load_partial (__m128i * p, int n_bytes)
{
#ifdef __AVX512F__
  return _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p);
#else
  return aesni_gcm_byte_mask (_mm_loadu_si128 (p), n_bytes);
#endif
}
static_always_inline void
aesni_gcm_store_partial (void *p, __m128i r, int n_bytes)
{
#ifdef __AVX512F__
  _mm_mask_storeu_epi8 (p, (1 << n_bytes) - 1, r);
#else
  u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);
  _mm_maskmoveu_si128 (r, (__m128i) mask, p);
#endif
}
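/* with AVX-512 the masked load/store instructions above touch only the
 * first n_bytes of the buffer; the SSE fallback loads a full 16 bytes
 * and masks the result in a register, while the store side uses
 * MASKMOVDQU, which suppresses writes to the masked-out bytes */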
static_always_inline void
aesni_gcm_load (__m128i * d, __m128i * inv, int n, int n_bytes)
{
  for (int i = 0; i < n - 1; i++)
    d[i] = _mm_loadu_si128 (inv + i);
  d[n - 1] = n_bytes ? aesni_gcm_load_partial (inv + n - 1, n_bytes) :
    _mm_loadu_si128 (inv + n - 1);
}
static_always_inline void
aesni_gcm_store (__m128i * d, __m128i * outv, int n, int n_bytes)
{
  for (int i = 0; i < n - 1; i++)
    _mm_storeu_si128 (outv + i, d[i]);
  if (n_bytes)
    aesni_gcm_store_partial (outv + n - 1, d[n - 1], n_bytes);
  else
    _mm_storeu_si128 (outv + n - 1, d[n - 1]);
}
static_always_inline void
aesni_gcm_enc_first_round (__m128i * r, __m128i * Y, u32 * ctr, __m128i k,
			   int n_blocks)
{
  u32 i;

  if (PREDICT_TRUE ((u8) ctr[0] < (256 - n_blocks)))
    {
      for (i = 0; i < n_blocks; i++)
	{
	  Y[0] = _mm_add_epi32 (Y[0], last_byte_one);
	  r[i] = k ^ Y[0];
	}
      ctr[0] += n_blocks;
    }
  else
    {
      for (i = 0; i < n_blocks; i++)
	{
	  Y[0] = _mm_insert_epi32 (Y[0], clib_host_to_net_u32 (++ctr[0]), 3);
	  r[i] = k ^ Y[0];
	}
    }
}
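/* the GCM counter is the last 32 bits of Y, stored in network byte
 * order; in the fast path above, as long as adding n_blocks cannot carry
 * out of the low counter byte, a single SIMD add of last_byte_one per
 * block suffices, otherwise the counter is incremented on the host side
 * and re-inserted big-endian */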
static_always_inline void
aesni_gcm_enc_round (__m128i * r, __m128i k, int n_blocks)
{
  for (int i = 0; i < n_blocks; i++)
    r[i] = _mm_aesenc_si128 (r[i], k);
}
static_always_inline void
aesni_gcm_enc_last_round (__m128i * r, __m128i * d, const __m128i * k,
			  int rounds, int n_blocks)
{
  /* additional rounds for AES-192 and AES-256 */
  for (int i = 10; i < rounds; i++)
    aesni_gcm_enc_round (r, k[i], n_blocks);

  for (int i = 0; i < n_blocks; i++)
    d[i] ^= _mm_aesenclast_si128 (r[i], k[rounds]);
}
static_always_inline __m128i
aesni_gcm_ghash_blocks (__m128i T, aes_gcm_key_data_t * kd,
			const __m128i * in, int n_blocks)
{
  ghash_data_t _gd, *gd = &_gd;
  const __m128i *Hi = kd->Hi + n_blocks - 1;
  ghash_mul_first (gd, aesni_gcm_bswap (_mm_loadu_si128 (in)) ^ T, Hi[0]);
  for (int i = 1; i < n_blocks; i++)
    ghash_mul_next (gd, aesni_gcm_bswap (_mm_loadu_si128 (in + i)), Hi[-i]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  return ghash_final (gd);
}
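/* the function above folds n_blocks into the running hash as
 * T' = (X1 ^ T) * H^n ^ X2 * H^(n-1) ^ ... ^ Xn * H^1, accumulating all
 * carry-less products before a single two-step polynomial reduction,
 * which is cheaper than reducing after every block */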
static_always_inline __m128i
aesni_gcm_ghash (__m128i T, aes_gcm_key_data_t * kd, const __m128i * in,
		 u32 n_left)
{
  while (n_left >= 128)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 8);
      n_left -= 128;
      in += 8;
    }

  if (n_left >= 64)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 4);
      n_left -= 64;
      in += 4;
    }

  if (n_left >= 32)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 2);
      n_left -= 32;
      in += 2;
    }

  if (n_left >= 16)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 1);
      n_left -= 16;
      in += 1;
    }

  if (n_left)
    {
      __m128i r = aesni_gcm_load_partial ((__m128i *) in, n_left);
      T = ghash_mul (aesni_gcm_bswap (r) ^ T, kd->Hi[0]);
    }
  return T;
}
static_always_inline __m128i
aesni_gcm_calc (__m128i T, aes_gcm_key_data_t * kd, __m128i * d,
		__m128i * Y, u32 * ctr, __m128i * inv, __m128i * outv,
		int rounds, int n, int last_block_bytes, int with_ghash,
		int is_encrypt)
{
  __m128i r[n];
  ghash_data_t _gd = { }, *gd = &_gd;
  const __m128i *k = kd->Ke;
  int hidx = is_encrypt ? 4 : n, didx = 0;

  _mm_prefetch (inv + 4, _MM_HINT_T0);

  /* AES rounds 0 and 1 */
  aesni_gcm_enc_first_round (r, Y, ctr, k[0], n);
  aesni_gcm_enc_round (r, k[1], n);

  /* load data - decrypt round */
  if (is_encrypt == 0)
    aesni_gcm_load (d, inv, n, last_block_bytes);

  /* GHASH multiply block 1 */
  if (with_ghash)
    ghash_mul_first (gd, aesni_gcm_bswap (d[didx++]) ^ T, kd->Hi[--hidx]);

  /* AES rounds 2 and 3 */
  aesni_gcm_enc_round (r, k[2], n);
  aesni_gcm_enc_round (r, k[3], n);

  /* GHASH multiply block 2 */
  if (with_ghash && hidx)
    ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);

  /* AES rounds 4 and 5 */
  aesni_gcm_enc_round (r, k[4], n);
  aesni_gcm_enc_round (r, k[5], n);

  /* GHASH multiply block 3 */
  if (with_ghash && hidx)
    ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);

  /* AES rounds 6 and 7 */
  aesni_gcm_enc_round (r, k[6], n);
  aesni_gcm_enc_round (r, k[7], n);

  /* GHASH multiply block 4 */
  if (with_ghash && hidx)
    ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);

  /* AES rounds 8 and 9 */
  aesni_gcm_enc_round (r, k[8], n);
  aesni_gcm_enc_round (r, k[9], n);

  /* GHASH reduce 1st step */
  if (with_ghash)
    ghash_reduce (gd);

  /* load data - encrypt round */
  if (is_encrypt)
    aesni_gcm_load (d, inv, n, last_block_bytes);

  /* GHASH reduce 2nd step */
  if (with_ghash)
    ghash_reduce2 (gd);

  /* AES last round(s) */
  aesni_gcm_enc_last_round (r, d, k, rounds, n);

  /* store data */
  aesni_gcm_store (d, outv, n, last_block_bytes);

  /* GHASH final step */
  if (with_ghash)
    T = ghash_final (gd);

  return T;
}
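/* in aesni_gcm_calc above, AES rounds and GHASH multiplies are
 * deliberately interleaved so that AESENC and PCLMULQDQ, which execute
 * on different units, overlap and hide each other's latency; on encrypt
 * GHASH covers the ciphertext produced by the previous call (hidx
 * starts at 4 and d[] is reloaded only after the multiplies), while on
 * decrypt the just-loaded ciphertext is hashed in the same pass (hidx
 * starts at n) */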
static_always_inline __m128i
aesni_gcm_calc_double (__m128i T, aes_gcm_key_data_t * kd, __m128i * d,
		       __m128i * Y, u32 * ctr, __m128i * inv, __m128i * outv,
		       int rounds, int is_encrypt)
{
  __m128i r[4];
  ghash_data_t _gd, *gd = &_gd;
  const __m128i *k = kd->Ke;

  /* AES rounds 0 and 1 */
  aesni_gcm_enc_first_round (r, Y, ctr, k[0], 4);
  aesni_gcm_enc_round (r, k[1], 4);

  /* load 4 blocks of data - decrypt round */
  if (is_encrypt == 0)
    aesni_gcm_load (d, inv, 4, 0);

  /* GHASH multiply block 0 */
  ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[7]);

  /* AES rounds 2 and 3 */
  aesni_gcm_enc_round (r, k[2], 4);
  aesni_gcm_enc_round (r, k[3], 4);

  /* GHASH multiply block 1 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[6]);

  /* AES rounds 4 and 5 */
  aesni_gcm_enc_round (r, k[4], 4);
  aesni_gcm_enc_round (r, k[5], 4);

  /* GHASH multiply block 2 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[5]);

  /* AES rounds 6 and 7 */
  aesni_gcm_enc_round (r, k[6], 4);
  aesni_gcm_enc_round (r, k[7], 4);

  /* GHASH multiply block 3 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[4]);

  /* AES rounds 8 and 9 */
  aesni_gcm_enc_round (r, k[8], 4);
  aesni_gcm_enc_round (r, k[9], 4);

  /* load 4 blocks of data - encrypt round */
  if (is_encrypt)
    aesni_gcm_load (d, inv, 4, 0);

  /* AES last round(s) */
  aesni_gcm_enc_last_round (r, d, k, rounds, 4);

  /* store 4 blocks of data */
  aesni_gcm_store (d, outv, 4, 0);

  /* load next 4 blocks of data - decrypt round */
  if (is_encrypt == 0)
    aesni_gcm_load (d, inv + 4, 4, 0);

  /* GHASH multiply block 4 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[0]), kd->Hi[3]);

  /* AES rounds 0, 1 and 2 */
  aesni_gcm_enc_first_round (r, Y, ctr, k[0], 4);
  aesni_gcm_enc_round (r, k[1], 4);
  aesni_gcm_enc_round (r, k[2], 4);

  /* GHASH multiply block 5 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[2]);

  /* AES rounds 3 and 4 */
  aesni_gcm_enc_round (r, k[3], 4);
  aesni_gcm_enc_round (r, k[4], 4);

  /* GHASH multiply block 6 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[1]);

  /* AES rounds 5 and 6 */
  aesni_gcm_enc_round (r, k[5], 4);
  aesni_gcm_enc_round (r, k[6], 4);

  /* GHASH multiply block 7 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[0]);

  /* AES rounds 7 and 8 */
  aesni_gcm_enc_round (r, k[7], 4);
  aesni_gcm_enc_round (r, k[8], 4);

  /* GHASH reduce 1st step */
  ghash_reduce (gd);

  /* AES round 9 */
  aesni_gcm_enc_round (r, k[9], 4);

  /* load data - encrypt round */
  if (is_encrypt)
    aesni_gcm_load (d, inv + 4, 4, 0);

  /* GHASH reduce 2nd step */
  ghash_reduce2 (gd);

  /* AES last round(s) */
  aesni_gcm_enc_last_round (r, d, k, rounds, 4);

  /* store data */
  aesni_gcm_store (d, outv + 4, 4, 0);

  /* GHASH final step */
  return ghash_final (gd);
}
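/* calc_double processes 8 blocks (two 4-block counter batches) per call
 * so that a single GHASH aggregation can run across Hi[7]..Hi[0] while
 * two full sets of AES rounds execute, keeping both the AES and the
 * carry-less multiply pipelines busy */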
static_always_inline __m128i
aesni_gcm_ghash_last (__m128i T, aes_gcm_key_data_t * kd, __m128i * d,
		      int n_blocks, int n_bytes)
{
  ghash_data_t _gd, *gd = &_gd;

  if (n_bytes)
    d[n_blocks - 1] = aesni_gcm_byte_mask (d[n_blocks - 1], n_bytes);

  ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[n_blocks - 1]);
  if (n_blocks > 1)
    ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[n_blocks - 2]);
  if (n_blocks > 2)
    ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[n_blocks - 3]);
  if (n_blocks > 3)
    ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[n_blocks - 4]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  return ghash_final (gd);
}
static_always_inline __m128i
aesni_gcm_enc (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 * in,
	       const u8 * out, u32 n_left, int rounds)
{
  __m128i *inv = (__m128i *) in, *outv = (__m128i *) out;
  __m128i d[4];
  u32 ctr = 1;

  if (n_left == 0)
    return T;

  if (n_left < 64)
    {
      if (n_left > 48)
	{
	  n_left -= 48;
	  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
			  /* with_ghash */ 0, /* is_encrypt */ 1);
	  return aesni_gcm_ghash_last (T, kd, d, 4, n_left);
	}
      else if (n_left > 32)
	{
	  n_left -= 32;
	  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
			  /* with_ghash */ 0, /* is_encrypt */ 1);
	  return aesni_gcm_ghash_last (T, kd, d, 3, n_left);
	}
      else if (n_left > 16)
	{
	  n_left -= 16;
	  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
			  /* with_ghash */ 0, /* is_encrypt */ 1);
	  return aesni_gcm_ghash_last (T, kd, d, 2, n_left);
	}
      else
	{
	  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
			  /* with_ghash */ 0, /* is_encrypt */ 1);
	  return aesni_gcm_ghash_last (T, kd, d, 1, n_left);
	}
    }

  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
		  /* with_ghash */ 0, /* is_encrypt */ 1);

  /* next */
  n_left -= 64;
  outv += 4;
  inv += 4;

  while (n_left >= 128)
    {
      T = aesni_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
				 /* is_encrypt */ 1);

      /* next */
      n_left -= 128;
      outv += 8;
      inv += 8;
    }

  if (n_left >= 64)
    {
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
			  /* with_ghash */ 1, /* is_encrypt */ 1);

      /* next */
      n_left -= 64;
      outv += 4;
      inv += 4;
    }

  if (n_left == 0)
    return aesni_gcm_ghash_last (T, kd, d, 4, 0);

  if (n_left > 48)
    {
      n_left -= 48;
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
			  /* with_ghash */ 1, /* is_encrypt */ 1);
      return aesni_gcm_ghash_last (T, kd, d, 4, n_left);
    }

  if (n_left > 32)
    {
      n_left -= 32;
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
			  /* with_ghash */ 1, /* is_encrypt */ 1);
      return aesni_gcm_ghash_last (T, kd, d, 3, n_left);
    }

  if (n_left > 16)
    {
      n_left -= 16;
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
			  /* with_ghash */ 1, /* is_encrypt */ 1);
      return aesni_gcm_ghash_last (T, kd, d, 2, n_left);
    }

  T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
		      /* with_ghash */ 1, /* is_encrypt */ 1);
  return aesni_gcm_ghash_last (T, kd, d, 1, n_left);
}
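/* note that the encrypt path above runs GHASH one 64-byte batch behind
 * encryption: the first aesni_gcm_calc call passes with_ghash = 0, each
 * subsequent call hashes the ciphertext still sitting in d[] from the
 * previous batch, and aesni_gcm_ghash_last sweeps up the final batch */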
static_always_inline __m128i
aesni_gcm_dec (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 * in,
	       const u8 * out, u32 n_left, int rounds)
{
  __m128i *inv = (__m128i *) in, *outv = (__m128i *) out;
  __m128i d[4];
  u32 ctr = 1;

  while (n_left >= 128)
    {
      T = aesni_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
				 /* is_encrypt */ 0);

      /* next */
      n_left -= 128;
      outv += 8;
      inv += 8;
    }

  if (n_left >= 64)
    {
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0, 1, 0);

      /* next */
      n_left -= 64;
      outv += 4;
      inv += 4;
    }

  if (n_left == 0)
    return T;

  if (n_left > 48)
    return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4,
			   n_left - 48,
			   /* with_ghash */ 1, /* is_encrypt */ 0);

  if (n_left > 32)
    return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3,
			   n_left - 32,
			   /* with_ghash */ 1, /* is_encrypt */ 0);

  if (n_left > 16)
    return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2,
			   n_left - 16,
			   /* with_ghash */ 1, /* is_encrypt */ 0);

  return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
			 /* with_ghash */ 1, /* is_encrypt */ 0);
}
static_always_inline int
aes_gcm (const u8 * in, u8 * out, const u8 * addt, const u8 * iv, u8 * tag,
	 u32 data_bytes, u32 aad_bytes, u8 tag_len, aes_gcm_key_data_t * kd,
	 int aes_rounds, int is_encrypt)
{
  int i;
  __m128i r, Y0, T = { };
  ghash_data_t _gd, *gd = &_gd;

  _mm_prefetch (iv, _MM_HINT_T0);
  _mm_prefetch (in, _MM_HINT_T0);
  _mm_prefetch (in + CLIB_CACHE_LINE_BYTES, _MM_HINT_T0);
  /* calculate ghash for AAD - optimized for ipsec common cases */
  if (aad_bytes == 8)
    T = aesni_gcm_ghash (T, kd, (__m128i *) addt, 8);
  else if (aad_bytes == 12)
    T = aesni_gcm_ghash (T, kd, (__m128i *) addt, 12);
  else
    T = aesni_gcm_ghash (T, kd, (__m128i *) addt, aad_bytes);
  /* initialize counter */
  Y0 = _mm_loadu_si128 ((__m128i *) iv);
  Y0 = _mm_insert_epi32 (Y0, clib_host_to_net_u32 (1), 3);
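
  /* for a 96-bit IV, GCM defines Y0 = IV || 0x00000001; the counter in
   * the last word starts at 1, so data blocks are encrypted with counter
   * values 2, 3, ... (aesni_gcm_enc_first_round pre-increments) and
   * E(K, Y0) is reserved for masking the tag below */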
  /* ghash and encrypt/decrypt */
  if (is_encrypt)
    T = aesni_gcm_enc (T, kd, Y0, in, out, data_bytes, aes_rounds);
  else
    T = aesni_gcm_dec (T, kd, Y0, in, out, data_bytes, aes_rounds);
  _mm_prefetch (tag, _MM_HINT_T0);

  /* finalize ghash - data bytes and aad bytes converted to bits */
  r = (__m128i) ((u64x2) { data_bytes, aad_bytes } << 3);

  /* interleaved computation of final ghash and E(Y0, k) */
  ghash_mul_first (gd, r ^ T, kd->Hi[0]);
  r = kd->Ke[0] ^ Y0;
  for (i = 1; i < 5; i += 1)
    r = _mm_aesenc_si128 (r, kd->Ke[i]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  for (; i < 9; i += 1)
    r = _mm_aesenc_si128 (r, kd->Ke[i]);
  T = ghash_final (gd);
  for (; i < aes_rounds; i += 1)
    r = _mm_aesenc_si128 (r, kd->Ke[i]);
  r = _mm_aesenclast_si128 (r, kd->Ke[aes_rounds]);
  T = aesni_gcm_bswap (T) ^ r;
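
  /* T now holds the complete 16-byte tag: the GHASH result, byte-swapped
   * back to wire order, masked with E(K, Y0) computed in parallel above */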
  /* tag_len 16 -> 0 */
  tag_len &= 0xf;

  if (is_encrypt)
    {
      /* store tag - store_partial takes a byte count */
      if (tag_len)
	aesni_gcm_store_partial (tag, T, tag_len);
      else
	_mm_storeu_si128 ((__m128i *) tag, T);
    }
  else
    {
      /* check tag - compare byte-wise so partial-length tags verify */
      u16 tag_mask = tag_len ? (1 << tag_len) - 1 : 0xffff;
      r = _mm_loadu_si128 ((__m128i *) tag);
      if (_mm_movemask_epi8 ((__m128i) ((u8x16) r == (u8x16) T)) != tag_mask)
	return 0;
    }
  return 1;
}
static_always_inline u32
aesni_ops_enc_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
		       u32 n_ops, aesni_key_size_t ks)
{
  crypto_ia32_main_t *cm = &crypto_ia32_main;
  vnet_crypto_op_t *op = ops[0];
  aes_gcm_key_data_t *kd;
  u32 n_left = n_ops;

next:
  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
  aes_gcm (op->src, op->dst, op->aad, op->iv, op->tag, op->len, op->aad_len,
	   op->tag_len, kd, AESNI_KEY_ROUNDS (ks), /* is_encrypt */ 1);
  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (--n_left)
    {
      op += 1;
      goto next;
    }

  return n_ops;
}
static_always_inline u32
aesni_ops_dec_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
		       u32 n_ops, aesni_key_size_t ks)
{
  crypto_ia32_main_t *cm = &crypto_ia32_main;
  vnet_crypto_op_t *op = ops[0];
  aes_gcm_key_data_t *kd;
  u32 n_left = n_ops;
  int rv;

next:
  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
  rv = aes_gcm (op->src, op->dst, op->aad, op->iv, op->tag, op->len,
		op->aad_len, op->tag_len, kd, AESNI_KEY_ROUNDS (ks),
		/* is_encrypt */ 0);

  if (rv)
    {
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
    }
  else
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      n_ops--;
    }

  if (--n_left)
    {
      op += 1;
      goto next;
    }

  return n_ops;
}
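/* both handlers above are assumed to follow the vnet_crypto convention
 * of returning the number of successfully completed ops, so a failed tag
 * check both marks the op FAIL_BAD_HMAC and decrements that count */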
static_always_inline void *
aesni_gcm_key_exp (vnet_crypto_key_t * key, aesni_key_size_t ks)
{
  aes_gcm_key_data_t *kd;
  __m128i H;
  int i;

  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);

  /* expand AES key */
  aes_key_expand ((__m128i *) kd->Ke, key->data, ks);

  /* pre-calculate H */
  H = kd->Ke[0];
  for (i = 1; i < AESNI_KEY_ROUNDS (ks); i += 1)
    H = _mm_aesenc_si128 (H, kd->Ke[i]);
  H = _mm_aesenclast_si128 (H, kd->Ke[i]);
  H = aesni_gcm_bswap (H);
  ghash_precompute (H, (__m128i *) kd->Hi, 8);
  return kd;
}
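/* H is the GCM hash key E(K, 0^128): a zero block XORed with round key 0
 * and run through the remaining AES rounds above; its first 8 powers are
 * precomputed into kd->Hi for the batched GHASH */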
#define foreach_aesni_gcm_handler_type _(128) _(192) _(256)

#define _(x) \
static u32 aesni_ops_dec_aes_gcm_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_dec_aes_gcm (vm, ops, n_ops, AESNI_KEY_##x); } \
static u32 aesni_ops_enc_aes_gcm_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_enc_aes_gcm (vm, ops, n_ops, AESNI_KEY_##x); } \
static void * aesni_gcm_key_exp_##x (vnet_crypto_key_t *key) \
{ return aesni_gcm_key_exp (key, AESNI_KEY_##x); }

foreach_aesni_gcm_handler_type;
#undef _
clib_error_t *
#ifdef __AVX512F__
crypto_ia32_aesni_gcm_init_avx512 (vlib_main_t * vm)
#elif __AVX2__
crypto_ia32_aesni_gcm_init_avx2 (vlib_main_t * vm)
#else
crypto_ia32_aesni_gcm_init_sse42 (vlib_main_t * vm)
#endif
{
  crypto_ia32_main_t *cm = &crypto_ia32_main;

#define _(x) \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
				    VNET_CRYPTO_OP_AES_##x##_GCM_ENC, \
				    aesni_ops_enc_aes_gcm_##x); \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
				    VNET_CRYPTO_OP_AES_##x##_GCM_DEC, \
				    aesni_ops_dec_aes_gcm_##x); \
  cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_GCM] = aesni_gcm_key_exp_##x;
  foreach_aesni_gcm_handler_type;
#undef _
  return 0;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */