/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <x86intrin.h>
#include <crypto_native/crypto_native.h>
#include <crypto_native/aes.h>
#include <crypto_native/ghash.h>
#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize ("O3")
#endif
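/* per-key data used by this engine: pre-computed GHASH hash-subkey values
   and the expanded AES round keys */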
typedef struct
{
  /* pre-calculated hash key values */
  const u8x16 Hi[8];
  /* extracted AES key */
  const u8x16 Ke[15];
} aes_gcm_key_data_t;

static const u32x4 last_byte_one = { 0, 0, 0, 1 << 24 };
static const u8x16 bswap_mask = {
  15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
};
static_always_inline u8x16
aesni_gcm_bswap (u8x16 x)
{
  return (u8x16) _mm_shuffle_epi8 ((__m128i) x, (__m128i) bswap_mask);
}
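/* load/store n 16-byte blocks; when n_bytes is non-zero the last block is
   partial and only n_bytes bytes are transferred */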
static_always_inline void
aesni_gcm_load (u8x16 * d, u8x16u * inv, int n, int n_bytes)
{
  for (int i = 0; i < n - 1; i++)
    d[i] = inv[i];
  d[n - 1] = n_bytes ? aes_load_partial (inv + n - 1, n_bytes) : inv[n - 1];
}
static_always_inline void
aesni_gcm_store (u8x16 * d, u8x16u * outv, int n, int n_bytes)
{
  for (int i = 0; i < n - 1; i++)
    outv[i] = d[i];
  if (n_bytes)
    aes_store_partial (outv + n - 1, d[n - 1], n_bytes);
  else
    outv[n - 1] = d[n - 1];
}
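/* build n_blocks counter blocks and XOR them with round key 0; the fast
   path bumps only the last (big-endian) byte of the counter, the slow path
   handles wrap of that byte */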
static_always_inline void
aesni_gcm_enc_first_round (u8x16 * r, u32x4 * Y, u32 * ctr, u8x16 k,
                           int n_blocks)
{
  if (PREDICT_TRUE ((u8) ctr[0] < (256 - n_blocks)))
    {
      for (int i = 0; i < n_blocks; i++)
        {
          Y[0] += last_byte_one;
          r[i] = k ^ (u8x16) Y[0];
        }
      ctr[0] += n_blocks;
    }
  else
    {
      for (int i = 0; i < n_blocks; i++)
        {
          Y[0][3] = clib_host_to_net_u32 (++ctr[0]);
          r[i] = k ^ (u8x16) Y[0];
        }
    }
}
static_always_inline void
aesni_gcm_enc_round (u8x16 * r, u8x16 k, int n_blocks)
{
  for (int i = 0; i < n_blocks; i++)
    r[i] = aes_enc_round (r[i], k);
}
static_always_inline void
aesni_gcm_enc_last_round (u8x16 * r, u8x16 * d, u8x16 const *k,
                          int rounds, int n_blocks)
{
  /* additional rounds for AES-192 and AES-256 */
  for (int i = 10; i < rounds; i++)
    aesni_gcm_enc_round (r, k[i], n_blocks);

  for (int i = 0; i < n_blocks; i++)
    d[i] ^= aes_enc_last_round (r[i], k[rounds]);
}
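/* GHASH n_blocks blocks with a single reduction, multiplying the first
   block by the highest pre-computed power of H */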
static_always_inline u8x16
aesni_gcm_ghash_blocks (u8x16 T, aes_gcm_key_data_t * kd,
                        u8x16u * in, int n_blocks)
{
  ghash_data_t _gd, *gd = &_gd;
  const u8x16 *Hi = kd->Hi + n_blocks - 1;
  ghash_mul_first (gd, aesni_gcm_bswap (in[0]) ^ T, Hi[0]);
  for (int i = 1; i < n_blocks; i++)
    ghash_mul_next (gd, aesni_gcm_bswap ((in[i])), Hi[-i]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  return ghash_final (gd);
}
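/* GHASH an arbitrary amount of data (used for the AAD) in strides of
   8, 4, 2 and 1 blocks, plus an optional partial final block */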
static_always_inline u8x16
aesni_gcm_ghash (u8x16 T, aes_gcm_key_data_t * kd, u8x16u * in, u32 n_left)
{
  while (n_left >= 128)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 8);
      n_left -= 128;
      in += 8;
    }

  if (n_left >= 64)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 4);
      n_left -= 64;
      in += 4;
    }

  if (n_left >= 32)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 2);
      n_left -= 32;
      in += 2;
    }

  if (n_left >= 16)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 1);
      n_left -= 16;
      in += 1;
    }

  if (n_left)
    {
      u8x16 r = aes_load_partial (in, n_left);
      T = ghash_mul (aesni_gcm_bswap (r) ^ T, kd->Hi[0]);
    }
  return T;
}
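/* encrypt/decrypt up to 4 blocks while folding data into GHASH; for
   encryption the hashed blocks are the 4 ciphertext blocks produced by the
   previous call, for decryption they are the blocks loaded in this call */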
static_always_inline u8x16
aesni_gcm_calc (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d,
                u32x4 * Y, u32 * ctr, u8x16u * inv, u8x16u * outv,
                int rounds, int n, int last_block_bytes, int with_ghash,
                int is_encrypt)
{
  u8x16 r[n];
  ghash_data_t _gd = { }, *gd = &_gd;
  const u8x16 *rk = (u8x16 *) kd->Ke;
  int hidx = is_encrypt ? 4 : n, didx = 0;

  _mm_prefetch (inv + 4, _MM_HINT_T0);

  /* AES rounds 0 and 1 */
  aesni_gcm_enc_first_round (r, Y, ctr, rk[0], n);
  aesni_gcm_enc_round (r, rk[1], n);

  /* load data - decrypt round */
  if (is_encrypt == 0)
    aesni_gcm_load (d, inv, n, last_block_bytes);

  /* GHASH multiply block 1 */
  if (with_ghash)
    ghash_mul_first (gd, aesni_gcm_bswap (d[didx++]) ^ T, kd->Hi[--hidx]);

  /* AES rounds 2 and 3 */
  aesni_gcm_enc_round (r, rk[2], n);
  aesni_gcm_enc_round (r, rk[3], n);

  /* GHASH multiply block 2 */
  if (with_ghash && hidx)
    ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);

  /* AES rounds 4 and 5 */
  aesni_gcm_enc_round (r, rk[4], n);
  aesni_gcm_enc_round (r, rk[5], n);

  /* GHASH multiply block 3 */
  if (with_ghash && hidx)
    ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);

  /* AES rounds 6 and 7 */
  aesni_gcm_enc_round (r, rk[6], n);
  aesni_gcm_enc_round (r, rk[7], n);

  /* GHASH multiply block 4 */
  if (with_ghash && hidx)
    ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);

  /* AES rounds 8 and 9 */
  aesni_gcm_enc_round (r, rk[8], n);
  aesni_gcm_enc_round (r, rk[9], n);

  /* GHASH reduce 1st step */
  if (with_ghash)
    ghash_reduce (gd);

  /* load data - encrypt round */
  if (is_encrypt)
    aesni_gcm_load (d, inv, n, last_block_bytes);

  /* GHASH reduce 2nd step */
  if (with_ghash)
    ghash_reduce2 (gd);

  /* AES last round(s) */
  aesni_gcm_enc_last_round (r, d, rk, rounds, n);

  /* store data */
  aesni_gcm_store (d, outv, n, last_block_bytes);

  /* GHASH final step */
  if (with_ghash)
    T = ghash_final (gd);

  return T;
}
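/* 8-block variant: two batches of 4 counter blocks are encrypted while all
   8 data blocks are folded into a single GHASH reduction */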
static_always_inline u8x16
aesni_gcm_calc_double (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d,
                       u32x4 * Y, u32 * ctr, u8x16u * inv, u8x16u * outv,
                       int rounds, int is_encrypt)
{
  u8x16 r[4];
  ghash_data_t _gd, *gd = &_gd;
  const u8x16 *rk = (u8x16 *) kd->Ke;

  /* AES rounds 0 and 1 */
  aesni_gcm_enc_first_round (r, Y, ctr, rk[0], 4);
  aesni_gcm_enc_round (r, rk[1], 4);

  /* load 4 blocks of data - decrypt round */
  if (is_encrypt == 0)
    aesni_gcm_load (d, inv, 4, 0);

  /* GHASH multiply block 0 */
  ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[7]);

  /* AES rounds 2 and 3 */
  aesni_gcm_enc_round (r, rk[2], 4);
  aesni_gcm_enc_round (r, rk[3], 4);

  /* GHASH multiply block 1 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[6]);

  /* AES rounds 4 and 5 */
  aesni_gcm_enc_round (r, rk[4], 4);
  aesni_gcm_enc_round (r, rk[5], 4);

  /* GHASH multiply block 2 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[5]);

  /* AES rounds 6 and 7 */
  aesni_gcm_enc_round (r, rk[6], 4);
  aesni_gcm_enc_round (r, rk[7], 4);

  /* GHASH multiply block 3 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[4]);

  /* AES rounds 8 and 9 */
  aesni_gcm_enc_round (r, rk[8], 4);
  aesni_gcm_enc_round (r, rk[9], 4);

  /* load 4 blocks of data - encrypt round */
  if (is_encrypt)
    aesni_gcm_load (d, inv, 4, 0);

  /* AES last round(s) */
  aesni_gcm_enc_last_round (r, d, rk, rounds, 4);

  /* store 4 blocks of data */
  aesni_gcm_store (d, outv, 4, 0);

  /* load next 4 blocks of data - decrypt round */
  if (is_encrypt == 0)
    aesni_gcm_load (d, inv + 4, 4, 0);

  /* GHASH multiply block 4 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[0]), kd->Hi[3]);

  /* AES rounds 0, 1 and 2 */
  aesni_gcm_enc_first_round (r, Y, ctr, rk[0], 4);
  aesni_gcm_enc_round (r, rk[1], 4);
  aesni_gcm_enc_round (r, rk[2], 4);

  /* GHASH multiply block 5 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[2]);

  /* AES rounds 3 and 4 */
  aesni_gcm_enc_round (r, rk[3], 4);
  aesni_gcm_enc_round (r, rk[4], 4);

  /* GHASH multiply block 6 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[1]);

  /* AES rounds 5 and 6 */
  aesni_gcm_enc_round (r, rk[5], 4);
  aesni_gcm_enc_round (r, rk[6], 4);

  /* GHASH multiply block 7 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[0]);

  /* AES rounds 7 and 8 */
  aesni_gcm_enc_round (r, rk[7], 4);
  aesni_gcm_enc_round (r, rk[8], 4);

  /* GHASH reduce 1st step */
  ghash_reduce (gd);

  /* AES round 9 */
  aesni_gcm_enc_round (r, rk[9], 4);

  /* load data - encrypt round */
  if (is_encrypt)
    aesni_gcm_load (d, inv + 4, 4, 0);

  /* GHASH reduce 2nd step */
  ghash_reduce2 (gd);

  /* AES last round(s) */
  aesni_gcm_enc_last_round (r, d, rk, rounds, 4);

  /* store data */
  aesni_gcm_store (d, outv + 4, 4, 0);

  /* GHASH final step */
  return ghash_final (gd);
}
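/* GHASH the last 1-4 blocks left in d[]; a partial final block is masked
   down to n_bytes before hashing */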
static_always_inline u8x16
aesni_gcm_ghash_last (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d,
                      int n_blocks, int n_bytes)
{
  ghash_data_t _gd, *gd = &_gd;

  if (n_bytes)
    d[n_blocks - 1] = aes_byte_mask (d[n_blocks - 1], n_bytes);

  ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[n_blocks - 1]);
  if (n_blocks > 1)
    ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[n_blocks - 2]);
  if (n_blocks > 2)
    ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[n_blocks - 3]);
  if (n_blocks > 3)
    ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[n_blocks - 4]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  return ghash_final (gd);
}
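/* bulk encryption: the first 4 blocks are encrypted without GHASH, each
   following call hashes the ciphertext produced by the previous one and
   aesni_gcm_ghash_last picks up the remaining blocks */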
static_always_inline u8x16
aesni_gcm_enc (u8x16 T, aes_gcm_key_data_t * kd, u32x4 Y, u8x16u * inv,
               u8x16u * outv, u32 n_left, int rounds)
{
  u8x16 d[4];
  u32 ctr = 1;

  if (n_left == 0)
    return T;

  if (n_left < 64)
    {
      if (n_left > 48)
        {
          n_left &= 0x0f;
          aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
                          /* with_ghash */ 0, /* is_encrypt */ 1);
          return aesni_gcm_ghash_last (T, kd, d, 4, n_left);
        }
      else if (n_left > 32)
        {
          n_left &= 0x0f;
          aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
                          /* with_ghash */ 0, /* is_encrypt */ 1);
          return aesni_gcm_ghash_last (T, kd, d, 3, n_left);
        }
      else if (n_left > 16)
        {
          n_left &= 0x0f;
          aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
                          /* with_ghash */ 0, /* is_encrypt */ 1);
          return aesni_gcm_ghash_last (T, kd, d, 2, n_left);
        }
      else
        {
          n_left &= 0x0f;
          aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
                          /* with_ghash */ 0, /* is_encrypt */ 1);
          return aesni_gcm_ghash_last (T, kd, d, 1, n_left);
        }
    }

  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
                  /* with_ghash */ 0, /* is_encrypt */ 1);

  /* next */
  n_left -= 64;
  outv += 4;
  inv += 4;

  while (n_left >= 128)
    {
      T = aesni_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
                                 /* is_encrypt */ 1);

      /* next */
      n_left -= 128;
      outv += 8;
      inv += 8;
    }

  if (n_left >= 64)
    {
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
                          /* with_ghash */ 1, /* is_encrypt */ 1);

      /* next */
      n_left -= 64;
      outv += 4;
      inv += 4;
    }

  if (n_left == 0)
    return aesni_gcm_ghash_last (T, kd, d, 4, 0);

  if (n_left > 48)
    {
      n_left &= 0x0f;
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
                          /* with_ghash */ 1, /* is_encrypt */ 1);
      return aesni_gcm_ghash_last (T, kd, d, 4, n_left);
    }

  if (n_left > 32)
    {
      n_left &= 0x0f;
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
                          /* with_ghash */ 1, /* is_encrypt */ 1);
      return aesni_gcm_ghash_last (T, kd, d, 3, n_left);
    }

  if (n_left > 16)
    {
      n_left &= 0x0f;
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
                          /* with_ghash */ 1, /* is_encrypt */ 1);
      return aesni_gcm_ghash_last (T, kd, d, 2, n_left);
    }

  n_left &= 0x0f;
  T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
                      /* with_ghash */ 1, /* is_encrypt */ 1);
  return aesni_gcm_ghash_last (T, kd, d, 1, n_left);
}
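/* bulk decryption: ciphertext is available up front, so GHASH runs in the
   same call and no trailing hash pass is needed */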
static_always_inline u8x16
aesni_gcm_dec (u8x16 T, aes_gcm_key_data_t * kd, u32x4 Y, u8x16u * inv,
               u8x16u * outv, u32 n_left, int rounds)
{
  u8x16 d[4];
  u32 ctr = 1;

  while (n_left >= 128)
    {
      T = aesni_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
                                 /* is_encrypt */ 0);

      /* next */
      n_left -= 128;
      outv += 8;
      inv += 8;
    }

  if (n_left >= 64)
    {
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0, 1, 0);

      /* next */
      n_left -= 64;
      outv += 4;
      inv += 4;
    }

  if (n_left == 0)
    return T;

  if (n_left > 48)
    return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4,
                           n_left - 48,
                           /* with_ghash */ 1, /* is_encrypt */ 0);

  if (n_left > 32)
    return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3,
                           n_left - 32,
                           /* with_ghash */ 1, /* is_encrypt */ 0);

  if (n_left > 16)
    return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2,
                           n_left - 16,
                           /* with_ghash */ 1, /* is_encrypt */ 0);

  return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
                         /* with_ghash */ 1, /* is_encrypt */ 0);
}
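/* full AES-GCM operation: GHASH the AAD, run the counter-mode
   encrypt/decrypt pass, then fold in the bit lengths and E(Y0, key) to
   produce or verify the tag; returns 0 on tag mismatch, 1 otherwise */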
static_always_inline int
aes_gcm (u8x16u * in, u8x16u * out, u8x16u * addt, u8x16u * iv, u8x16u * tag,
         u32 data_bytes, u32 aad_bytes, u8 tag_len, aes_gcm_key_data_t * kd,
         int aes_rounds, int is_encrypt)
{
  int i;
  u8x16 r, T = { };
  u32x4 Y0;
  ghash_data_t _gd, *gd = &_gd;

  _mm_prefetch (iv, _MM_HINT_T0);
  _mm_prefetch (in, _MM_HINT_T0);
  _mm_prefetch (in + CLIB_CACHE_LINE_BYTES, _MM_HINT_T0);

  /* calculate ghash for AAD - optimized for ipsec common cases */
  if (aad_bytes == 8)
    T = aesni_gcm_ghash (T, kd, addt, 8);
  else if (aad_bytes == 12)
    T = aesni_gcm_ghash (T, kd, addt, 12);
  else
    T = aesni_gcm_ghash (T, kd, addt, aad_bytes);

  /* initialize counter */
  Y0 = (u32x4) aes_load_partial (iv, 12);
  Y0[3] = clib_host_to_net_u32 (1);

  /* ghash and encrypt/decrypt */
  if (is_encrypt)
    T = aesni_gcm_enc (T, kd, Y0, in, out, data_bytes, aes_rounds);
  else
    T = aesni_gcm_dec (T, kd, Y0, in, out, data_bytes, aes_rounds);

  _mm_prefetch (tag, _MM_HINT_T0);

  /* Finalize ghash - data bytes and aad bytes converted to bits */
  r = (u8x16) ((u64x2) {data_bytes, aad_bytes} << 3);

  /* interleaved computation of final ghash and E(Y0, k) */
  ghash_mul_first (gd, r ^ T, kd->Hi[0]);
  r = kd->Ke[0] ^ (u8x16) Y0;
  for (i = 1; i < 5; i += 1)
    r = aes_enc_round (r, kd->Ke[i]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  for (; i < 9; i += 1)
    r = aes_enc_round (r, kd->Ke[i]);
  T = ghash_final (gd);
  for (; i < aes_rounds; i += 1)
    r = aes_enc_round (r, kd->Ke[i]);
  r = aes_enc_last_round (r, kd->Ke[aes_rounds]);
  T = aesni_gcm_bswap (T) ^ r;

  /* tag_len 16 -> 0 */
  tag_len &= 0xf;

  if (is_encrypt)
    {
      /* store tag */
      if (tag_len)
        aes_store_partial (tag, T, (1 << tag_len) - 1);
      else
        tag[0] = T;
    }
  else
    {
      /* check tag */
      u16 tag_mask = tag_len ? (1 << tag_len) - 1 : 0xffff;
      if ((u8x16_msb_mask (tag[0] == T) & tag_mask) != tag_mask)
        return 0;
    }
  return 1;
}
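/* vnet_crypto op handlers: process each op in the array with aes_gcm (),
   marking ops whose tag check fails with FAIL_BAD_HMAC */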
static_always_inline u32
aesni_ops_enc_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                       u32 n_ops, aes_key_size_t ks)
{
  crypto_native_main_t *cm = &crypto_native_main;
  vnet_crypto_op_t *op = ops[0];
  aes_gcm_key_data_t *kd;
  u32 n_left = n_ops;

next:
  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
  aes_gcm ((u8x16u *) op->src, (u8x16u *) op->dst, (u8x16u *) op->aad,
           (u8x16u *) op->iv, (u8x16u *) op->tag, op->len, op->aad_len,
           op->tag_len, kd, AES_KEY_ROUNDS (ks), /* is_encrypt */ 1);
  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (--n_left)
    {
      op += 1;
      goto next;
    }

  return n_ops;
}
static_always_inline u32
aesni_ops_dec_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                       u32 n_ops, aes_key_size_t ks)
{
  crypto_native_main_t *cm = &crypto_native_main;
  vnet_crypto_op_t *op = ops[0];
  aes_gcm_key_data_t *kd;
  u32 n_left = n_ops;
  int rv;

next:
  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
  rv = aes_gcm ((u8x16u *) op->src, (u8x16u *) op->dst, (u8x16u *) op->aad,
                (u8x16u *) op->iv, (u8x16u *) op->tag, op->len,
                op->aad_len, op->tag_len, kd, AES_KEY_ROUNDS (ks),
                /* is_encrypt */ 0);

  if (rv)
    {
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
    }
  else
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      n_ops--;
    }

  if (--n_left)
    {
      op += 1;
      goto next;
    }

  return n_ops;
}
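/* per-key initialization: expand the AES key and pre-compute the GHASH
   hash-subkey powers */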
static_always_inline void *
aesni_gcm_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks)
{
  aes_gcm_key_data_t *kd;
  u8x16 H;

  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);

  /* expand AES key */
  aes_key_expand ((u8x16 *) kd->Ke, key->data, ks);

  /* pre-calculate H */
  H = aes_encrypt_block (u8x16_splat (0), kd->Ke, ks);
  H = aesni_gcm_bswap (H);
  ghash_precompute (H, (u8x16 *) kd->Hi, 8);
  return kd;
}
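/* instantiate encrypt/decrypt/key-expansion handlers for AES-128, AES-192
   and AES-256 */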
#define foreach_aesni_gcm_handler_type _(128) _(192) _(256)

#define _(x) \
static u32 aesni_ops_dec_aes_gcm_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_dec_aes_gcm (vm, ops, n_ops, AES_KEY_##x); } \
static u32 aesni_ops_enc_aes_gcm_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_enc_aes_gcm (vm, ops, n_ops, AES_KEY_##x); } \
static void * aesni_gcm_key_exp_##x (vnet_crypto_key_t *key) \
{ return aesni_gcm_key_exp (key, AES_KEY_##x); }

foreach_aesni_gcm_handler_type;
#undef _
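/* engine init: one variant is built per instruction set; it registers the
   GCM encrypt/decrypt handlers and the key expansion function with the
   crypto infrastructure */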
clib_error_t *
#ifdef __VAES__
crypto_native_aes_gcm_init_vaes (vlib_main_t * vm)
#elif __AVX512F__
crypto_native_aes_gcm_init_avx512 (vlib_main_t * vm)
#elif __AVX2__
crypto_native_aes_gcm_init_avx2 (vlib_main_t * vm)
#else
crypto_native_aes_gcm_init_sse42 (vlib_main_t * vm)
#endif
{
  crypto_native_main_t *cm = &crypto_native_main;

#define _(x) \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
                                    VNET_CRYPTO_OP_AES_##x##_GCM_ENC, \
                                    aesni_ops_enc_aes_gcm_##x); \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
                                    VNET_CRYPTO_OP_AES_##x##_GCM_DEC, \
                                    aesni_ops_dec_aes_gcm_##x); \
  cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_GCM] = aesni_gcm_key_exp_##x;
  foreach_aesni_gcm_handler_type;
#undef _
  return 0;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */