/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <crypto_native/crypto_native.h>
#include <crypto_native/aes.h>
#include <crypto_native/ghash.h>
#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize ("O3")
#endif
/* number of pre-computed powers of H (assumed to be 8, matching the
   8-block GHASH unrolling used below) */
#define NUM_HI 8

typedef struct
{
  /* pre-calculated hash key values */
  const u8x16 Hi[NUM_HI];
  /* extracted AES key */
  const u8x16 Ke[15];
} aes_gcm_key_data_t;
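/*
 * Note on ordering: as used throughout this file, kd->Hi stores the
 * highest power of H first, i.e. Hi[0] = H^NUM_HI and Hi[NUM_HI - 1] =
 * H^1, so a tail of the array (Hi + NUM_HI - n) yields exactly the
 * powers H^n..H^1 needed to hash a group of n blocks in parallel.
 */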
static_always_inline void
aes_gcm_load (u8x16 * d, u8x16u * inv, int n, int n_bytes)
{
  for (int i = 0; i < n - 1; i++)
    d[i] = inv[i];
  d[n - 1] = n_bytes ? aes_load_partial (inv + n - 1, n_bytes) : inv[n - 1];
}
static_always_inline void
aes_gcm_store (u8x16 * d, u8x16u * outv, int n, int n_bytes)
{
  for (int i = 0; i < n - 1; i++)
    outv[i] = d[i];
  if (n_bytes)
    aes_store_partial (outv + n - 1, d[n - 1], n_bytes);
  else
    outv[n - 1] = d[n - 1];
}
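/*
 * aes_gcm_load () and aes_gcm_store () move n 16-byte blocks; when
 * n_bytes is non-zero the last block is transferred with the partial
 * load/store helpers, so a trailing fragment shorter than 16 bytes
 * never reads or writes past the end of the buffer.
 */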
static_always_inline void
aes_gcm_enc_first_round (u8x16 * r, u32x4 * Y, u32 * ctr, u8x16 k,
			 int n_blocks)
{
  static const u32x4 last_byte_one = { 0, 0, 0, 1 << 24 };

  if (PREDICT_TRUE ((u8) ctr[0] < (256 - n_blocks)))
    {
      for (int i = 0; i < n_blocks; i++)
	{
	  Y[0] += last_byte_one;
	  r[i] = k ^ (u8x16) Y[0];
	}
      ctr[0] += n_blocks;
    }
  else
    {
      for (int i = 0; i < n_blocks; i++)
	{
	  Y[0][3] = clib_host_to_net_u32 (++ctr[0]);
	  r[i] = k ^ (u8x16) Y[0];
	}
    }
}
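/*
 * GCM increments only the last (big-endian) 32-bit word of the counter
 * block.  The fast path above adds 1 to the last byte as a vector
 * constant, which is valid as long as that byte does not wrap within
 * this group of blocks; otherwise the slow path rebuilds the whole word
 * from the running counter with a byte swap.
 */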
static_always_inline void
aes_gcm_enc_round (u8x16 * r, u8x16 k, int n_blocks)
{
  for (int i = 0; i < n_blocks; i++)
    r[i] = aes_enc_round (r[i], k);
}
static_always_inline void
aes_gcm_enc_last_round (u8x16 * r, u8x16 * d, u8x16 const *k,
			int rounds, int n_blocks)
{
  /* additional rounds for AES-192 and AES-256 */
  for (int i = 10; i < rounds; i++)
    aes_gcm_enc_round (r, k[i], n_blocks);

  for (int i = 0; i < n_blocks; i++)
    d[i] ^= aes_enc_last_round (r[i], k[rounds]);
}
static_always_inline u8x16
aes_gcm_ghash_blocks (u8x16 T, aes_gcm_key_data_t * kd,
		      u8x16u * in, int n_blocks)
{
  ghash_data_t _gd, *gd = &_gd;
  u8x16 *Hi = (u8x16 *) kd->Hi + NUM_HI - n_blocks;
  ghash_mul_first (gd, u8x16_reflect (in[0]) ^ T, Hi[0]);
  for (int i = 1; i < n_blocks; i++)
    ghash_mul_next (gd, u8x16_reflect (in[i]), Hi[i]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  return ghash_final (gd);
}
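/*
 * With pre-computed powers of H, the GHASH of blocks A1..An is evaluated
 * as a single sum A1*H^n ^ A2*H^(n-1) ^ ... ^ An*H^1, equivalent to the
 * serial Horner form (((A1*H ^ A2)*H ^ ...)*H) but with n independent
 * carry-less multiplies and only one final reduction.
 */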
static_always_inline u8x16
aes_gcm_ghash (u8x16 T, aes_gcm_key_data_t * kd, u8x16u * in, u32 n_left)
{
  while (n_left >= 128)
    {
      T = aes_gcm_ghash_blocks (T, kd, in, 8);
      n_left -= 128;
      in += 8;
    }

  if (n_left >= 64)
    {
      T = aes_gcm_ghash_blocks (T, kd, in, 4);
      n_left -= 64;
      in += 4;
    }

  if (n_left >= 32)
    {
      T = aes_gcm_ghash_blocks (T, kd, in, 2);
      n_left -= 32;
      in += 2;
    }

  if (n_left >= 16)
    {
      T = aes_gcm_ghash_blocks (T, kd, in, 1);
      n_left -= 16;
      in += 1;
    }

  if (n_left)
    {
      u8x16 r = aes_load_partial (in, n_left);
      T = ghash_mul (u8x16_reflect (r) ^ T, kd->Hi[NUM_HI - 1]);
    }
  return T;
}
static_always_inline u8x16
aes_gcm_calc (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d,
	      u32x4 * Y, u32 * ctr, u8x16u * inv, u8x16u * outv,
	      int rounds, int n, int last_block_bytes, int with_ghash,
	      int is_encrypt)
{
  u8x16 r[n];
  ghash_data_t _gd = { }, *gd = &_gd;
  const u8x16 *rk = (u8x16 *) kd->Ke;
  int ghash_blocks = is_encrypt ? 4 : n, gc = 1;
  u8x16 *Hi = (u8x16 *) kd->Hi + NUM_HI - ghash_blocks;

  clib_prefetch_load (inv + 4);

  /* AES rounds 0 and 1 */
  aes_gcm_enc_first_round (r, Y, ctr, rk[0], n);
  aes_gcm_enc_round (r, rk[1], n);

  /* load data - decrypt round */
  if (is_encrypt == 0)
    aes_gcm_load (d, inv, n, last_block_bytes);

  /* GHASH multiply block 1 */
  if (with_ghash)
    ghash_mul_first (gd, u8x16_reflect (d[0]) ^ T, Hi[0]);

  /* AES rounds 2 and 3 */
  aes_gcm_enc_round (r, rk[2], n);
  aes_gcm_enc_round (r, rk[3], n);

  /* GHASH multiply block 2 */
  if (with_ghash && gc++ < ghash_blocks)
    ghash_mul_next (gd, u8x16_reflect (d[1]), Hi[1]);

  /* AES rounds 4 and 5 */
  aes_gcm_enc_round (r, rk[4], n);
  aes_gcm_enc_round (r, rk[5], n);

  /* GHASH multiply block 3 */
  if (with_ghash && gc++ < ghash_blocks)
    ghash_mul_next (gd, u8x16_reflect (d[2]), Hi[2]);

  /* AES rounds 6 and 7 */
  aes_gcm_enc_round (r, rk[6], n);
  aes_gcm_enc_round (r, rk[7], n);

  /* GHASH multiply block 4 */
  if (with_ghash && gc++ < ghash_blocks)
    ghash_mul_next (gd, u8x16_reflect (d[3]), Hi[3]);

  /* AES rounds 8 and 9 */
  aes_gcm_enc_round (r, rk[8], n);
  aes_gcm_enc_round (r, rk[9], n);

  /* GHASH reduce 1st step */
  if (with_ghash)
    ghash_reduce (gd);

  /* load data - encrypt round */
  if (is_encrypt)
    aes_gcm_load (d, inv, n, last_block_bytes);

  /* GHASH reduce 2nd step */
  if (with_ghash)
    ghash_reduce2 (gd);

  /* AES last round(s) */
  aes_gcm_enc_last_round (r, d, rk, rounds, n);

  /* store data */
  aes_gcm_store (d, outv, n, last_block_bytes);

  /* GHASH final step */
  if (with_ghash)
    T = ghash_final (gd);

  return T;
}
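/*
 * aes_gcm_calc () interleaves two AES rounds at a time with one GHASH
 * multiply so the AES and carry-less multiply execution units run in
 * parallel.  On encrypt, the hashed blocks in d are the 4 ciphertext
 * blocks produced by the previous call; on decrypt the ciphertext is the
 * input itself, so exactly the n blocks of this call are hashed.
 */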
static_always_inline u8x16
aes_gcm_calc_double (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d,
		     u32x4 * Y, u32 * ctr, u8x16u * inv, u8x16u * outv,
		     int rounds, int is_encrypt)
{
  u8x16 r[4];
  ghash_data_t _gd, *gd = &_gd;
  const u8x16 *rk = (u8x16 *) kd->Ke;
  u8x16 *Hi = (u8x16 *) kd->Hi + NUM_HI - 8;

  /* AES rounds 0 and 1 */
  aes_gcm_enc_first_round (r, Y, ctr, rk[0], 4);
  aes_gcm_enc_round (r, rk[1], 4);

  /* load 4 blocks of data - decrypt round */
  if (is_encrypt == 0)
    aes_gcm_load (d, inv, 4, 0);

  /* GHASH multiply block 0 */
  ghash_mul_first (gd, u8x16_reflect (d[0]) ^ T, Hi[0]);

  /* AES rounds 2 and 3 */
  aes_gcm_enc_round (r, rk[2], 4);
  aes_gcm_enc_round (r, rk[3], 4);

  /* GHASH multiply block 1 */
  ghash_mul_next (gd, u8x16_reflect (d[1]), Hi[1]);

  /* AES rounds 4 and 5 */
  aes_gcm_enc_round (r, rk[4], 4);
  aes_gcm_enc_round (r, rk[5], 4);

  /* GHASH multiply block 2 */
  ghash_mul_next (gd, u8x16_reflect (d[2]), Hi[2]);

  /* AES rounds 6 and 7 */
  aes_gcm_enc_round (r, rk[6], 4);
  aes_gcm_enc_round (r, rk[7], 4);

  /* GHASH multiply block 3 */
  ghash_mul_next (gd, u8x16_reflect (d[3]), Hi[3]);

  /* AES rounds 8 and 9 */
  aes_gcm_enc_round (r, rk[8], 4);
  aes_gcm_enc_round (r, rk[9], 4);

  /* load 4 blocks of data - encrypt round */
  if (is_encrypt)
    aes_gcm_load (d, inv, 4, 0);

  /* AES last round(s) */
  aes_gcm_enc_last_round (r, d, rk, rounds, 4);

  /* store 4 blocks of data */
  aes_gcm_store (d, outv, 4, 0);

  /* load next 4 blocks of data - decrypt round */
  if (is_encrypt == 0)
    aes_gcm_load (d, inv + 4, 4, 0);

  /* GHASH multiply block 4 */
  ghash_mul_next (gd, u8x16_reflect (d[0]), Hi[4]);

  /* AES rounds 0, 1 and 2 */
  aes_gcm_enc_first_round (r, Y, ctr, rk[0], 4);
  aes_gcm_enc_round (r, rk[1], 4);
  aes_gcm_enc_round (r, rk[2], 4);

  /* GHASH multiply block 5 */
  ghash_mul_next (gd, u8x16_reflect (d[1]), Hi[5]);

  /* AES rounds 3 and 4 */
  aes_gcm_enc_round (r, rk[3], 4);
  aes_gcm_enc_round (r, rk[4], 4);

  /* GHASH multiply block 6 */
  ghash_mul_next (gd, u8x16_reflect (d[2]), Hi[6]);

  /* AES rounds 5 and 6 */
  aes_gcm_enc_round (r, rk[5], 4);
  aes_gcm_enc_round (r, rk[6], 4);

  /* GHASH multiply block 7 */
  ghash_mul_next (gd, u8x16_reflect (d[3]), Hi[7]);

  /* AES rounds 7 and 8 */
  aes_gcm_enc_round (r, rk[7], 4);
  aes_gcm_enc_round (r, rk[8], 4);

  /* GHASH reduce 1st step */
  ghash_reduce (gd);

  /* AES round 9 */
  aes_gcm_enc_round (r, rk[9], 4);

  /* load data - encrypt round */
  if (is_encrypt)
    aes_gcm_load (d, inv + 4, 4, 0);

  /* GHASH reduce 2nd step */
  ghash_reduce2 (gd);

  /* AES last round(s) */
  aes_gcm_enc_last_round (r, d, rk, rounds, 4);

  /* store 4 blocks of data */
  aes_gcm_store (d, outv + 4, 4, 0);

  /* GHASH final step */
  return ghash_final (gd);
}
static_always_inline u8x16
aes_gcm_ghash_last (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d,
		    int n_blocks, int n_bytes)
{
  ghash_data_t _gd, *gd = &_gd;
  u8x16 *Hi = (u8x16 *) kd->Hi + NUM_HI - n_blocks;

  if (n_bytes)
    d[n_blocks - 1] = aes_byte_mask (d[n_blocks - 1], n_bytes);

  ghash_mul_first (gd, u8x16_reflect (d[0]) ^ T, Hi[0]);
  if (n_blocks > 1)
    ghash_mul_next (gd, u8x16_reflect (d[1]), Hi[1]);
  if (n_blocks > 2)
    ghash_mul_next (gd, u8x16_reflect (d[2]), Hi[2]);
  if (n_blocks > 3)
    ghash_mul_next (gd, u8x16_reflect (d[3]), Hi[3]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  return ghash_final (gd);
}
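/*
 * aes_byte_mask () zeroes the bytes of the last block beyond n_bytes
 * before it is hashed, implementing the zero-padding of a partial final
 * block required by the GCM specification.
 */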
static_always_inline u8x16
aes_gcm_enc (u8x16 T, aes_gcm_key_data_t * kd, u32x4 Y, u8x16u * inv,
	     u8x16u * outv, u32 n_left, int rounds)
{
  u8x16 d[4];
  u32 ctr = 1;

  if (n_left == 0)
    return T;

  if (n_left < 64)
    {
      if (n_left > 48)
	{
	  n_left &= 0x0f;
	  aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
			/* with_ghash */ 0, /* is_encrypt */ 1);
	  return aes_gcm_ghash_last (T, kd, d, 4, n_left);
	}
      else if (n_left > 32)
	{
	  n_left &= 0x0f;
	  aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
			/* with_ghash */ 0, /* is_encrypt */ 1);
	  return aes_gcm_ghash_last (T, kd, d, 3, n_left);
	}
      else if (n_left > 16)
	{
	  n_left &= 0x0f;
	  aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
			/* with_ghash */ 0, /* is_encrypt */ 1);
	  return aes_gcm_ghash_last (T, kd, d, 2, n_left);
	}
      else
	{
	  n_left &= 0x0f;
	  aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
			/* with_ghash */ 0, /* is_encrypt */ 1);
	  return aes_gcm_ghash_last (T, kd, d, 1, n_left);
	}
    }

  aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
		/* with_ghash */ 0, /* is_encrypt */ 1);

  /* next */
  n_left -= 64;
  outv += 4;
  inv += 4;

  while (n_left >= 128)
    {
      T = aes_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
			       /* is_encrypt */ 1);

      /* next */
      n_left -= 128;
      outv += 8;
      inv += 8;
    }

  if (n_left >= 64)
    {
      T = aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
			/* with_ghash */ 1, /* is_encrypt */ 1);

      /* next */
      n_left -= 64;
      outv += 4;
      inv += 4;
    }

  if (n_left == 0)
    return aes_gcm_ghash_last (T, kd, d, 4, 0);

  if (n_left > 48)
    {
      n_left &= 0x0f;
      T = aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
			/* with_ghash */ 1, /* is_encrypt */ 1);
      return aes_gcm_ghash_last (T, kd, d, 4, n_left);
    }

  if (n_left > 32)
    {
      n_left &= 0x0f;
      T = aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
			/* with_ghash */ 1, /* is_encrypt */ 1);
      return aes_gcm_ghash_last (T, kd, d, 3, n_left);
    }

  if (n_left > 16)
    {
      n_left &= 0x0f;
      T = aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
			/* with_ghash */ 1, /* is_encrypt */ 1);
      return aes_gcm_ghash_last (T, kd, d, 2, n_left);
    }

  n_left &= 0x0f;
  T = aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
		    /* with_ghash */ 1, /* is_encrypt */ 1);
  return aes_gcm_ghash_last (T, kd, d, 1, n_left);
}
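/*
 * The bulk of the data is processed 8 blocks (128 bytes) at a time by
 * aes_gcm_calc_double (); on encrypt, GHASH lags one 4-block group
 * behind the counter-mode encryption, so the final group is folded in
 * by aes_gcm_ghash_last () after the loop.
 */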
static_always_inline u8x16
aes_gcm_dec (u8x16 T, aes_gcm_key_data_t * kd, u32x4 Y, u8x16u * inv,
	     u8x16u * outv, u32 n_left, int rounds)
{
  u8x16 d[4];
  u32 ctr = 1;

  while (n_left >= 128)
    {
      T = aes_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
			       /* is_encrypt */ 0);

      /* next */
      n_left -= 128;
      outv += 8;
      inv += 8;
    }

  if (n_left >= 64)
    {
      T = aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0, 1, 0);

      /* next */
      n_left -= 64;
      outv += 4;
      inv += 4;
    }

  if (n_left == 0)
    return T;

  if (n_left > 48)
    return aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4,
			 n_left - 48, /* with_ghash */ 1, /* is_encrypt */ 0);

  if (n_left > 32)
    return aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3,
			 n_left - 32, /* with_ghash */ 1, /* is_encrypt */ 0);

  if (n_left > 16)
    return aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2,
			 n_left - 16, /* with_ghash */ 1, /* is_encrypt */ 0);

  return aes_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
		       /* with_ghash */ 1, /* is_encrypt */ 0);
}
static_always_inline int
aes_gcm (u8x16u * in, u8x16u * out, u8x16u * addt, u8x16u * iv, u8x16u * tag,
	 u32 data_bytes, u32 aad_bytes, u8 tag_len, aes_gcm_key_data_t * kd,
	 int aes_rounds, int is_encrypt)
{
  int i;
  u8x16 r, T = { };
  u32x4 Y0;
  ghash_data_t _gd, *gd = &_gd;

  clib_prefetch_load (iv);
  clib_prefetch_load (in);
  clib_prefetch_load (in + 4);

  /* calculate ghash for AAD - optimized for ipsec common cases */
  if (aad_bytes == 8)
    T = aes_gcm_ghash (T, kd, addt, 8);
  else if (aad_bytes == 12)
    T = aes_gcm_ghash (T, kd, addt, 12);
  else
    T = aes_gcm_ghash (T, kd, addt, aad_bytes);

  /* initialize counter */
  Y0 = (u32x4) aes_load_partial (iv, 12);
  Y0[3] = clib_host_to_net_u32 (1);

  /* ghash and encrypt/decrypt */
  if (is_encrypt)
    T = aes_gcm_enc (T, kd, Y0, in, out, data_bytes, aes_rounds);
  else
    T = aes_gcm_dec (T, kd, Y0, in, out, data_bytes, aes_rounds);

  clib_prefetch_load (tag);

  /* Finalize ghash - data bytes and aad bytes converted to bits */
  r = (u8x16) ((u64x2) { data_bytes, aad_bytes } << 3);

  /* interleaved computation of final ghash and E(Y0, k) */
  ghash_mul_first (gd, r ^ T, kd->Hi[NUM_HI - 1]);
  r = kd->Ke[0] ^ (u8x16) Y0;
  for (i = 1; i < 5; i += 1)
    r = aes_enc_round (r, kd->Ke[i]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  for (; i < 9; i += 1)
    r = aes_enc_round (r, kd->Ke[i]);
  T = ghash_final (gd);
  for (; i < aes_rounds; i += 1)
    r = aes_enc_round (r, kd->Ke[i]);
  r = aes_enc_last_round (r, kd->Ke[aes_rounds]);
  T = u8x16_reflect (T) ^ r;

  /* tag_len 16 -> 0 */
  tag_len &= 0xf;

  if (is_encrypt)
    {
      /* store tag */
      if (tag_len)
	aes_store_partial (tag, T, tag_len);
      else
	tag[0] = T;
    }
  else
    {
      /* check tag */
      u16 tag_mask = tag_len ? (1 << tag_len) - 1 : 0xffff;
      if ((u8x16_msb_mask (tag[0] == T) & tag_mask) != tag_mask)
	return 0;
    }
  return 1;
}
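/*
 * Usage sketch for aes_gcm () with AES-128 (buffer names and the key
 * object are hypothetical; in VPP this path is normally driven through
 * vnet_crypto_op_t as in the handlers below):
 *
 *   u8 iv[12], aad[12], pt[64], ct[64], tag[16];
 *   aes_gcm_key_data_t *kd = aes_gcm_key_exp (key, AES_KEY_128);
 *   aes_gcm ((u8x16u *) pt, (u8x16u *) ct, (u8x16u *) aad,
 *            (u8x16u *) iv, (u8x16u *) tag, 64, 12, 16, kd,
 *            AES_KEY_ROUNDS (AES_KEY_128), 1);   // is_encrypt = 1
 */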
static_always_inline u32
aes_ops_enc_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
		     u32 n_ops, aes_key_size_t ks)
{
  crypto_native_main_t *cm = &crypto_native_main;
  vnet_crypto_op_t *op = ops[0];
  aes_gcm_key_data_t *kd;
  u32 n_left = n_ops;

next:
  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
  aes_gcm ((u8x16u *) op->src, (u8x16u *) op->dst, (u8x16u *) op->aad,
	   (u8x16u *) op->iv, (u8x16u *) op->tag, op->len, op->aad_len,
	   op->tag_len, kd, AES_KEY_ROUNDS (ks), /* is_encrypt */ 1);
  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (--n_left)
    {
      op += 1;
      goto next;
    }

  return n_ops;
}
static_always_inline u32
aes_ops_dec_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
		     aes_key_size_t ks)
{
  crypto_native_main_t *cm = &crypto_native_main;
  vnet_crypto_op_t *op = ops[0];
  aes_gcm_key_data_t *kd;
  u32 n_left = n_ops;
  int rv;

next:
  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
  rv = aes_gcm ((u8x16u *) op->src, (u8x16u *) op->dst, (u8x16u *) op->aad,
		(u8x16u *) op->iv, (u8x16u *) op->tag, op->len,
		op->aad_len, op->tag_len, kd, AES_KEY_ROUNDS (ks),
		/* is_encrypt */ 0);

  if (rv)
    {
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
    }
  else
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      n_ops--;
    }

  if (--n_left)
    {
      op += 1;
      goto next;
    }

  return n_ops;
}
static_always_inline void *
aes_gcm_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks)
{
  aes_gcm_key_data_t *kd;
  u8x16 H;

  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);

  /* expand AES key */
  aes_key_expand ((u8x16 *) kd->Ke, key->data, ks);

  /* pre-calculate H */
  H = aes_encrypt_block (u8x16_splat (0), kd->Ke, ks);
  H = u8x16_reflect (H);
  ghash_precompute (H, (u8x16 *) kd->Hi, NUM_HI);
  return kd;
}
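/*
 * Per the GCM specification, the hash key H is the block cipher applied
 * to the all-zero block, H = E(K, 0^128).  It is byte-reflected once
 * here to match the representation used by the ghash_* helpers, so the
 * hot path never has to convert it again.
 */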
#define foreach_aes_gcm_handler_type _(128) _(192) _(256)

#define _(x) \
static u32 aes_ops_dec_aes_gcm_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aes_ops_dec_aes_gcm (vm, ops, n_ops, AES_KEY_##x); } \
static u32 aes_ops_enc_aes_gcm_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aes_ops_enc_aes_gcm (vm, ops, n_ops, AES_KEY_##x); } \
static void * aes_gcm_key_exp_##x (vnet_crypto_key_t *key) \
{ return aes_gcm_key_exp (key, AES_KEY_##x); }

foreach_aes_gcm_handler_type;
#undef _
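/* the macro above expands to aes_ops_enc_aes_gcm_{128,192,256} (),
   aes_ops_dec_aes_gcm_{128,192,256} () and aes_gcm_key_exp_{128,192,256} () */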
clib_error_t *
#ifdef __VAES__
crypto_native_aes_gcm_init_vaes (vlib_main_t * vm)
#elif __AVX512F__
crypto_native_aes_gcm_init_avx512 (vlib_main_t * vm)
#elif __AVX2__
crypto_native_aes_gcm_init_avx2 (vlib_main_t * vm)
#elif __aarch64__
crypto_native_aes_gcm_init_neon (vlib_main_t * vm)
#else
crypto_native_aes_gcm_init_sse42 (vlib_main_t * vm)
#endif
{
  crypto_native_main_t *cm = &crypto_native_main;

#define _(x) \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
				    VNET_CRYPTO_OP_AES_##x##_GCM_ENC, \
				    aes_ops_enc_aes_gcm_##x); \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
				    VNET_CRYPTO_OP_AES_##x##_GCM_DEC, \
				    aes_ops_dec_aes_gcm_##x); \
  cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_GCM] = aes_gcm_key_exp_##x;
  foreach_aes_gcm_handler_type;
#undef _
  return 0;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */