/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#ifndef __aesni_h__
#define __aesni_h__

typedef enum
{
  AES_KEY_128 = 0,
  AES_KEY_192 = 1,
  AES_KEY_256 = 2,
} aes_key_size_t;

#define AES_KEY_ROUNDS(x) (10 + x * 2)
#define AES_KEY_BYTES(x)  (16 + x * 8)
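
/* With aes_key_size_t encoded as 0, 1 and 2, these evaluate to 10/12/14
   rounds and 16/24/32 key bytes for AES-128/192/256 respectively. */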

static const u8x16 byte_mask_scale = {
  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

static_always_inline u8x16
aes_block_load (u8 * p)
{
  return *(u8x16u *) p;
}

static_always_inline u8x16
aes_enc_round (u8x16 a, u8x16 k)
{
#if defined (__AES__)
  return (u8x16) _mm_aesenc_si128 ((__m128i) a, (__m128i) k);
#elif defined (__ARM_FEATURE_CRYPTO)
  return vaesmcq_u8 (vaeseq_u8 (a, u8x16_splat (0))) ^ k;
#endif
}

#if defined(__VAES__) && defined(__AVX512F__)
static_always_inline u8x64
aes_enc_round_x4 (u8x64 a, u8x64 k)
{
  return (u8x64) _mm512_aesenc_epi128 ((__m512i) a, (__m512i) k);
}

static_always_inline u8x64
aes_enc_last_round_x4 (u8x64 a, u8x64 k)
{
  return (u8x64) _mm512_aesenclast_epi128 ((__m512i) a, (__m512i) k);
}

static_always_inline u8x64
aes_dec_round_x4 (u8x64 a, u8x64 k)
{
  return (u8x64) _mm512_aesdec_epi128 ((__m512i) a, (__m512i) k);
}

static_always_inline u8x64
aes_dec_last_round_x4 (u8x64 a, u8x64 k)
{
  return (u8x64) _mm512_aesdeclast_epi128 ((__m512i) a, (__m512i) k);
}
#endif

#if defined(__VAES__)
static_always_inline u8x32
aes_enc_round_x2 (u8x32 a, u8x32 k)
{
  return (u8x32) _mm256_aesenc_epi128 ((__m256i) a, (__m256i) k);
}

static_always_inline u8x32
aes_enc_last_round_x2 (u8x32 a, u8x32 k)
{
  return (u8x32) _mm256_aesenclast_epi128 ((__m256i) a, (__m256i) k);
}

static_always_inline u8x32
aes_dec_round_x2 (u8x32 a, u8x32 k)
{
  return (u8x32) _mm256_aesdec_epi128 ((__m256i) a, (__m256i) k);
}

static_always_inline u8x32
aes_dec_last_round_x2 (u8x32 a, u8x32 k)
{
  return (u8x32) _mm256_aesdeclast_epi128 ((__m256i) a, (__m256i) k);
}
#endif
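
/* The *_epi128 intrinsics above perform an independent AES round on each
   128-bit lane, so the _x2 and _x4 helpers advance two or four blocks in
   parallel per call. */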

static_always_inline u8x16
aes_enc_last_round (u8x16 a, u8x16 k)
{
#if defined (__AES__)
  return (u8x16) _mm_aesenclast_si128 ((__m128i) a, (__m128i) k);
#elif defined (__ARM_FEATURE_CRYPTO)
  return vaeseq_u8 (a, u8x16_splat (0)) ^ k;
#endif
}

#ifdef __x86_64__

static_always_inline u8x16
aes_dec_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesdec_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline u8x16
aes_dec_last_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesdeclast_si128 ((__m128i) a, (__m128i) k);
}
#endif

static_always_inline void
aes_block_store (u8 * p, u8x16 r)
{
  *(u8x16u *) p = r;
}

static_always_inline u8x16
aes_byte_mask (u8x16 x, u8 n_bytes)
{
  return x & (u8x16_splat (n_bytes) > byte_mask_scale);
}

static_always_inline u8x16
aes_load_partial (u8x16u * p, int n_bytes)
{
  ASSERT (n_bytes <= 16);
#ifdef __AVX512F__
  __m128i zero = { };
  return (u8x16) _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p);
#else
  u8x16 v = { };
  CLIB_ASSUME (n_bytes < 16);
  clib_memcpy_fast (&v, p, n_bytes);
  return v;
#endif
}

static_always_inline void
aes_store_partial (void *p, u8x16 r, int n_bytes)
{
#if defined (__aarch64__)
  clib_memcpy_fast (p, &r, n_bytes);
#else
#ifdef __AVX512F__
  _mm_mask_storeu_epi8 (p, (1 << n_bytes) - 1, (__m128i) r);
#else
  u8x16 mask = u8x16_splat (n_bytes) > byte_mask_scale;
  _mm_maskmoveu_si128 ((__m128i) r, (__m128i) mask, p);
#endif
#endif
}

static_always_inline u8x16
aes_encrypt_block (u8x16 block, const u8x16 * round_keys, aes_key_size_t ks)
{
  int rounds = AES_KEY_ROUNDS (ks);
  block ^= round_keys[0];
  for (int i = 1; i < rounds; i += 1)
    block = aes_enc_round (block, round_keys[i]);
  return aes_enc_last_round (block, round_keys[rounds]);
}
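
/* Illustrative usage sketch, not part of this header: expand a key with
   aes_key_expand (defined below), then encrypt a single block.  `key',
   `plaintext' and `ciphertext' are assumed caller-provided buffers of at
   least AES_KEY_BYTES (ks) and 16 bytes respectively.

     u8x16 rk[AES_KEY_ROUNDS (AES_KEY_256) + 1];
     aes_key_expand (rk, key, AES_KEY_128);
     aes_block_store (ciphertext,
                      aes_encrypt_block (aes_block_load (plaintext), rk,
                                         AES_KEY_128));
 */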

static_always_inline u8x16
aes_inv_mix_column (u8x16 a)
{
#if defined (__AES__)
  return (u8x16) _mm_aesimc_si128 ((__m128i) a);
#elif defined (__ARM_FEATURE_CRYPTO)
  return vaesimcq_u8 (a);
#endif
}

#ifdef __x86_64__

#define aes_keygen_assist(a, b) \
  (u8x16) _mm_aeskeygenassist_si128((__m128i) a, b)

/* AES-NI based AES key expansion based on code samples from
   Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper */

static_always_inline void
aes128_key_assist (u8x16 * rk, u8x16 r)
{
  u8x16 t = rk[-1];
  t ^= u8x16_word_shift_left (t, 4);
  t ^= u8x16_word_shift_left (t, 4);
  t ^= u8x16_word_shift_left (t, 4);
  rk[0] = t ^ (u8x16) u32x4_shuffle ((u32x4) r, 3, 3, 3, 3);
}

static_always_inline void
aes128_key_expand (u8x16 *rk, u8x16u const *k)
{
  rk[0] = k[0];
  /* rcon values 0x01 .. 0x36 are the AES-128 round constants */
  aes128_key_assist (rk + 1, aes_keygen_assist (rk[0], 0x01));
  aes128_key_assist (rk + 2, aes_keygen_assist (rk[1], 0x02));
  aes128_key_assist (rk + 3, aes_keygen_assist (rk[2], 0x04));
  aes128_key_assist (rk + 4, aes_keygen_assist (rk[3], 0x08));
  aes128_key_assist (rk + 5, aes_keygen_assist (rk[4], 0x10));
  aes128_key_assist (rk + 6, aes_keygen_assist (rk[5], 0x20));
  aes128_key_assist (rk + 7, aes_keygen_assist (rk[6], 0x40));
  aes128_key_assist (rk + 8, aes_keygen_assist (rk[7], 0x80));
  aes128_key_assist (rk + 9, aes_keygen_assist (rk[8], 0x1b));
  aes128_key_assist (rk + 10, aes_keygen_assist (rk[9], 0x36));
}

static_always_inline void
aes192_key_assist (u8x16 * r1, u8x16 * r2, u8x16 key_assist)
{
  u8x16 t;
  r1[0] ^= t = u8x16_word_shift_left (r1[0], 4);
  r1[0] ^= t = u8x16_word_shift_left (t, 4);
  r1[0] ^= u8x16_word_shift_left (t, 4);
  r1[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) key_assist, 0x55);
  r2[0] ^= u8x16_word_shift_left (r2[0], 4);
  r2[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) r1[0], 0xff);
}

static_always_inline void
aes192_key_expand (u8x16 * rk, u8x16u const *k)
{
  u8x16 r1, r2;

  rk[0] = r1 = k[0];
  rk[1] = r2 = (u8x16) (u64x2) { *(u64 *) (k + 1), 0 };

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x1));
  rk[1] = (u8x16) _mm_shuffle_pd ((__m128d) rk[1], (__m128d) r1, 0);
  rk[2] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x2));
  rk[3] = r1;
  rk[4] = r2;

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x4));
  rk[4] = (u8x16) _mm_shuffle_pd ((__m128d) rk[4], (__m128d) r1, 0);
  rk[5] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x8));
  rk[6] = r1;
  rk[7] = r2;

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x10));
  rk[7] = (u8x16) _mm_shuffle_pd ((__m128d) rk[7], (__m128d) r1, 0);
  rk[8] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x20));
  rk[9] = r1;
  rk[10] = r2;

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x40));
  rk[10] = (u8x16) _mm_shuffle_pd ((__m128d) rk[10], (__m128d) r1, 0);
  rk[11] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x80));
  rk[12] = r1;
}

static_always_inline void
aes256_key_assist (u8x16 * rk, int i, u8x16 key_assist)
{
  u8x16 r, t;
  rk += i;

  r = rk[-2];
  r ^= t = u8x16_word_shift_left (r, 4);
  r ^= t = u8x16_word_shift_left (t, 4);
  r ^= u8x16_word_shift_left (t, 4);
  r ^= (u8x16) u32x4_shuffle ((u32x4) key_assist, 3, 3, 3, 3);
  rk[0] = r;

  if (i < 14)
    {
      key_assist = aes_keygen_assist (rk[0], 0x0);

      r = rk[-1];
      r ^= t = u8x16_word_shift_left (r, 4);
      r ^= t = u8x16_word_shift_left (t, 4);
      r ^= u8x16_word_shift_left (t, 4);
      r ^= (u8x16) u32x4_shuffle ((u32x4) key_assist, 2, 2, 2, 2);
      rk[1] = r;
    }
}

static_always_inline void
aes256_key_expand (u8x16 * rk, u8x16u const *k)
{
  rk[0] = k[0];
  rk[1] = k[1];
  aes256_key_assist (rk, 2, aes_keygen_assist (rk[1], 0x01));
  aes256_key_assist (rk, 4, aes_keygen_assist (rk[3], 0x02));
  aes256_key_assist (rk, 6, aes_keygen_assist (rk[5], 0x04));
  aes256_key_assist (rk, 8, aes_keygen_assist (rk[7], 0x08));
  aes256_key_assist (rk, 10, aes_keygen_assist (rk[9], 0x10));
  aes256_key_assist (rk, 12, aes_keygen_assist (rk[11], 0x20));
  aes256_key_assist (rk, 14, aes_keygen_assist (rk[13], 0x40));
}
#endif

#ifdef __aarch64__

static const u8x16 aese_prep_mask1 =
  { 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12 };
static const u8x16 aese_prep_mask2 =
  { 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15 };
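
/* These masks broadcast the last 32-bit word of the previous round key to
   every word position, aese_prep_mask1 with a one-byte rotation (RotWord)
   and aese_prep_mask2 without it.  Feeding the broadcast through vaeseq_u8
   with an all-zero key then yields SubWord, since SubBytes and ShiftRows
   act trivially on identical columns. */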

static_always_inline void
aes128_key_expand_round_neon (u8x16 * rk, u32 rcon)
{
  u8x16 r, t, last_round = rk[-1], z = { };
  r = vqtbl1q_u8 (last_round, aese_prep_mask1);
  r = vaeseq_u8 (r, z);
  r ^= (u8x16) vdupq_n_u32 (rcon);
  r ^= last_round;
  r ^= t = vextq_u8 (z, last_round, 12);
  r ^= t = vextq_u8 (z, t, 12);
  r ^= vextq_u8 (z, t, 12);
  rk[0] = r;
}

static_always_inline void
aes128_key_expand (u8x16 *rk, u8x16u const *k)
{
  rk[0] = k[0];
  aes128_key_expand_round_neon (rk + 1, 0x01);
  aes128_key_expand_round_neon (rk + 2, 0x02);
  aes128_key_expand_round_neon (rk + 3, 0x04);
  aes128_key_expand_round_neon (rk + 4, 0x08);
  aes128_key_expand_round_neon (rk + 5, 0x10);
  aes128_key_expand_round_neon (rk + 6, 0x20);
  aes128_key_expand_round_neon (rk + 7, 0x40);
  aes128_key_expand_round_neon (rk + 8, 0x80);
  aes128_key_expand_round_neon (rk + 9, 0x1b);
  aes128_key_expand_round_neon (rk + 10, 0x36);
}

static_always_inline void
aes192_key_expand_round_neon (u8x8 * rk, u32 rcon)
{
  u8x8 r, last_round = rk[-1], z = { };
  u8x16 r2, z2 = { };

  r2 = (u8x16) vdupq_lane_u64 ((uint64x1_t) last_round, 0);
  r2 = vqtbl1q_u8 (r2, aese_prep_mask1);
  r2 = vaeseq_u8 (r2, z2);
  r2 ^= (u8x16) vdupq_n_u32 (rcon);

  r = (u8x8) vdup_laneq_u64 ((u64x2) r2, 0);
  r ^= rk[-3];
  r ^= vext_u8 (z, rk[-3], 4);
  rk[0] = r;

  r = rk[-2] ^ vext_u8 (r, z, 4);
  r ^= vext_u8 (z, r, 4);
  rk[1] = r;

  if (rcon == 0x80)
    return;

  r = rk[-1] ^ vext_u8 (r, z, 4);
  r ^= vext_u8 (z, r, 4);
  rk[2] = r;
}

static_always_inline void
aes192_key_expand (u8x16 * ek, const u8x16u * k)
{
  u8x8 *rk = (u8x8 *) ek;
  ek[0] = k[0];
  rk[2] = *(u8x8u *) (k + 1);
  aes192_key_expand_round_neon (rk + 3, 0x01);
  aes192_key_expand_round_neon (rk + 6, 0x02);
  aes192_key_expand_round_neon (rk + 9, 0x04);
  aes192_key_expand_round_neon (rk + 12, 0x08);
  aes192_key_expand_round_neon (rk + 15, 0x10);
  aes192_key_expand_round_neon (rk + 18, 0x20);
  aes192_key_expand_round_neon (rk + 21, 0x40);
  aes192_key_expand_round_neon (rk + 24, 0x80);
}

static_always_inline void
aes256_key_expand_round_neon (u8x16 * rk, u32 rcon)
{
  u8x16 r, t, z = { };

  r = vqtbl1q_u8 (rk[-1], rcon ? aese_prep_mask1 : aese_prep_mask2);
  r = vaeseq_u8 (r, z);
  if (rcon)
    r ^= (u8x16) vdupq_n_u32 (rcon);
  r ^= rk[-2];
  r ^= t = vextq_u8 (z, rk[-2], 12);
  r ^= t = vextq_u8 (z, t, 12);
  r ^= vextq_u8 (z, t, 12);
  rk[0] = r;
}

static_always_inline void
aes256_key_expand (u8x16 *rk, u8x16u const *k)
{
  rk[0] = k[0];
  rk[1] = k[1];
  aes256_key_expand_round_neon (rk + 2, 0x01);
  aes256_key_expand_round_neon (rk + 3, 0);
  aes256_key_expand_round_neon (rk + 4, 0x02);
  aes256_key_expand_round_neon (rk + 5, 0);
  aes256_key_expand_round_neon (rk + 6, 0x04);
  aes256_key_expand_round_neon (rk + 7, 0);
  aes256_key_expand_round_neon (rk + 8, 0x08);
  aes256_key_expand_round_neon (rk + 9, 0);
  aes256_key_expand_round_neon (rk + 10, 0x10);
  aes256_key_expand_round_neon (rk + 11, 0);
  aes256_key_expand_round_neon (rk + 12, 0x20);
  aes256_key_expand_round_neon (rk + 13, 0);
  aes256_key_expand_round_neon (rk + 14, 0x40);
}
#endif

static_always_inline void
aes_key_expand (u8x16 * key_schedule, u8 const *key, aes_key_size_t ks)
{
  switch (ks)
    {
    case AES_KEY_128:
      aes128_key_expand (key_schedule, (u8x16u const *) key);
      break;
    case AES_KEY_192:
      aes192_key_expand (key_schedule, (u8x16u const *) key);
      break;
    case AES_KEY_256:
      aes256_key_expand (key_schedule, (u8x16u const *) key);
      break;
    }
}

static_always_inline void
aes_key_enc_to_dec (u8x16 * ke, u8x16 * kd, aes_key_size_t ks)
{
  int rounds = AES_KEY_ROUNDS (ks);

  kd[rounds] = ke[0];
  kd[0] = ke[rounds];

  for (int i = 1; i < (rounds / 2); i++)
    {
      kd[rounds - i] = aes_inv_mix_column (ke[i]);
      kd[i] = aes_inv_mix_column (ke[rounds - i]);
    }

  kd[rounds / 2] = aes_inv_mix_column (ke[rounds / 2]);
}
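
/* A minimal single-block decryption sketch using the inverse schedule built
   above (illustrative only; the name aes_decrypt_block is not part of this
   header, and aes_dec_round / aes_dec_last_round are only defined for
   __x86_64__):

     static_always_inline u8x16
     aes_decrypt_block (u8x16 block, const u8x16 * kd, aes_key_size_t ks)
     {
       int rounds = AES_KEY_ROUNDS (ks);
       block ^= kd[0];
       for (int i = 1; i < rounds; i++)
         block = aes_dec_round (block, kd[i]);
       return aes_dec_last_round (block, kd[rounds]);
     }
 */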

#endif /* __aesni_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */