/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#ifndef __aesni_h__
#define __aesni_h__
typedef enum
{
  AES_KEY_128 = 0,
  AES_KEY_192 = 1,
  AES_KEY_256 = 2,
} aes_key_size_t;

#define AES_KEY_ROUNDS(x) (10 + x * 2)
#define AES_KEY_BYTES(x)  (16 + x * 8)
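/* With the enum values above, these macros evaluate to the per-key-size
   constants from the AES spec: AES-128/192/256 use 10/12/14 rounds and
   16/24/32 key bytes, e.g. AES_KEY_ROUNDS (AES_KEY_256) = 10 + 2 * 2 = 14
   and AES_KEY_BYTES (AES_KEY_192) = 16 + 1 * 8 = 24. */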
static const u8x16 byte_mask_scale = {
  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
static_always_inline u8x16
aes_block_load (u8 * p)
{
  return *(u8x16u *) p;
}
static_always_inline u8x16
aes_enc_round (u8x16 a, u8x16 k)
{
#if defined (__AES__)
  return (u8x16) _mm_aesenc_si128 ((__m128i) a, (__m128i) k);
#elif defined (__ARM_FEATURE_AES)
  return vaesmcq_u8 (vaeseq_u8 (a, u8x16_splat (0))) ^ k;
#endif
}
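/* Note on the two variants above: x86 AESENC performs ShiftRows, SubBytes
   and MixColumns and then XORs the round key, while ARM AESE performs
   AddRoundKey, SubBytes and ShiftRows, with MixColumns split out into AESMC.
   Passing an all-zero key to AESE makes its AddRoundKey a no-op, so applying
   AESMC and XORing the round key afterwards reproduces the x86 round
   ordering exactly. */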
static_always_inline u8x16
aes_enc_last_round (u8x16 a, u8x16 k)
{
#if defined (__AES__)
  return (u8x16) _mm_aesenclast_si128 ((__m128i) a, (__m128i) k);
#elif defined (__ARM_FEATURE_AES)
  return vaeseq_u8 (a, u8x16_splat (0)) ^ k;
#endif
}
#ifdef __x86_64__

static_always_inline u8x16
aes_dec_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesdec_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline u8x16
aes_dec_last_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesdeclast_si128 ((__m128i) a, (__m128i) k);
}
#endif
static_always_inline void
aes_block_store (u8 * p, u8x16 r)
{
  *(u8x16u *) p = r;
}
static_always_inline u8x16
aes_byte_mask (u8x16 x, u8 n_bytes)
{
  return x & u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);
}
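/* aes_byte_mask () keeps the first n_bytes lanes of x and zeroes the rest:
   comparing u8x16_splat (n_bytes) against byte_mask_scale = { 0 ... 15 }
   yields 0xff in every lane whose index is below n_bytes, e.g. n_bytes = 3
   selects lanes 0, 1 and 2 only. */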
static_always_inline u8x16
aes_load_partial (u8x16u * p, int n_bytes)
{
  ASSERT (n_bytes <= 16);
#ifdef __AVX512F__
  __m128i zero = { };
  return (u8x16) _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p);
#else
  return aes_byte_mask (CLIB_MEM_OVERFLOW_LOAD (*, p), n_bytes);
#endif
}
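/* On AVX512 the masked load touches only the first n_bytes of the source,
   so it is safe at the end of a mapped region; the fallback reads a full
   16 bytes (flagged as a deliberate overflow load via CLIB_MEM_OVERFLOW_LOAD)
   and masks the tail off afterwards. */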
static_always_inline void
aes_store_partial (void *p, u8x16 r, int n_bytes)
{
#if __aarch64__
  clib_memcpy_fast (p, &r, n_bytes);
#else
#ifdef __AVX512F__
  _mm_mask_storeu_epi8 (p, (1 << n_bytes) - 1, (__m128i) r);
#else
  u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);
  _mm_maskmoveu_si128 ((__m128i) r, (__m128i) mask, p);
#endif
#endif
}
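/* Three strategies for the partial store above: a plain memcpy on AArch64,
   an AVX512 masked store (a single instruction that writes only the selected
   bytes), and SSE MASKMOVDQU otherwise.  All of them write exactly n_bytes
   and leave the rest of the destination untouched, which callers rely on
   when storing a partial trailing block. */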
static_always_inline u8x16
aes_encrypt_block (u8x16 block, const u8x16 * round_keys, aes_key_size_t ks)
{
  int rounds = AES_KEY_ROUNDS (ks);
  block ^= round_keys[0];
  for (int i = 1; i < rounds; i += 1)
    block = aes_enc_round (block, round_keys[i]);
  return aes_enc_last_round (block, round_keys[rounds]);
}
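/* Usage sketch (hypothetical caller, not part of this header; key, src and
   dst are assumed caller-provided u8 pointers): expand the key once with
   aes_key_expand () below, then reuse the schedule for every block.

     u8x16 rk[AES_KEY_ROUNDS (AES_KEY_128) + 1];
     aes_key_expand (rk, key, AES_KEY_128);
     aes_block_store (dst, aes_encrypt_block (aes_block_load (src), rk,
					      AES_KEY_128));
*/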
static_always_inline u8x16
aes_inv_mix_column (u8x16 a)
{
#if defined (__AES__)
  return (u8x16) _mm_aesimc_si128 ((__m128i) a);
#elif defined (__ARM_FEATURE_AES)
  return vaesimcq_u8 (a);
#endif
}
#ifdef __x86_64__

#define aes_keygen_assist(a, b) \
  (u8x16) _mm_aeskeygenassist_si128((__m128i) a, b)

/* AES-NI based AES key expansion based on code samples from
   Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper */
static_always_inline void
aes128_key_assist (u8x16 * rk, u8x16 r)
{
  u8x16 t = rk[-1];
  t ^= u8x16_word_shift_left (t, 4);
  t ^= u8x16_word_shift_left (t, 4);
  t ^= u8x16_word_shift_left (t, 4);
  rk[0] = t ^ (u8x16) u32x4_shuffle ((u32x4) r, 3, 3, 3, 3);
}
static_always_inline void
aes128_key_expand (u8x16 * rk, u8x16u const *k)
{
  rk[0] = k[0];
  aes128_key_assist (rk + 1, aes_keygen_assist (rk[0], 0x01));
  aes128_key_assist (rk + 2, aes_keygen_assist (rk[1], 0x02));
  aes128_key_assist (rk + 3, aes_keygen_assist (rk[2], 0x04));
  aes128_key_assist (rk + 4, aes_keygen_assist (rk[3], 0x08));
  aes128_key_assist (rk + 5, aes_keygen_assist (rk[4], 0x10));
  aes128_key_assist (rk + 6, aes_keygen_assist (rk[5], 0x20));
  aes128_key_assist (rk + 7, aes_keygen_assist (rk[6], 0x40));
  aes128_key_assist (rk + 8, aes_keygen_assist (rk[7], 0x80));
  aes128_key_assist (rk + 9, aes_keygen_assist (rk[8], 0x1b));
  aes128_key_assist (rk + 10, aes_keygen_assist (rk[9], 0x36));
}
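/* The constants 0x01 ... 0x36 fed to aes_keygen_assist above are the AES
   rcon sequence: each is double the previous one in GF(2^8), where doubling
   0x80 wraps to 0x1b via the field polynomial and then on to 0x36. */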
static_always_inline void
aes192_key_assist (u8x16 * r1, u8x16 * r2, u8x16 key_assist)
{
  u8x16 t;
  r1[0] ^= t = u8x16_word_shift_left (r1[0], 4);
  r1[0] ^= t = u8x16_word_shift_left (t, 4);
  r1[0] ^= u8x16_word_shift_left (t, 4);
  r1[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) key_assist, 0x55);
  r2[0] ^= u8x16_word_shift_left (r2[0], 4);
  r2[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) r1[0], 0xff);
}
static_always_inline void
aes192_key_expand (u8x16 * rk, u8x16u const *k)
{
  u8x16 r1, r2;

  rk[0] = r1 = k[0];
  rk[1] = r2 = (u8x16) (u64x2) { *(u64 *) (k + 1), 0 };

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x1));
  rk[1] = (u8x16) _mm_shuffle_pd ((__m128d) rk[1], (__m128d) r1, 0);
  rk[2] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x2));
  rk[3] = r1;
  rk[4] = r2;

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x4));
  rk[4] = (u8x16) _mm_shuffle_pd ((__m128d) rk[4], (__m128d) r1, 0);
  rk[5] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x8));
  rk[6] = r1;
  rk[7] = r2;

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x10));
  rk[7] = (u8x16) _mm_shuffle_pd ((__m128d) rk[7], (__m128d) r1, 0);
  rk[8] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x20));
  rk[9] = r1;
  rk[10] = r2;

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x40));
  rk[10] = (u8x16) _mm_shuffle_pd ((__m128d) rk[10], (__m128d) r1, 0);
  rk[11] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x80));
  rk[12] = r1;
}
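/* AES-192 expansion is the awkward case: each iteration produces six 32-bit
   key words while a round key consumes four, so consecutive round keys
   straddle 128-bit vector boundaries.  The _mm_shuffle_pd calls above stitch
   the 64-bit halves back into properly aligned round keys. */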
static_always_inline void
aes256_key_assist (u8x16 * rk, int i, u8x16 key_assist)
{
  u8x16 r, t;
  rk += i;
  r = rk[-2];
  r ^= t = u8x16_word_shift_left (r, 4);
  r ^= t = u8x16_word_shift_left (t, 4);
  r ^= u8x16_word_shift_left (t, 4);
  r ^= (u8x16) u32x4_shuffle ((u32x4) key_assist, 3, 3, 3, 3);
  rk[0] = r;

  if (i < 14)
    {
      key_assist = aes_keygen_assist (rk[0], 0x0);
      r = rk[-1];
      r ^= t = u8x16_word_shift_left (r, 4);
      r ^= t = u8x16_word_shift_left (t, 4);
      r ^= u8x16_word_shift_left (t, 4);
      r ^= (u8x16) u32x4_shuffle ((u32x4) key_assist, 2, 2, 2, 2);
      rk[1] = r;
    }
}
static_always_inline void
aes256_key_expand (u8x16 * rk, u8x16u const *k)
{
  rk[0] = k[0];
  rk[1] = k[1];
  aes256_key_assist (rk, 2, aes_keygen_assist (rk[1], 0x01));
  aes256_key_assist (rk, 4, aes_keygen_assist (rk[3], 0x02));
  aes256_key_assist (rk, 6, aes_keygen_assist (rk[5], 0x04));
  aes256_key_assist (rk, 8, aes_keygen_assist (rk[7], 0x08));
  aes256_key_assist (rk, 10, aes_keygen_assist (rk[9], 0x10));
  aes256_key_assist (rk, 12, aes_keygen_assist (rk[11], 0x20));
  aes256_key_assist (rk, 14, aes_keygen_assist (rk[13], 0x40));
}
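/* AES-256 derives two round keys per assist call: the even-indexed one uses
   RotWord + SubWord + rcon on the last word as usual, while the odd-indexed
   one applies SubWord only, which is why the helper re-runs
   aes_keygen_assist with rcon 0 and selects shuffle lane 2 instead of 3. */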
#endif

#ifdef __aarch64__

static const u8x16 aese_prep_mask1 =
  { 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12 };
static const u8x16 aese_prep_mask2 =
  { 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15 };
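/* vqtbl1q_u8 with aese_prep_mask1 broadcasts the rotated last word of the
   previous round key (bytes 13, 14, 15, 12, i.e. RotWord) into all four
   words; aese_prep_mask2 broadcasts it unrotated.  Because every column is
   then identical, ShiftRows inside AESE is a no-op, so AESE with an all-zero
   key degenerates to plain SubWord on each lane.  This is how the functions
   below emulate the x86 AESKEYGENASSIST primitive. */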
static_always_inline void
aes128_key_expand_round_neon (u8x16 * rk, u32 rcon)
{
  u8x16 r, t, last_round = rk[-1], z = { };
  r = vqtbl1q_u8 (last_round, aese_prep_mask1);
  r = vaeseq_u8 (r, z);
  r ^= (u8x16) vdupq_n_u32 (rcon);
  r ^= last_round;
  r ^= t = vextq_u8 (z, last_round, 12);
  r ^= t = vextq_u8 (z, t, 12);
  r ^= vextq_u8 (z, t, 12);
  rk[0] = r;
}
static_always_inline void
aes128_key_expand (u8x16 * rk, const u8x16u * k)
{
  rk[0] = k[0];
  aes128_key_expand_round_neon (rk + 1, 0x01);
  aes128_key_expand_round_neon (rk + 2, 0x02);
  aes128_key_expand_round_neon (rk + 3, 0x04);
  aes128_key_expand_round_neon (rk + 4, 0x08);
  aes128_key_expand_round_neon (rk + 5, 0x10);
  aes128_key_expand_round_neon (rk + 6, 0x20);
  aes128_key_expand_round_neon (rk + 7, 0x40);
  aes128_key_expand_round_neon (rk + 8, 0x80);
  aes128_key_expand_round_neon (rk + 9, 0x1b);
  aes128_key_expand_round_neon (rk + 10, 0x36);
}
static_always_inline void
aes192_key_expand_round_neon (u8x8 * rk, u32 rcon)
{
  u8x8 r, last_round = rk[-1], z = { };
  u8x16 r2, z2 = { };

  r2 = (u8x16) vdupq_lane_u64 ((uint64x1_t) last_round, 0);
  r2 = vqtbl1q_u8 (r2, aese_prep_mask1);
  r2 = vaeseq_u8 (r2, z2);
  r2 ^= (u8x16) vdupq_n_u32 (rcon);

  r = (u8x8) vdup_laneq_u64 ((u64x2) r2, 0);
  r ^= vext_u8 (z, rk[-3], 4);
  r ^= rk[-3];
  rk[0] = r;

  r = rk[-2] ^ vext_u8 (r, z, 4);
  r ^= vext_u8 (z, r, 4);
  rk[1] = r;

  if (rcon == 0x80)
    return;

  r = rk[-1] ^ vext_u8 (r, z, 4);
  r ^= vext_u8 (z, r, 4);
  rk[2] = r;
}
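/* The 192-bit NEON path works in 64-bit (u8x8) chunks: the key schedule
   advances six 32-bit words, i.e. three u8x8 chunks, per iteration, which is
   why the helper indexes rk[-3] ... rk[-1] and the expansion below steps rk
   by 3.  The final iteration (rcon 0x80) stops after two chunks because the
   schedule ends there. */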
static_always_inline void
aes192_key_expand (u8x16 * ek, const u8x16u * k)
{
  u8x8 *rk = (u8x8 *) ek;
  ek[0] = k[0];
  rk[2] = *(u8x8u *) (k + 1);
  aes192_key_expand_round_neon (rk + 3, 0x01);
  aes192_key_expand_round_neon (rk + 6, 0x02);
  aes192_key_expand_round_neon (rk + 9, 0x04);
  aes192_key_expand_round_neon (rk + 12, 0x08);
  aes192_key_expand_round_neon (rk + 15, 0x10);
  aes192_key_expand_round_neon (rk + 18, 0x20);
  aes192_key_expand_round_neon (rk + 21, 0x40);
  aes192_key_expand_round_neon (rk + 24, 0x80);
}
static_always_inline void
aes256_key_expand_round_neon (u8x16 * rk, u32 rcon)
{
  u8x16 r, t, z = { };

  r = vqtbl1q_u8 (rk[-1], rcon ? aese_prep_mask1 : aese_prep_mask2);
  r = vaeseq_u8 (r, z);
  if (rcon)
    r ^= (u8x16) vdupq_n_u32 (rcon);
  r ^= rk[-2];
  r ^= t = vextq_u8 (z, rk[-2], 12);
  r ^= t = vextq_u8 (z, t, 12);
  r ^= vextq_u8 (z, t, 12);
  rk[0] = r;
}
static_always_inline void
aes256_key_expand (u8x16 * rk, u8x16u const *k)
{
  rk[0] = k[0];
  rk[1] = k[1];
  aes256_key_expand_round_neon (rk + 2, 0x01);
  aes256_key_expand_round_neon (rk + 3, 0);
  aes256_key_expand_round_neon (rk + 4, 0x02);
  aes256_key_expand_round_neon (rk + 5, 0);
  aes256_key_expand_round_neon (rk + 6, 0x04);
  aes256_key_expand_round_neon (rk + 7, 0);
  aes256_key_expand_round_neon (rk + 8, 0x08);
  aes256_key_expand_round_neon (rk + 9, 0);
  aes256_key_expand_round_neon (rk + 10, 0x10);
  aes256_key_expand_round_neon (rk + 11, 0);
  aes256_key_expand_round_neon (rk + 12, 0x20);
  aes256_key_expand_round_neon (rk + 13, 0);
  aes256_key_expand_round_neon (rk + 14, 0x40);
}

#endif
static_always_inline void
aes_key_expand (u8x16 * key_schedule, u8 const *key, aes_key_size_t ks)
{
  switch (ks)
    {
    case AES_KEY_128:
      aes128_key_expand (key_schedule, (u8x16u const *) key);
      break;
    case AES_KEY_192:
      aes192_key_expand (key_schedule, (u8x16u const *) key);
      break;
    case AES_KEY_256:
      aes256_key_expand (key_schedule, (u8x16u const *) key);
      break;
    }
}
static_always_inline void
aes_key_enc_to_dec (u8x16 * ke, u8x16 * kd, aes_key_size_t ks)
{
  int rounds = AES_KEY_ROUNDS (ks);

  kd[rounds] = ke[0];
  kd[0] = ke[rounds];

  for (int i = 1; i < (rounds / 2); i++)
    {
      kd[rounds - i] = aes_inv_mix_column (ke[i]);
      kd[i] = aes_inv_mix_column (ke[rounds - i]);
    }

  kd[rounds / 2] = aes_inv_mix_column (ke[rounds / 2]);
}
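/* This builds the "equivalent inverse cipher" schedule: decryption uses the
   encryption round keys in reverse order, with InvMixColumns applied to all
   but the first and last, so that AESDEC / AESDECLAST (which fold
   InvMixColumns into the round) produce the correct result. */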
#endif /* __aesni_h__ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */