/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define AES_KEY_ROUNDS(x) (10 + x * 2)
#define AES_KEY_BYTES(x)  (16 + x * 8)

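/* Illustrative check (not part of the original header), assuming
   aes_key_size_t encodes AES-128/192/256 as 0, 1 and 2: the macros above
   yield 10/12/14 rounds and 16/24/32 key bytes respectively. */
_Static_assert (AES_KEY_ROUNDS (0) == 10 && AES_KEY_BYTES (0) == 16, "AES-128");
_Static_assert (AES_KEY_ROUNDS (1) == 12 && AES_KEY_BYTES (1) == 24, "AES-192");
_Static_assert (AES_KEY_ROUNDS (2) == 14 && AES_KEY_BYTES (2) == 32, "AES-256");
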
static_always_inline u8x16
aes_block_load (u8 * p)
{
  return (u8x16) _mm_loadu_si128 ((__m128i *) p);
}

static_always_inline u8x16
aes_enc_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesenc_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline u8x16
aes_enc_last_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesenclast_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline u8x16
aes_dec_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesdec_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline u8x16
aes_dec_last_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesdeclast_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline void
aes_block_store (u8 * p, u8x16 r)
{
  _mm_storeu_si128 ((__m128i *) p, (__m128i) r);
}

static_always_inline u8x16
aes_inv_mix_column (u8x16 a)
{
  return (u8x16) _mm_aesimc_si128 ((__m128i) a);
}

/* AES-NI based AES key expansion based on code samples from
   Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper */

static_always_inline void
aes128_key_assist (__m128i * k, __m128i r)
{
  __m128i t = k[-1];
  t ^= _mm_slli_si128 (t, 4);
  t ^= _mm_slli_si128 (t, 4);
  t ^= _mm_slli_si128 (t, 4);
  k[0] = t ^ _mm_shuffle_epi32 (r, 0xff);
}

static_always_inline void
aes128_key_expand (u8x16 * key_schedule, u8 * key)
{
  __m128i *k = (__m128i *) key_schedule;
  k[0] = _mm_loadu_si128 ((const __m128i *) key);
  aes128_key_assist (k + 1, _mm_aeskeygenassist_si128 (k[0], 0x01));
  aes128_key_assist (k + 2, _mm_aeskeygenassist_si128 (k[1], 0x02));
  aes128_key_assist (k + 3, _mm_aeskeygenassist_si128 (k[2], 0x04));
  aes128_key_assist (k + 4, _mm_aeskeygenassist_si128 (k[3], 0x08));
  aes128_key_assist (k + 5, _mm_aeskeygenassist_si128 (k[4], 0x10));
  aes128_key_assist (k + 6, _mm_aeskeygenassist_si128 (k[5], 0x20));
  aes128_key_assist (k + 7, _mm_aeskeygenassist_si128 (k[6], 0x40));
  aes128_key_assist (k + 8, _mm_aeskeygenassist_si128 (k[7], 0x80));
  aes128_key_assist (k + 9, _mm_aeskeygenassist_si128 (k[8], 0x1b));
  aes128_key_assist (k + 10, _mm_aeskeygenassist_si128 (k[9], 0x36));
}

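/* Illustrative usage sketch (not part of the original header), assuming the
   x86 wrappers above: expand a 128-bit key and encrypt a single 16-byte
   block.  The function name is hypothetical. */
static_always_inline void
aes128_encrypt_block_sketch (u8 * dst, u8 * src, u8 * key)
{
  u8x16 rk[11];			/* 10 rounds + initial AddRoundKey */
  u8x16 b;

  aes128_key_expand (rk, key);
  b = aes_block_load (src) ^ rk[0];	/* initial AddRoundKey */
  for (int i = 1; i < 10; i++)
    b = aes_enc_round (b, rk[i]);	/* rounds 1-9 */
  b = aes_enc_last_round (b, rk[10]);	/* final round, no MixColumns */
  aes_block_store (dst, b);
}
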
static_always_inline void
aes192_key_assist (__m128i * r1, __m128i * r2, __m128i key_assist)
{
  __m128i t;
  *r1 ^= t = _mm_slli_si128 (*r1, 0x4);
  *r1 ^= t = _mm_slli_si128 (t, 0x4);
  *r1 ^= _mm_slli_si128 (t, 0x4);
  *r1 ^= _mm_shuffle_epi32 (key_assist, 0x55);
  *r2 ^= _mm_slli_si128 (*r2, 0x4);
  *r2 ^= _mm_shuffle_epi32 (*r1, 0xff);
}

static_always_inline void
aes192_key_expand (u8x16 * key_schedule, u8 * key)
{
  __m128i r1, r2, *k = (__m128i *) key_schedule;

  k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
  /* load the 24-byte key as 2 * 16 bytes (and ignore the last 8 bytes) */
  k[1] = r2 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) key + 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x1));
  k[1] = (__m128i) _mm_shuffle_pd ((__m128d) k[1], (__m128d) r1, 0);
  k[2] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x2));
  k[3] = r1;
  k[4] = r2;

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x4));
  k[4] = (__m128i) _mm_shuffle_pd ((__m128d) k[4], (__m128d) r1, 0);
  k[5] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x8));
  k[6] = r1;
  k[7] = r2;

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x10));
  k[7] = (__m128i) _mm_shuffle_pd ((__m128d) k[7], (__m128d) r1, 0);
  k[8] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x20));
  k[9] = r1;
  k[10] = r2;

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x40));
  k[10] = (__m128i) _mm_shuffle_pd ((__m128d) k[10], (__m128d) r1, 0);
  k[11] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x80));
  k[12] = r1;
}

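/* AES-192 needs 13 16-byte round keys (k[0]..k[12]), so the final assist
   call above only contributes r1. */
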
static_always_inline void
aes256_key_assist (__m128i * k, int i, __m128i key_assist)
{
  __m128i r, t;
  k += i;
  r = k[-2];
  r ^= t = _mm_slli_si128 (r, 0x4);
  r ^= t = _mm_slli_si128 (t, 0x4);
  r ^= _mm_slli_si128 (t, 0x4);
  r ^= _mm_shuffle_epi32 (key_assist, 0xff);
  k[0] = r;
  if (i == 14)			/* k[14] is the last AES-256 round key */
    return;
  r = k[-1];
  r ^= t = _mm_slli_si128 (r, 0x4);
  r ^= t = _mm_slli_si128 (t, 0x4);
  r ^= _mm_slli_si128 (t, 0x4);
  r ^= _mm_shuffle_epi32 (_mm_aeskeygenassist_si128 (k[0], 0x0), 0xaa);
  k[1] = r;
}

static_always_inline void
aes256_key_expand (u8x16 * key_schedule, u8 * key)
{
  __m128i *k = (__m128i *) key_schedule;
  k[0] = _mm_loadu_si128 ((__m128i *) key);
  k[1] = _mm_loadu_si128 ((__m128i *) (key + 16));
  aes256_key_assist (k, 2, _mm_aeskeygenassist_si128 (k[1], 0x01));
  aes256_key_assist (k, 4, _mm_aeskeygenassist_si128 (k[3], 0x02));
  aes256_key_assist (k, 6, _mm_aeskeygenassist_si128 (k[5], 0x04));
  aes256_key_assist (k, 8, _mm_aeskeygenassist_si128 (k[7], 0x08));
  aes256_key_assist (k, 10, _mm_aeskeygenassist_si128 (k[9], 0x10));
  aes256_key_assist (k, 12, _mm_aeskeygenassist_si128 (k[11], 0x20));
  aes256_key_assist (k, 14, _mm_aeskeygenassist_si128 (k[13], 0x40));
}

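/* AES-256 needs 15 round keys (k[0]..k[14]); the last call above only has to
   produce k[14], hence the early return in aes256_key_assist (). */
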
static_always_inline u8x16
aes_inv_mix_column (u8x16 a)
{
  return vaesimcq_u8 (a);
}

static const u8x16 aese_prep_mask1 =
  { 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12 };
static const u8x16 aese_prep_mask2 =
  { 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15 };

static_always_inline void
aes128_key_expand_round_neon (u8x16 * rk, u32 rcon)
{
  u8x16 r, t, last_round = rk[-1], z = { };
  r = vqtbl1q_u8 (last_round, aese_prep_mask1);
  r = vaeseq_u8 (r, z);
  r ^= (u8x16) vdupq_n_u32 (rcon);
  r ^= last_round;
  r ^= t = vextq_u8 (z, last_round, 12);
  r ^= t = vextq_u8 (z, t, 12);
  r ^= vextq_u8 (z, t, 12);
  rk[0] = r;
}

static_always_inline void
aes128_key_expand (u8x16 * rk, const u8 * k)
{
  rk[0] = vld1q_u8 (k);
  aes128_key_expand_round_neon (rk + 1, 0x01);
  aes128_key_expand_round_neon (rk + 2, 0x02);
  aes128_key_expand_round_neon (rk + 3, 0x04);
  aes128_key_expand_round_neon (rk + 4, 0x08);
  aes128_key_expand_round_neon (rk + 5, 0x10);
  aes128_key_expand_round_neon (rk + 6, 0x20);
  aes128_key_expand_round_neon (rk + 7, 0x40);
  aes128_key_expand_round_neon (rk + 8, 0x80);
  aes128_key_expand_round_neon (rk + 9, 0x1b);
  aes128_key_expand_round_neon (rk + 10, 0x36);
}

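/* Illustrative usage sketch (not part of the original header), assuming the
   Arm crypto extensions: AESE performs AddRoundKey + SubBytes + ShiftRows and
   AESMC performs MixColumns, so the last round key is applied with a plain
   XOR.  The function name is hypothetical. */
static_always_inline u8x16
aes128_encrypt_block_neon_sketch (u8x16 block, const u8x16 * rk)
{
  for (int i = 0; i < 9; i++)
    block = vaesmcq_u8 (vaeseq_u8 (block, rk[i]));	/* rounds 1-9 */
  block = vaeseq_u8 (block, rk[9]);	/* final round, no MixColumns ... */
  return block ^ rk[10];		/* ... followed by last AddRoundKey */
}
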
static_always_inline void
aes192_key_expand_round_neon (u8x8 * rk, u32 rcon)
{
  u8x8 r, last_round = rk[-1], z = { };
  u8x16 r2, z2 = { };

  r2 = (u8x16) vdupq_lane_u64 ((uint64x1_t) last_round, 0);
  r2 = vqtbl1q_u8 (r2, aese_prep_mask1);
  r2 = vaeseq_u8 (r2, z2);
  r2 ^= (u8x16) vdupq_n_u32 (rcon);

  r = (u8x8) vdup_laneq_u64 ((u64x2) r2, 0);
  r ^= rk[-3];
  r ^= vext_u8 (z, rk[-3], 4);
  rk[0] = r;
  r = rk[-2] ^ vext_u8 (r, z, 4);
  r ^= vext_u8 (z, r, 4);
  rk[1] = r;
  if (rcon == 0x80)
    return;
  r = rk[-1] ^ vext_u8 (r, z, 4);
  r ^= vext_u8 (z, r, 4);
  rk[2] = r;
}

static_always_inline void
aes192_key_expand (u8x16 * ek, const u8 * k)
{
  u8x8 *rk = (u8x8 *) ek;
  ek[0] = vld1q_u8 (k);
  rk[2] = vld1_u8 (k + 16);
  aes192_key_expand_round_neon (rk + 3, 0x01);
  aes192_key_expand_round_neon (rk + 6, 0x02);
  aes192_key_expand_round_neon (rk + 9, 0x04);
  aes192_key_expand_round_neon (rk + 12, 0x08);
  aes192_key_expand_round_neon (rk + 15, 0x10);
  aes192_key_expand_round_neon (rk + 18, 0x20);
  aes192_key_expand_round_neon (rk + 21, 0x40);
  aes192_key_expand_round_neon (rk + 24, 0x80);
}

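/* Each expansion round above appends 24 bytes (three u8x8 halves) to the
   schedule; the final round (rcon 0x80) appends only 16, yielding the 13
   16-byte round keys needed for AES-192. */
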
static_always_inline void
aes256_key_expand_round_neon (u8x16 * rk, u32 rcon)
{
  u8x16 r, t, z = { };
  r = vqtbl1q_u8 (rk[-1], rcon ? aese_prep_mask1 : aese_prep_mask2);
  r = vaeseq_u8 (r, z);
  if (rcon)
    r ^= (u8x16) vdupq_n_u32 (rcon);
  r ^= rk[-2];
  r ^= t = vextq_u8 (z, rk[-2], 12);
  r ^= t = vextq_u8 (z, t, 12);
  r ^= vextq_u8 (z, t, 12);
  rk[0] = r;
}

static_always_inline void
aes256_key_expand (u8x16 * rk, const u8 * k)
{
  rk[0] = vld1q_u8 (k);
  rk[1] = vld1q_u8 (k + 16);
  aes256_key_expand_round_neon (rk + 2, 0x01);
  aes256_key_expand_round_neon (rk + 3, 0);
  aes256_key_expand_round_neon (rk + 4, 0x02);
  aes256_key_expand_round_neon (rk + 5, 0);
  aes256_key_expand_round_neon (rk + 6, 0x04);
  aes256_key_expand_round_neon (rk + 7, 0);
  aes256_key_expand_round_neon (rk + 8, 0x08);
  aes256_key_expand_round_neon (rk + 9, 0);
  aes256_key_expand_round_neon (rk + 10, 0x10);
  aes256_key_expand_round_neon (rk + 11, 0);
  aes256_key_expand_round_neon (rk + 12, 0x20);
  aes256_key_expand_round_neon (rk + 13, 0);
  aes256_key_expand_round_neon (rk + 14, 0x40);
}

static_always_inline void
aes_key_expand (u8x16 * key_schedule, u8 * key, aes_key_size_t ks)
{
  switch (ks)
    {
    case AES_KEY_128:
      aes128_key_expand (key_schedule, key);
      break;
    case AES_KEY_192:
      aes192_key_expand (key_schedule, key);
      break;
    case AES_KEY_256:
      aes256_key_expand (key_schedule, key);
      break;
    }
}

static_always_inline void
aes_key_enc_to_dec (u8x16 * ke, u8x16 * kd, aes_key_size_t ks)
{
  int rounds = AES_KEY_ROUNDS (ks);

  kd[rounds] = ke[0];
  kd[0] = ke[rounds];
  for (int i = 1; i < (rounds / 2); i++)
    {
      kd[rounds - i] = aes_inv_mix_column (ke[i]);
      kd[i] = aes_inv_mix_column (ke[rounds - i]);
    }
  kd[rounds / 2] = aes_inv_mix_column (ke[rounds / 2]);
}

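/* Illustrative usage sketch (not part of the original header), assuming the
   x86 wrappers above: expand a key, convert it to a decryption schedule and
   decrypt one block.  The function name is hypothetical. */
static_always_inline void
aes_decrypt_block_sketch (u8 * dst, u8 * src, u8 * key, aes_key_size_t ks)
{
  u8x16 ke[15], kd[15];		/* large enough for AES-256 (14 rounds + 1) */
  int rounds = AES_KEY_ROUNDS (ks);
  u8x16 b;

  aes_key_expand (ke, key, ks);
  aes_key_enc_to_dec (ke, kd, ks);

  b = aes_block_load (src) ^ kd[0];	/* kd[0] is the last encryption key */
  for (int i = 1; i < rounds; i++)
    b = aes_dec_round (b, kd[i]);	/* keys already InvMixColumns-transformed */
  b = aes_dec_last_round (b, kd[rounds]);
  aes_block_store (dst, b);
}
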
#endif /* __aesni_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */