/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#ifndef __aesni_h__
#define __aesni_h__

typedef enum
{
  AES_KEY_128 = 0,
  AES_KEY_192 = 1,
  AES_KEY_256 = 2,
} aes_key_size_t;

#define AES_KEY_ROUNDS(x) (10 + x * 2)
#define AES_KEY_BYTES(x)  (16 + x * 8)
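
/* The enum values are chosen so the macros above reduce to simple
   arithmetic: AES_KEY_128 -> 10 rounds / 16 key bytes, AES_KEY_192 ->
   12 rounds / 24 key bytes, AES_KEY_256 -> 14 rounds / 32 key bytes.
   An expanded schedule holds AES_KEY_ROUNDS (ks) + 1 round keys. */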
static_always_inline u8x16
aes_block_load (u8 * p)
{
  return (u8x16) _mm_loadu_si128 ((__m128i *) p);
}

static_always_inline u8x16
aes_enc_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesenc_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline u8x16
aes_enc_last_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesenclast_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline u8x16
aes_dec_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesdec_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline u8x16
aes_dec_last_round (u8x16 a, u8x16 k)
{
  return (u8x16) _mm_aesdeclast_si128 ((__m128i) a, (__m128i) k);
}

static_always_inline void
aes_block_store (u8 * p, u8x16 r)
{
  _mm_storeu_si128 ((__m128i *) p, (__m128i) r);
}
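
/* Illustrative sketch, not part of the original header: a hypothetical
   helper showing how the wrappers above compose into a single-block
   encryption.  `ks' is an expanded schedule of rounds + 1 round keys. */
static_always_inline u8x16
aes_example_encrypt_block (u8x16 block, u8x16 * ks, int rounds)
{
  block ^= ks[0];		/* initial AddRoundKey */
  for (int i = 1; i < rounds; i++)
    block = aes_enc_round (block, ks[i]);	/* full middle rounds */
  return aes_enc_last_round (block, ks[rounds]);	/* last round has no MixColumns */
}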
static_always_inline u8x16
aes_inv_mix_column (u8x16 a)
{
  return (u8x16) _mm_aesimc_si128 ((__m128i) a);
}
/* AES-NI based AES key expansion based on code samples from
   Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper */
static_always_inline void
aes128_key_assist (__m128i * k, __m128i r)
{
  __m128i t = k[-1];		/* previous round key */
  t ^= _mm_slli_si128 (t, 4);
  t ^= _mm_slli_si128 (t, 4);
  t ^= _mm_slli_si128 (t, 4);
  k[0] = t ^ _mm_shuffle_epi32 (r, 0xff);
}
static_always_inline void
aes128_key_expand (u8x16 * key_schedule, u8 * key)
{
  __m128i *k = (__m128i *) key_schedule;
  k[0] = _mm_loadu_si128 ((const __m128i *) key);
  aes128_key_assist (k + 1, _mm_aeskeygenassist_si128 (k[0], 0x01));
  aes128_key_assist (k + 2, _mm_aeskeygenassist_si128 (k[1], 0x02));
  aes128_key_assist (k + 3, _mm_aeskeygenassist_si128 (k[2], 0x04));
  aes128_key_assist (k + 4, _mm_aeskeygenassist_si128 (k[3], 0x08));
  aes128_key_assist (k + 5, _mm_aeskeygenassist_si128 (k[4], 0x10));
  aes128_key_assist (k + 6, _mm_aeskeygenassist_si128 (k[5], 0x20));
  aes128_key_assist (k + 7, _mm_aeskeygenassist_si128 (k[6], 0x40));
  aes128_key_assist (k + 8, _mm_aeskeygenassist_si128 (k[7], 0x80));
  aes128_key_assist (k + 9, _mm_aeskeygenassist_si128 (k[8], 0x1b));
  aes128_key_assist (k + 10, _mm_aeskeygenassist_si128 (k[9], 0x36));
}
static_always_inline void
aes192_key_assist (__m128i * r1, __m128i * r2, __m128i key_assist)
{
  __m128i t;
  *r1 ^= t = _mm_slli_si128 (*r1, 0x4);
  *r1 ^= t = _mm_slli_si128 (t, 0x4);
  *r1 ^= _mm_slli_si128 (t, 0x4);
  *r1 ^= _mm_shuffle_epi32 (key_assist, 0x55);
  *r2 ^= _mm_slli_si128 (*r2, 0x4);
  *r2 ^= _mm_shuffle_epi32 (*r1, 0xff);
}
static_always_inline void
aes192_key_expand (u8x16 * key_schedule, u8 * key)
{
  __m128i r1, r2, *k = (__m128i *) key_schedule;

  k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
  /* load the 24-byte key as 2 * 16 bytes (the last 8 bytes of the second
     load are ignored) */
  k[1] = r2 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) key + 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x1));
  k[1] = (__m128i) _mm_shuffle_pd ((__m128d) k[1], (__m128d) r1, 0);
  k[2] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x2));
  k[3] = r1;
  k[4] = r2;

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x4));
  k[4] = (__m128i) _mm_shuffle_pd ((__m128d) k[4], (__m128d) r1, 0);
  k[5] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x8));
  k[6] = r1;
  k[7] = r2;

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x10));
  k[7] = (__m128i) _mm_shuffle_pd ((__m128d) k[7], (__m128d) r1, 0);
  k[8] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x20));
  k[9] = r1;
  k[10] = r2;

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x40));
  k[10] = (__m128i) _mm_shuffle_pd ((__m128d) k[10], (__m128d) r1, 0);
  k[11] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);

  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x80));
  k[12] = r1;
}
static_always_inline void
aes256_key_assist (__m128i * k, int i, __m128i key_assist)
{
  __m128i r, t;
  k += i;
  r = k[-2];
  r ^= t = _mm_slli_si128 (r, 0x4);
  r ^= t = _mm_slli_si128 (t, 0x4);
  r ^= _mm_slli_si128 (t, 0x4);
  r ^= _mm_shuffle_epi32 (key_assist, 0xff);
  k[0] = r;
  r = k[-1];
  r ^= t = _mm_slli_si128 (r, 0x4);
  r ^= t = _mm_slli_si128 (t, 0x4);
  r ^= _mm_slli_si128 (t, 0x4);
  r ^= _mm_shuffle_epi32 (_mm_aeskeygenassist_si128 (k[0], 0x0), 0xaa);
  k[1] = r;
}
static_always_inline void
aes256_key_expand (u8x16 * key_schedule, u8 * key)
{
  __m128i *k = (__m128i *) key_schedule;
  k[0] = _mm_loadu_si128 ((__m128i *) key);
  k[1] = _mm_loadu_si128 ((__m128i *) (key + 16));
  aes256_key_assist (k, 2, _mm_aeskeygenassist_si128 (k[1], 0x01));
  aes256_key_assist (k, 4, _mm_aeskeygenassist_si128 (k[3], 0x02));
  aes256_key_assist (k, 6, _mm_aeskeygenassist_si128 (k[5], 0x04));
  aes256_key_assist (k, 8, _mm_aeskeygenassist_si128 (k[7], 0x08));
  aes256_key_assist (k, 10, _mm_aeskeygenassist_si128 (k[9], 0x10));
  aes256_key_assist (k, 12, _mm_aeskeygenassist_si128 (k[11], 0x20));
  aes256_key_assist (k, 14, _mm_aeskeygenassist_si128 (k[13], 0x40));
}
static_always_inline void
aes_key_expand (u8x16 * key_schedule, u8 * key, aes_key_size_t ks)
{
  switch (ks)
    {
    case AES_KEY_128:
      aes128_key_expand (key_schedule, key);
      break;
    case AES_KEY_192:
      aes192_key_expand (key_schedule, key);
      break;
    case AES_KEY_256:
      aes256_key_expand (key_schedule, key);
      break;
    }
}
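
/* Usage sketch (an assumption for illustration, not code from this file):
   a buffer sized for the largest schedule works for every key size, e.g.

     u8x16 ks[AES_KEY_ROUNDS (AES_KEY_256) + 1];   (15 round keys)
     aes_key_expand (ks, key, AES_KEY_256);
 */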
static_always_inline void
aes_key_enc_to_dec (u8x16 * ke, u8x16 * kd, aes_key_size_t ks)
{
  int rounds = AES_KEY_ROUNDS (ks);

  /* first and last round keys are swapped and used as-is, the middle keys
     are run through InvMixColumns (equivalent inverse cipher) */
  kd[rounds] = ke[0];
  kd[0] = ke[rounds];

  for (int i = 1; i < (rounds / 2); i++)
    {
      kd[rounds - i] = aes_inv_mix_column (ke[i]);
      kd[i] = aes_inv_mix_column (ke[rounds - i]);
    }

  kd[rounds / 2] = aes_inv_mix_column (ke[rounds / 2]);
}
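
/* Illustrative sketch, not part of the original header: a hypothetical
   helper decrypting one block with a schedule produced by
   aes_key_enc_to_dec().  With the equivalent inverse cipher the
   decryption schedule is consumed front to back, mirroring encryption. */
static_always_inline u8x16
aes_example_decrypt_block (u8x16 block, u8x16 * kd, int rounds)
{
  block ^= kd[0];		/* AddRoundKey with last encryption key */
  for (int i = 1; i < rounds; i++)
    block = aes_dec_round (block, kd[i]);	/* AESDEC middle rounds */
  return aes_dec_last_round (block, kd[rounds]);	/* final round */
}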
#endif /* __aesni_h__ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */