/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <x86intrin.h>
#include <crypto_ia32/crypto_ia32.h>
#include <crypto_ia32/aesni.h>

#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize ("O3")
#endif

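/* Per-key data: the expanded AES round-key schedules. When the file is
   built with VAES support the decrypt schedule is stored pre-broadcast in
   512-bit registers so the wide decrypt path can use it directly. */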
typedef struct
{
  __m128i encrypt_key[15];
#if __VAES__
  __m512i decrypt_key[15];
#else
  __m128i decrypt_key[15];
#endif
} aes_cbc_key_data_t;

static_always_inline __m128i
aes_block_load (u8 * p)
{
  return _mm_loadu_si128 ((__m128i *) p);
}

static_always_inline void
aes_block_store (u8 * p, __m128i r)
{
  _mm_storeu_si128 ((__m128i *) p, r);
}

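/* Three-way XOR (a ^ b ^ c). With AVX-512 this is a single ternary-logic
   instruction; immediate 0x96 is the truth table of a 3-input XOR. */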
static_always_inline __m128i __clib_unused
xor3 (__m128i a, __m128i b, __m128i c)
{
#if __AVX512F__
  return _mm_ternarylogic_epi32 (a, b, c, 0x96);
#else
  return a ^ b ^ c;
#endif
}

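/* 512-bit variants of the helpers above, compiled only when VAES is
   available: one 16-byte AES block from each of four independent buffers
   occupies one 128-bit lane of a 512-bit register. */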
#ifdef __VAES__
static_always_inline __m512i
xor3_x4 (__m512i a, __m512i b, __m512i c)
{
  return _mm512_ternarylogic_epi32 (a, b, c, 0x96);
}

static_always_inline __m512i
aes_block_load_x4 (u8 * src[], int i)
{
  __m512i r = { };
  r = _mm512_inserti64x2 (r, aes_block_load (src[0] + i), 0);
  r = _mm512_inserti64x2 (r, aes_block_load (src[1] + i), 1);
  r = _mm512_inserti64x2 (r, aes_block_load (src[2] + i), 2);
  r = _mm512_inserti64x2 (r, aes_block_load (src[3] + i), 3);
  return r;
}

static_always_inline void
aes_block_store_x4 (u8 * dst[], int i, __m512i r)
{
  aes_block_store (dst[0] + i, _mm512_extracti64x2_epi64 (r, 0));
  aes_block_store (dst[1] + i, _mm512_extracti64x2_epi64 (r, 1));
  aes_block_store (dst[2] + i, _mm512_extracti64x2_epi64 (r, 2));
  aes_block_store (dst[3] + i, _mm512_extracti64x2_epi64 (r, 3));
}
#endif

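/* CBC decryption with AES-NI (p[i] = D_k(c[i]) ^ c[i-1]). Unlike CBC
   encryption, decryption of consecutive blocks is independent, so four
   blocks are processed per iteration and their rounds are interleaved,
   which lets independent AESDEC operations overlap in the AES unit.
   'f' carries the previous ciphertext block (initially the IV). */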
static_always_inline void __clib_unused
aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
             aesni_key_size_t rounds)
{
  __m128i r0, r1, r2, r3, c0, c1, c2, c3, f;
  int i;

  f = aes_block_load (iv);

  while (count >= 64)
    {
      _mm_prefetch (src + 128, _MM_HINT_T0);
      _mm_prefetch (dst + 128, _MM_HINT_T0);

      c0 = aes_block_load (src);
      c1 = aes_block_load (src + 16);
      c2 = aes_block_load (src + 32);
      c3 = aes_block_load (src + 48);

      r0 = c0 ^ k[0];
      r1 = c1 ^ k[0];
      r2 = c2 ^ k[0];
      r3 = c3 ^ k[0];

      for (i = 1; i < rounds; i++)
        {
          r0 = _mm_aesdec_si128 (r0, k[i]);
          r1 = _mm_aesdec_si128 (r1, k[i]);
          r2 = _mm_aesdec_si128 (r2, k[i]);
          r3 = _mm_aesdec_si128 (r3, k[i]);
        }

      r0 = _mm_aesdeclast_si128 (r0, k[i]);
      r1 = _mm_aesdeclast_si128 (r1, k[i]);
      r2 = _mm_aesdeclast_si128 (r2, k[i]);
      r3 = _mm_aesdeclast_si128 (r3, k[i]);

      aes_block_store (dst, r0 ^ f);
      aes_block_store (dst + 16, r1 ^ c0);
      aes_block_store (dst + 32, r2 ^ c1);
      aes_block_store (dst + 48, r3 ^ c2);

      f = c3;

      count -= 64;
      src += 64;
      dst += 64;
    }

  while (count > 0)
    {
      c0 = aes_block_load (src);
      r0 = c0 ^ k[0];
      for (i = 1; i < rounds; i++)
        r0 = _mm_aesdec_si128 (r0, k[i]);
      r0 = _mm_aesdeclast_si128 (r0, k[i]);
      aes_block_store (dst, r0 ^ f);
      f = c0;
      count -= 16;
      src += 16;
      dst += 16;
    }
}

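/* CBC decryption with 512-bit VAES instructions: 4 blocks per register, up
   to 16 blocks per loop iteration. The permute vector {6,7,8..13} builds,
   for each register of ciphertext, the vector of preceding ciphertext
   blocks (last block of the previous register followed by the first three
   of the current one); the masked load places the 16-byte IV in the top
   lane of 'f' so the same shift works for the very first block. The tail
   loop uses lane masks (two 64-bit lanes per block) for partial registers. */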
#ifdef __VAES__
static_always_inline void
vaes_cbc_dec (__m512i * k, u8 * src, u8 * dst, u8 * iv, int count,
              aesni_key_size_t rounds)
{
  __m512i permute = { 6, 7, 8, 9, 10, 11, 12, 13 };
  __m512i r0, r1, r2, r3, c0, c1, c2, c3, f = { };
  __mmask8 m;
  int i, n_blocks = count >> 4;

  f = _mm512_mask_loadu_epi64 (f, 0xc0, (__m512i *) (iv - 48));

  while (n_blocks >= 16)
    {
      c0 = _mm512_loadu_si512 ((__m512i *) src);
      c1 = _mm512_loadu_si512 ((__m512i *) (src + 64));
      c2 = _mm512_loadu_si512 ((__m512i *) (src + 128));
      c3 = _mm512_loadu_si512 ((__m512i *) (src + 192));

      r0 = c0 ^ k[0];
      r1 = c1 ^ k[0];
      r2 = c2 ^ k[0];
      r3 = c3 ^ k[0];

      for (i = 1; i < rounds; i++)
        {
          r0 = _mm512_aesdec_epi128 (r0, k[i]);
          r1 = _mm512_aesdec_epi128 (r1, k[i]);
          r2 = _mm512_aesdec_epi128 (r2, k[i]);
          r3 = _mm512_aesdec_epi128 (r3, k[i]);
        }

      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
      r1 = _mm512_aesdeclast_epi128 (r1, k[i]);
      r2 = _mm512_aesdeclast_epi128 (r2, k[i]);
      r3 = _mm512_aesdeclast_epi128 (r3, k[i]);

      r0 ^= _mm512_permutex2var_epi64 (f, permute, c0);
      _mm512_storeu_si512 ((__m512i *) dst, r0);

      r1 ^= _mm512_permutex2var_epi64 (c0, permute, c1);
      _mm512_storeu_si512 ((__m512i *) (dst + 64), r1);

      r2 ^= _mm512_permutex2var_epi64 (c1, permute, c2);
      _mm512_storeu_si512 ((__m512i *) (dst + 128), r2);

      r3 ^= _mm512_permutex2var_epi64 (c2, permute, c3);
      _mm512_storeu_si512 ((__m512i *) (dst + 192), r3);

      f = c3;

      n_blocks -= 16;
      src += 256;
      dst += 256;
    }

  while (n_blocks > 0)
    {
      m = (1 << (n_blocks * 2)) - 1;
      c0 = _mm512_mask_loadu_epi64 (c0, m, (__m512i *) src);
      f = _mm512_permutex2var_epi64 (f, permute, c0);
      r0 = c0 ^ k[0];
      for (i = 1; i < rounds; i++)
        r0 = _mm512_aesdec_epi128 (r0, k[i]);
      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
      _mm512_mask_storeu_epi64 ((__m512i *) dst, m, r0 ^ f);
      f = c0;
      n_blocks -= 4;
      src += 64;
      dst += 64;
    }
}
#endif

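/* Number of buffers encrypted in parallel by the CBC encrypt path:
   16 with VAES (four 512-bit registers), otherwise 4. */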
#ifdef __VAES__
#define N 16
#define u32xN u32x16
#define u32xN_min_scalar u32x16_min_scalar
#define u32xN_is_all_zero u32x16_is_all_zero
#else
#define N 4
#define u32xN u32x4
#define u32xN_min_scalar u32x4_min_scalar
#define u32xN_is_all_zero u32x4_is_all_zero
#endif

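/* CBC encryption (c[i] = E_k(p[i] ^ c[i-1])). Within one buffer every block
   depends on the previous ciphertext block, so a single buffer cannot be
   parallelized. Instead N independent buffers are encrypted in parallel:
   whenever a lane finishes its buffer it is refilled with the next pending
   op, and lanes with no work left are pointed at a scratch dummy buffer
   whose output is discarded (tracked via dummy_mask). */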
static_always_inline u32
aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                       u32 n_ops, aesni_key_size_t ks)
{
  crypto_ia32_main_t *cm = &crypto_ia32_main;
  crypto_ia32_per_thread_data_t *ptd = vec_elt_at_index (cm->per_thread_data,
                                                          vm->thread_index);
  int rounds = AESNI_KEY_ROUNDS (ks);
  u8 dummy[8192];
  u32 i, j, count, n_left = n_ops;
  u32xN dummy_mask = { };
  u32xN len = { };
  vnet_crypto_key_index_t key_index[N];
  u8 *src[N] = { };
  u8 *dst[N] = { };
  union
  {
    __m128i x1[N];
    __m512i x4[N / 4];
  } r = { }, k[15] = { };

  for (i = 0; i < N; i++)
    key_index[i] = ~0;

more:
  for (i = 0; i < N; i++)
    if (len[i] == 0)
      {
        if (n_left == 0)
          {
            /* no more work to enqueue, so we are enqueueing dummy buffer */
            src[i] = dst[i] = dummy;
            len[i] = sizeof (dummy);
            dummy_mask[i] = 0;
          }
        else
          {
            if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
              {
                r.x1[i] = ptd->cbc_iv[i];
                aes_block_store (ops[0]->iv, r.x1[i]);
                ptd->cbc_iv[i] = _mm_aesenc_si128 (r.x1[i], r.x1[i]);
              }
            else
              r.x1[i] = aes_block_load (ops[0]->iv);

            src[i] = ops[0]->src;
            dst[i] = ops[0]->dst;
            len[i] = ops[0]->len;
            dummy_mask[i] = ~0;
            if (key_index[i] != ops[0]->key_index)
              {
                aes_cbc_key_data_t *kd;
                key_index[i] = ops[0]->key_index;
                kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
                for (j = 0; j < rounds + 1; j++)
                  k[j].x1[i] = kd->encrypt_key[j];
              }
            ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
            n_left--;
            ops++;
          }
      }

  count = u32xN_min_scalar (len);

  ASSERT (count % 16 == 0);

  for (i = 0; i < count; i += 16)
    {
#ifdef __VAES__
      r.x4[0] = xor3_x4 (r.x4[0], aes_block_load_x4 (src, i), k[0].x4[0]);
      r.x4[1] = xor3_x4 (r.x4[1], aes_block_load_x4 (src + 4, i), k[0].x4[1]);
      r.x4[2] = xor3_x4 (r.x4[2], aes_block_load_x4 (src + 8, i), k[0].x4[2]);
      r.x4[3] = xor3_x4 (r.x4[3], aes_block_load_x4 (src + 12, i), k[0].x4[3]);

      for (j = 1; j < rounds; j++)
        {
          r.x4[0] = _mm512_aesenc_epi128 (r.x4[0], k[j].x4[0]);
          r.x4[1] = _mm512_aesenc_epi128 (r.x4[1], k[j].x4[1]);
          r.x4[2] = _mm512_aesenc_epi128 (r.x4[2], k[j].x4[2]);
          r.x4[3] = _mm512_aesenc_epi128 (r.x4[3], k[j].x4[3]);
        }

      r.x4[0] = _mm512_aesenclast_epi128 (r.x4[0], k[j].x4[0]);
      r.x4[1] = _mm512_aesenclast_epi128 (r.x4[1], k[j].x4[1]);
      r.x4[2] = _mm512_aesenclast_epi128 (r.x4[2], k[j].x4[2]);
      r.x4[3] = _mm512_aesenclast_epi128 (r.x4[3], k[j].x4[3]);

      aes_block_store_x4 (dst, i, r.x4[0]);
      aes_block_store_x4 (dst + 4, i, r.x4[1]);
      aes_block_store_x4 (dst + 8, i, r.x4[2]);
      aes_block_store_x4 (dst + 12, i, r.x4[3]);
#else
      r.x1[0] = xor3 (r.x1[0], aes_block_load (src[0] + i), k[0].x1[0]);
      r.x1[1] = xor3 (r.x1[1], aes_block_load (src[1] + i), k[0].x1[1]);
      r.x1[2] = xor3 (r.x1[2], aes_block_load (src[2] + i), k[0].x1[2]);
      r.x1[3] = xor3 (r.x1[3], aes_block_load (src[3] + i), k[0].x1[3]);

      for (j = 1; j < rounds; j++)
        {
          r.x1[0] = _mm_aesenc_si128 (r.x1[0], k[j].x1[0]);
          r.x1[1] = _mm_aesenc_si128 (r.x1[1], k[j].x1[1]);
          r.x1[2] = _mm_aesenc_si128 (r.x1[2], k[j].x1[2]);
          r.x1[3] = _mm_aesenc_si128 (r.x1[3], k[j].x1[3]);
        }

      r.x1[0] = _mm_aesenclast_si128 (r.x1[0], k[j].x1[0]);
      r.x1[1] = _mm_aesenclast_si128 (r.x1[1], k[j].x1[1]);
      r.x1[2] = _mm_aesenclast_si128 (r.x1[2], k[j].x1[2]);
      r.x1[3] = _mm_aesenclast_si128 (r.x1[3], k[j].x1[3]);

      aes_block_store (dst[0] + i, r.x1[0]);
      aes_block_store (dst[1] + i, r.x1[1]);
      aes_block_store (dst[2] + i, r.x1[2]);
      aes_block_store (dst[3] + i, r.x1[3]);
#endif
    }

  for (i = 0; i < N; i++)
    {
      src[i] += count;
      dst[i] += count;
      len[i] -= count;
    }

  if (n_left > 0)
    goto more;

  if (!u32xN_is_all_zero (len & dummy_mask))
    goto more;

  return n_ops;
}

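/* Decryption ops handler: buffers are processed one at a time, since CBC
   decryption of a single buffer already parallelizes across blocks. */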
static_always_inline u32
aesni_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                       u32 n_ops, aesni_key_size_t ks)
{
  crypto_ia32_main_t *cm = &crypto_ia32_main;
  int rounds = AESNI_KEY_ROUNDS (ks);
  vnet_crypto_op_t *op = ops[0];
  aes_cbc_key_data_t *kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
  u32 n_left = n_ops;

decrypt:
#ifdef __VAES__
  vaes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
#else
  aes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
#endif
  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (--n_left)
    {
      op += 1;
      kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
      goto decrypt;
    }

  return n_ops;
}

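/* Expand the AES key schedule once per key: the encrypt schedule as-is and
   the decrypt schedule derived with aes_key_enc_to_dec (for VAES builds the
   decrypt round keys are broadcast to all four 128-bit lanes). */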
static_always_inline void *
aesni_cbc_key_exp (vnet_crypto_key_t * key, aesni_key_size_t ks)
{
  __m128i e[15], d[15];
  aes_cbc_key_data_t *kd;
  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
  aes_key_expand (e, key->data, ks);
  aes_key_expand (d, key->data, ks);
  aes_key_enc_to_dec (d, ks);
  for (int i = 0; i < AESNI_KEY_ROUNDS (ks) + 1; i++)
    {
#if __VAES__
      kd->decrypt_key[i] = _mm512_broadcast_i64x2 (d[i]);
#else
      kd->decrypt_key[i] = d[i];
#endif
      kd->encrypt_key[i] = e[i];
    }
  return kd;
}

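/* Generate per-key-size (128/192/256) wrappers around the generic
   encrypt/decrypt handlers and the key expansion function. */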
#define foreach_aesni_cbc_handler_type _(128) _(192) _(256)

#define _(x) \
static u32 aesni_ops_dec_aes_cbc_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_dec_aes_cbc (vm, ops, n_ops, AESNI_KEY_##x); } \
static u32 aesni_ops_enc_aes_cbc_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_enc_aes_cbc (vm, ops, n_ops, AESNI_KEY_##x); } \
static void * aesni_cbc_key_exp_##x (vnet_crypto_key_t *key) \
{ return aesni_cbc_key_exp (key, AESNI_KEY_##x); }

foreach_aesni_cbc_handler_type;
#undef _

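/* This file is built once per instruction-set variant, so the init function
   gets a different name in each build. It seeds the per-thread IV state
   from /dev/urandom and registers the CBC handlers with the crypto
   framework. */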
clib_error_t *
#ifdef __VAES__
crypto_ia32_aesni_cbc_init_vaes (vlib_main_t * vm)
#elif __AVX512F__
crypto_ia32_aesni_cbc_init_avx512 (vlib_main_t * vm)
#elif __AVX2__
crypto_ia32_aesni_cbc_init_avx2 (vlib_main_t * vm)
#else
crypto_ia32_aesni_cbc_init_sse42 (vlib_main_t * vm)
#endif
{
  crypto_ia32_main_t *cm = &crypto_ia32_main;
  crypto_ia32_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, cm->per_thread_data)
    {
      for (int i = 0; i < 4; i++)
        {
          if (read (fd, ptd->cbc_iv, sizeof (ptd->cbc_iv)) !=
              sizeof (ptd->cbc_iv))
            {
              err = clib_error_return_unix (0, "'/dev/urandom' read failure");
              goto error;
            }
        }
    }

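/* Register one encrypt and one decrypt handler plus the key expansion
   function for each supported AES key size. */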
#define _(x) \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
                                    VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
                                    aesni_ops_enc_aes_cbc_##x); \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
                                    VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
                                    aesni_ops_dec_aes_cbc_##x); \
  cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_CBC] = aesni_cbc_key_exp_##x;
  foreach_aesni_cbc_handler_type;
#undef _

error:
  close (fd);
  return err;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */