/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <x86intrin.h>
#include <crypto_native/crypto_native.h>
#include <crypto_native/aes.h>
#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize ("O3")
#endif

typedef struct
{
  u8x16 encrypt_key[15];
#if __VAES__
  __m512i decrypt_key[15];
#else
  u8x16 decrypt_key[15];
#endif
} aes_cbc_key_data_t;
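
/* 3-input XOR helpers: vpternlogd with immediate 0x96 (the truth table of
   a ^ b ^ c) folds the three-way XOR into a single instruction, in both the
   128-bit and the 512-bit form.  */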
static_always_inline u8x16 __clib_unused
xor3 (u8x16 a, u8x16 b, u8x16 c)
{
  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
					 (__m128i) c, 0x96);
}
static_always_inline __m512i
xor3_x4 (__m512i a, __m512i b, __m512i c)
{
  return _mm512_ternarylogic_epi32 (a, b, c, 0x96);
}
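
/* Gather one 16-byte AES block from each of four separate buffers (at byte
   offset i) into the four 128-bit lanes of a single 512-bit register.  */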
static_always_inline __m512i
aes_block_load_x4 (u8 * src[], int i)
{
  __m512i r = { };
  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[0] + i), 0);
  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[1] + i), 1);
  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[2] + i), 2);
  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[3] + i), 3);
  return r;
}
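
/* Scatter the four 128-bit lanes of r back out, one AES block per
   destination buffer, mirroring aes_block_load_x4.  */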
static_always_inline void
aes_block_store_x4 (u8 * dst[], int i, __m512i r)
{
  aes_block_store (dst[0] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 0));
  aes_block_store (dst[1] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 1));
  aes_block_store (dst[2] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 2));
  aes_block_store (dst[3] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 3));
}
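
/* AES-NI CBC decryption.  Decryption of consecutive CBC blocks is
   independent, so four blocks are kept in flight per iteration to hide the
   latency of the AES round instructions; each plaintext is recovered by
   XOR-ing the decrypted block with the previous ciphertext block (the IV
   for the very first block).  A single-block loop handles the tail.  */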
static_always_inline void __clib_unused
aes_cbc_dec (u8x16 * k, u8 * src, u8 * dst, u8 * iv, int count,
	     aes_key_size_t rounds)
{
  u8x16 r0, r1, r2, r3, c0, c1, c2, c3, f;
  int i;

  f = aes_block_load (iv);

  while (count >= 64)
    {
      _mm_prefetch (src + 128, _MM_HINT_T0);
      _mm_prefetch (dst + 128, _MM_HINT_T0);

      c0 = aes_block_load (src);
      c1 = aes_block_load (src + 16);
      c2 = aes_block_load (src + 32);
      c3 = aes_block_load (src + 48);

      r0 = c0 ^ k[0];
      r1 = c1 ^ k[0];
      r2 = c2 ^ k[0];
      r3 = c3 ^ k[0];

      for (i = 1; i < rounds; i++)
	{
	  r0 = aes_dec_round (r0, k[i]);
	  r1 = aes_dec_round (r1, k[i]);
	  r2 = aes_dec_round (r2, k[i]);
	  r3 = aes_dec_round (r3, k[i]);
	}
      r0 = aes_dec_last_round (r0, k[i]);
      r1 = aes_dec_last_round (r1, k[i]);
      r2 = aes_dec_last_round (r2, k[i]);
      r3 = aes_dec_last_round (r3, k[i]);

      aes_block_store (dst, r0 ^ f);
      aes_block_store (dst + 16, r1 ^ c0);
      aes_block_store (dst + 32, r2 ^ c1);
      aes_block_store (dst + 48, r3 ^ c2);

      f = c3;
      count -= 64;
      src += 64;
      dst += 64;
    }

  while (count > 0)
    {
      c0 = aes_block_load (src);
      r0 = c0 ^ k[0];
      for (i = 1; i < rounds; i++)
	r0 = aes_dec_round (r0, k[i]);
      r0 = aes_dec_last_round (r0, k[i]);
      aes_block_store (dst, r0 ^ f);
      f = c0;
      count -= 16;
      src += 16;
      dst += 16;
    }
}
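
/* VAES (512-bit) CBC decryption.  Each __m512i holds 4 ciphertext blocks and
   4 registers (16 blocks) are processed per iteration.  The vector of
   "previous ciphertext" blocks is built with _mm512_permutex2var_epi64,
   which places the preceding register's last block in front of the current
   register's first three blocks; the IV is mask-loaded into the top 128 bits
   of f so the same permute works for the very first block.  */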
#if __VAES__
static_always_inline void
vaes_cbc_dec (__m512i * k, u8 * src, u8 * dst, u8 * iv, int count,
	      aes_key_size_t rounds)
{
  __m512i permute = { 6, 7, 8, 9, 10, 11, 12, 13 };
  __m512i r0, r1, r2, r3, c0, c1, c2, c3, f = { };
  __mmask8 m;
  int i, n_blocks = count >> 4;

  f = _mm512_mask_loadu_epi64 (f, 0xc0, (__m512i *) (iv - 48));

  while (n_blocks >= 16)
    {
      c0 = _mm512_loadu_si512 ((__m512i *) src);
      c1 = _mm512_loadu_si512 ((__m512i *) (src + 64));
      c2 = _mm512_loadu_si512 ((__m512i *) (src + 128));
      c3 = _mm512_loadu_si512 ((__m512i *) (src + 192));

      r0 = c0 ^ k[0];
      r1 = c1 ^ k[0];
      r2 = c2 ^ k[0];
      r3 = c3 ^ k[0];

      for (i = 1; i < rounds; i++)
	{
	  r0 = _mm512_aesdec_epi128 (r0, k[i]);
	  r1 = _mm512_aesdec_epi128 (r1, k[i]);
	  r2 = _mm512_aesdec_epi128 (r2, k[i]);
	  r3 = _mm512_aesdec_epi128 (r3, k[i]);
	}
      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
      r1 = _mm512_aesdeclast_epi128 (r1, k[i]);
      r2 = _mm512_aesdeclast_epi128 (r2, k[i]);
      r3 = _mm512_aesdeclast_epi128 (r3, k[i]);

      r0 ^= _mm512_permutex2var_epi64 (f, permute, c0);
      _mm512_storeu_si512 ((__m512i *) dst, r0);

      r1 ^= _mm512_permutex2var_epi64 (c0, permute, c1);
      _mm512_storeu_si512 ((__m512i *) (dst + 64), r1);

      r2 ^= _mm512_permutex2var_epi64 (c1, permute, c2);
      _mm512_storeu_si512 ((__m512i *) (dst + 128), r2);

      r3 ^= _mm512_permutex2var_epi64 (c2, permute, c3);
      _mm512_storeu_si512 ((__m512i *) (dst + 192), r3);

      f = c3;
      n_blocks -= 16;
      src += 256;
      dst += 256;
    }

  while (n_blocks > 0)
    {
      m = (1 << (n_blocks * 2)) - 1;
      c0 = _mm512_mask_loadu_epi64 (c0, m, (__m512i *) src);
      f = _mm512_permutex2var_epi64 (f, permute, c0);
      r0 = c0 ^ k[0];
      for (i = 1; i < rounds; i++)
	r0 = _mm512_aesdec_epi128 (r0, k[i]);
      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
      _mm512_mask_storeu_epi64 ((__m512i *) dst, m, r0 ^ f);
      f = c0;
      n_blocks -= 4;
      src += 64;
      dst += 64;
    }
}
#endif
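
/* Lane-count selection for the batched encrypt path below: 16 parallel
   buffers (u32x16 bookkeeping vectors) when VAES 512-bit registers are
   available, otherwise 4.  */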
#if __VAES__
#define N 16
#define u32xN u32x16
#define u32xN_min_scalar u32x16_min_scalar
#define u32xN_is_all_zero u32x16_is_all_zero
#else
#define N 4
#define u32xN u32x4
#define u32xN_min_scalar u32x4_min_scalar
#define u32xN_is_all_zero u32x4_is_all_zero
#endif
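
/* CBC encryption cannot be parallelized within a single buffer, so this
   routine interleaves N independent buffers, one per lane: each lane carries
   its own src/dst pointers, remaining length, chaining value and round keys.
   Idle lanes are pointed at a dummy buffer and masked out of the completion
   check, and every pass processes only u32xN_min_scalar (len) bytes so that
   all lanes advance in lock step.  */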
static_always_inline u32
aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
		       u32 n_ops, aes_key_size_t ks)
  crypto_native_main_t *cm = &crypto_native_main;
  crypto_native_per_thread_data_t *ptd =
    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  int rounds = AES_KEY_ROUNDS (ks);
  u32 i, j, count, n_left = n_ops;
  u32xN dummy_mask = { };
  vnet_crypto_key_index_t key_index[N];
  } r = { }, k[15] = { };

  for (i = 0; i < N; i++)

  for (i = 0; i < N; i++)

	/* no more work to enqueue, so we are enqueueing dummy buffer */
	src[i] = dst[i] = dummy;
	len[i] = sizeof (dummy);

      if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
	  r.x1[i] = ptd->cbc_iv[i];
	  aes_block_store (ops[0]->iv, r.x1[i]);
	  ptd->cbc_iv[i] = aes_enc_round (r.x1[i], r.x1[i]);
	r.x1[i] = aes_block_load (ops[0]->iv);

      src[i] = ops[0]->src;
      dst[i] = ops[0]->dst;
      len[i] = ops[0]->len;

      if (key_index[i] != ops[0]->key_index)
	  aes_cbc_key_data_t *kd;
	  key_index[i] = ops[0]->key_index;
	  kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
	  for (j = 0; j < rounds + 1; j++)
	    k[j].x1[i] = kd->encrypt_key[j];
      ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  count = u32xN_min_scalar (len);

  ASSERT (count % 16 == 0);
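
  /* One pass: for every lane, XOR the next plaintext block with the
     previous ciphertext block (kept in r) and the first round key, run the
     AES rounds, and store the result, which also becomes the chaining
     value for the following block.  */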
  for (i = 0; i < count; i += 16)
    {
#if __VAES__
      r.x4[0] = xor3_x4 (r.x4[0], aes_block_load_x4 (src, i), k[0].x4[0]);
      r.x4[1] = xor3_x4 (r.x4[1], aes_block_load_x4 (src + 4, i), k[0].x4[1]);
      r.x4[2] = xor3_x4 (r.x4[2], aes_block_load_x4 (src + 8, i), k[0].x4[2]);
      r.x4[3] = xor3_x4 (r.x4[3], aes_block_load_x4 (src + 12, i), k[0].x4[3]);

      for (j = 1; j < rounds; j++)
	{
	  r.x4[0] = _mm512_aesenc_epi128 (r.x4[0], k[j].x4[0]);
	  r.x4[1] = _mm512_aesenc_epi128 (r.x4[1], k[j].x4[1]);
	  r.x4[2] = _mm512_aesenc_epi128 (r.x4[2], k[j].x4[2]);
	  r.x4[3] = _mm512_aesenc_epi128 (r.x4[3], k[j].x4[3]);
	}
      r.x4[0] = _mm512_aesenclast_epi128 (r.x4[0], k[j].x4[0]);
      r.x4[1] = _mm512_aesenclast_epi128 (r.x4[1], k[j].x4[1]);
      r.x4[2] = _mm512_aesenclast_epi128 (r.x4[2], k[j].x4[2]);
      r.x4[3] = _mm512_aesenclast_epi128 (r.x4[3], k[j].x4[3]);

      aes_block_store_x4 (dst, i, r.x4[0]);
      aes_block_store_x4 (dst + 4, i, r.x4[1]);
      aes_block_store_x4 (dst + 8, i, r.x4[2]);
      aes_block_store_x4 (dst + 12, i, r.x4[3]);
#else
      r.x1[0] = xor3 (r.x1[0], aes_block_load (src[0] + i), k[0].x1[0]);
      r.x1[1] = xor3 (r.x1[1], aes_block_load (src[1] + i), k[0].x1[1]);
      r.x1[2] = xor3 (r.x1[2], aes_block_load (src[2] + i), k[0].x1[2]);
      r.x1[3] = xor3 (r.x1[3], aes_block_load (src[3] + i), k[0].x1[3]);

      for (j = 1; j < rounds; j++)
	{
	  r.x1[0] = aes_enc_round (r.x1[0], k[j].x1[0]);
	  r.x1[1] = aes_enc_round (r.x1[1], k[j].x1[1]);
	  r.x1[2] = aes_enc_round (r.x1[2], k[j].x1[2]);
	  r.x1[3] = aes_enc_round (r.x1[3], k[j].x1[3]);
	}
      r.x1[0] = aes_enc_last_round (r.x1[0], k[j].x1[0]);
      r.x1[1] = aes_enc_last_round (r.x1[1], k[j].x1[1]);
      r.x1[2] = aes_enc_last_round (r.x1[2], k[j].x1[2]);
      r.x1[3] = aes_enc_last_round (r.x1[3], k[j].x1[3]);

      aes_block_store (dst[0] + i, r.x1[0]);
      aes_block_store (dst[1] + i, r.x1[1]);
      aes_block_store (dst[2] + i, r.x1[2]);
      aes_block_store (dst[3] + i, r.x1[3]);
#endif
    }

  for (i = 0; i < N; i++)

  if (!u32xN_is_all_zero (len & dummy_mask))
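
/* CBC decryption has no inter-block dependency on the plaintext side, so
   each op is simply handed to the single-buffer decrypt routine; the VAES
   variant is used when available.  */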
static_always_inline u32
aesni_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
		       u32 n_ops, aes_key_size_t ks)
{
  crypto_native_main_t *cm = &crypto_native_main;
  int rounds = AES_KEY_ROUNDS (ks);
  vnet_crypto_op_t *op = ops[0];
  aes_cbc_key_data_t *kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
  u32 n_left = n_ops;

decrypt:
#if __VAES__
  vaes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
#else
  aes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
#endif
  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (--n_left)
    {
      op += 1;
      kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
      goto decrypt;
    }

  return n_ops;
}
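
/* Expand the user key once at key-add time: the encryption schedule is kept
   as 128-bit round keys, and the decryption schedule is derived from it (and
   broadcast to all four 128-bit lanes when the VAES path is compiled in).  */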
static_always_inline void *
aesni_cbc_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks)
{
  u8x16 e[15], d[15];
  aes_cbc_key_data_t *kd;
  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
  aes_key_expand (e, key->data, ks);
  aes_key_enc_to_dec (e, d, ks);
  for (int i = 0; i < AES_KEY_ROUNDS (ks) + 1; i++)
    {
#if __VAES__
      kd->decrypt_key[i] = _mm512_broadcast_i64x2 ((__m128i) d[i]);
#else
      kd->decrypt_key[i] = d[i];
#endif
      kd->encrypt_key[i] = e[i];
    }
  return kd;
}
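
/* Generate the per-key-size (128/192/256) handler and key-expansion
   wrappers, and register them with the crypto engine below.  */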
#define foreach_aesni_cbc_handler_type _(128) _(192) _(256)

#define _(x) \
static u32 aesni_ops_dec_aes_cbc_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
static u32 aesni_ops_enc_aes_cbc_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
static void * aesni_cbc_key_exp_##x (vnet_crypto_key_t *key) \
{ return aesni_cbc_key_exp (key, AES_KEY_##x); }

foreach_aesni_cbc_handler_type;
#undef _
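
/* Per-ISA init entry point (one per crypto_native instruction-set variant):
   seeds the per-thread CBC IV state from /dev/urandom and registers the
   AES-CBC encrypt/decrypt handlers and the key expansion function.  */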
clib_error_t *
#if __VAES__
crypto_native_aes_cbc_init_vaes (vlib_main_t * vm)
#elif __AVX512F__
crypto_native_aes_cbc_init_avx512 (vlib_main_t * vm)
#elif __AVX2__
crypto_native_aes_cbc_init_avx2 (vlib_main_t * vm)
#else
crypto_native_aes_cbc_init_sse42 (vlib_main_t * vm)
#endif
{
  crypto_native_main_t *cm = &crypto_native_main;
  crypto_native_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, cm->per_thread_data)
    {
      for (int i = 0; i < 4; i++)
	{
	  if (read (fd, ptd->cbc_iv, sizeof (ptd->cbc_iv)) !=
	      sizeof (ptd->cbc_iv))
	    {
	      err = clib_error_return_unix (0, "'/dev/urandom' read failure");
	      goto error;
	    }
	}
    }
#define _(x) \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
				    VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
				    aesni_ops_enc_aes_cbc_##x); \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
				    VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
				    aesni_ops_dec_aes_cbc_##x); \
  cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_CBC] = aesni_cbc_key_exp_##x;
  foreach_aesni_cbc_handler_type;
#undef _

error:
  close (fd);
  return err;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */