/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <crypto_native/crypto_native.h>
#include <crypto_native/aes.h>
#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize ("O3")
#endif
  u8x16 encrypt_key[15];
#ifdef __VAES__
  u8x64 decrypt_key[15];
#else
  u8x16 decrypt_key[15];
#endif
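
/*
 * CBC decryption has no inter-block dependency, so the 128-bit decrypt
 * path below works on four ciphertext blocks per iteration to keep the
 * AES units busy.
 */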
static_always_inline void __clib_unused
aes_cbc_dec (u8x16 * k, u8x16u * src, u8x16u * dst, u8x16u * iv, int count,
             int rounds)
      clib_prefetch_load (src + 8);
      clib_prefetch_load (dst + 8);
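
      /* x86 AES-NI: run the middle decryption rounds on all four blocks */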
      for (int i = 1; i < rounds; i++)
        {
          r[0] = aes_dec_round (r[0], k[i]);
          r[1] = aes_dec_round (r[1], k[i]);
          r[2] = aes_dec_round (r[2], k[i]);
          r[3] = aes_dec_round (r[3], k[i]);
        }

      r[0] = aes_dec_last_round (r[0], k[rounds]);
      r[1] = aes_dec_last_round (r[1], k[rounds]);
      r[2] = aes_dec_last_round (r[2], k[rounds]);
      r[3] = aes_dec_last_round (r[3], k[rounds]);
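
      /* Arm crypto extensions: same four-block structure using AESD/AESIMC */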
      for (int i = 0; i < rounds - 1; i++)
        {
          r[0] = vaesimcq_u8 (vaesdq_u8 (r[0], k[i]));
          r[1] = vaesimcq_u8 (vaesdq_u8 (r[1], k[i]));
          r[2] = vaesimcq_u8 (vaesdq_u8 (r[2], k[i]));
          r[3] = vaesimcq_u8 (vaesdq_u8 (r[3], k[i]));
        }
      r[0] = vaesdq_u8 (r[0], k[rounds - 1]) ^ k[rounds];
      r[1] = vaesdq_u8 (r[1], k[rounds - 1]) ^ k[rounds];
      r[2] = vaesdq_u8 (r[2], k[rounds - 1]) ^ k[rounds];
      r[3] = vaesdq_u8 (r[3], k[rounds - 1]) ^ k[rounds];
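
      /* tail: decrypt any remaining blocks one at a time */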
      c[0] = r[0] = src[0];

      for (int i = 1; i < rounds; i++)
        r[0] = aes_dec_round (r[0], k[i]);
      r[0] = aes_dec_last_round (r[0], k[rounds]);

      c[0] = r[0] = src[0];
      for (int i = 0; i < rounds - 1; i++)
        r[0] = vaesimcq_u8 (vaesdq_u8 (r[0], k[i]));
      r[0] = vaesdq_u8 (r[0], k[rounds - 1]) ^ k[rounds];
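
/*
 * Helpers for the AVX512/VAES path: gather and scatter four 16-byte AES
 * blocks (one per buffer) to and from a single 512-bit register.
 */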
static_always_inline u8x64
aes_block_load_x4 (u8 * src[], int i)
{
  u8x64 r = { };
  r = u8x64_insert_u8x16 (r, aes_block_load (src[0] + i), 0);
  r = u8x64_insert_u8x16 (r, aes_block_load (src[1] + i), 1);
  r = u8x64_insert_u8x16 (r, aes_block_load (src[2] + i), 2);
  r = u8x64_insert_u8x16 (r, aes_block_load (src[3] + i), 3);
  return r;
}
static_always_inline void
aes_block_store_x4 (u8 * dst[], int i, u8x64 r)
{
  aes_block_store (dst[0] + i, u8x64_extract_u8x16 (r, 0));
  aes_block_store (dst[1] + i, u8x64_extract_u8x16 (r, 1));
  aes_block_store (dst[2] + i, u8x64_extract_u8x16 (r, 2));
  aes_block_store (dst[3] + i, u8x64_extract_u8x16 (r, 3));
}
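
/* Build the "previous ciphertext block" vector needed for the CBC XOR:
   the last block of 'a' followed by the first three blocks of 'b'. */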
static_always_inline u8x64
aes_cbc_dec_permute (u8x64 a, u8x64 b)
{
  __m512i perm = { 6, 7, 8, 9, 10, 11, 12, 13 };
  return (u8x64) _mm512_permutex2var_epi64 ((__m512i) a, perm, (__m512i) b);
}
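
/*
 * AVX512/VAES CBC decrypt: 16 blocks per main-loop iteration, four per
 * 512-bit register; the IV is loaded into the top lane of 'f' so the
 * first block is chained exactly like the rest.
 */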
static_always_inline void
vaes_cbc_dec (u8x64 * k, u8x64u * src, u8x64u * dst, u8x16 * iv, int count,
              aes_key_size_t rounds)
{
  u8x64 f, r[4], c[4] = { };
  __mmask8 m;
  int i, n_blocks = count >> 4;

  f = (u8x64) _mm512_mask_loadu_epi64 (_mm512_setzero_si512 (), 0xc0,
                                       (__m512i *) (iv - 3));

  while (n_blocks >= 16)
    {
      for (i = 1; i < rounds; i++)
        {
          r[0] = aes_dec_round_x4 (r[0], k[i]);
          r[1] = aes_dec_round_x4 (r[1], k[i]);
          r[2] = aes_dec_round_x4 (r[2], k[i]);
          r[3] = aes_dec_round_x4 (r[3], k[i]);
        }

      r[0] = aes_dec_last_round_x4 (r[0], k[i]);
      r[1] = aes_dec_last_round_x4 (r[1], k[i]);
      r[2] = aes_dec_last_round_x4 (r[2], k[i]);
      r[3] = aes_dec_last_round_x4 (r[3], k[i]);
      dst[0] = r[0] ^= aes_cbc_dec_permute (f, c[0]);
      dst[1] = r[1] ^= aes_cbc_dec_permute (c[0], c[1]);
      dst[2] = r[2] ^= aes_cbc_dec_permute (c[1], c[2]);
      dst[3] = r[3] ^= aes_cbc_dec_permute (c[2], c[3]);
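
      /* tail: process the remaining blocks, up to four at a time, with
         masked loads and stores */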
      m = (1 << (n_blocks * 2)) - 1;
      c[0] = (u8x64) _mm512_mask_loadu_epi64 ((__m512i) c[0], m,
                                              (__m512i *) src);
      f = aes_cbc_dec_permute (f, c[0]);
      r[0] = c[0] ^ k[0];
      for (i = 1; i < rounds; i++)
        r[0] = aes_dec_round_x4 (r[0], k[i]);
      r[0] = aes_dec_last_round_x4 (r[0], k[i]);
      _mm512_mask_storeu_epi64 ((__m512i *) dst, m, (__m512i) (r[0] ^ f));
#ifdef __VAES__
#define u32xN_min_scalar u32x16_min_scalar
#define u32xN_is_all_zero u32x16_is_all_zero
#define u32xN_splat u32x16_splat
#else
#define u32xN_min_scalar u32x4_min_scalar
#define u32xN_is_all_zero u32x4_is_all_zero
#define u32xN_splat u32x4_splat
#endif
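
/*
 * CBC encryption is serial within one buffer, so the encrypt path
 * interleaves N independent operations (4 lanes with 128-bit AES-NI/Neon,
 * 16 with AVX512/VAES) and keeps idle lanes busy with a dummy buffer.
 */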
static_always_inline u32
aes_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                     u32 n_ops, aes_key_size_t ks)
{
  crypto_native_main_t *cm = &crypto_native_main;
  crypto_native_per_thread_data_t *ptd =
    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  int rounds = AES_KEY_ROUNDS (ks);
  u32 i, j, count, n_left = n_ops;
  u32xN dummy_mask = { };
  u32xN len = { };
  vnet_crypto_key_index_t key_index[N];
#ifdef __VAES__
  u8x64 r[N / 4] = { };
  u8x64 k[15][N / 4] = { };
  u8x16 *kq, *rq = (u8x16 *) r;
#else
  u8x16 r[N] = { };
  u8x16 k[15][N] = { };
#endif
  for (i = 0; i < N; i++)
    key_index[i] = ~0;
  for (i = 0; i < N; i++)
    if (len[i] == 0)
      {
        if (n_left == 0)
          {
            /* no more work to enqueue, so we are enqueueing a dummy buffer */
            src[i] = dst[i] = dummy;
            len[i] = sizeof (dummy);
          }
        else
          {
            u8x16 t;
            if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
              {
                t = ptd->cbc_iv[i];
                *(u8x16u *) ops[0]->iv = t;
                ptd->cbc_iv[i] = aes_enc_round (t, t);
              }
            else
              t = aes_block_load (ops[0]->iv);
            src[i] = ops[0]->src;
            dst[i] = ops[0]->dst;
            len[i] = ops[0]->len;
            if (key_index[i] != ops[0]->key_index)
              {
                aes_cbc_key_data_t *kd;
                key_index[i] = ops[0]->key_index;
                kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
                for (j = 0; j < rounds + 1; j++)
                  {
#ifdef __VAES__
                    kq = (u8x16 *) k[j];
                    kq[i] = kd->encrypt_key[j];
#else
                    k[j][i] = kd->encrypt_key[j];
#endif
                  }
              }
            ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
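
  /* process only as many bytes as every lane has left */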
  count = u32xN_min_scalar (len);

  ASSERT (count % 16 == 0);

  for (i = 0; i < count; i += 16)
      r[0] = u8x64_xor3 (r[0], aes_block_load_x4 (src, i), k[0][0]);
      r[1] = u8x64_xor3 (r[1], aes_block_load_x4 (src + 4, i), k[0][1]);
      r[2] = u8x64_xor3 (r[2], aes_block_load_x4 (src + 8, i), k[0][2]);
      r[3] = u8x64_xor3 (r[3], aes_block_load_x4 (src + 12, i), k[0][3]);
      for (j = 1; j < rounds; j++)
        {
          r[0] = aes_enc_round_x4 (r[0], k[j][0]);
          r[1] = aes_enc_round_x4 (r[1], k[j][1]);
          r[2] = aes_enc_round_x4 (r[2], k[j][2]);
          r[3] = aes_enc_round_x4 (r[3], k[j][3]);
        }

      r[0] = aes_enc_last_round_x4 (r[0], k[j][0]);
      r[1] = aes_enc_last_round_x4 (r[1], k[j][1]);
      r[2] = aes_enc_last_round_x4 (r[2], k[j][2]);
      r[3] = aes_enc_last_round_x4 (r[3], k[j][3]);
      aes_block_store_x4 (dst, i, r[0]);
      aes_block_store_x4 (dst + 4, i, r[1]);
      aes_block_store_x4 (dst + 8, i, r[2]);
      aes_block_store_x4 (dst + 12, i, r[3]);
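
      /* 128-bit AES-NI path: one block per lane, four lanes */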
      r[0] = u8x16_xor3 (r[0], aes_block_load (src[0] + i), k[0][0]);
      r[1] = u8x16_xor3 (r[1], aes_block_load (src[1] + i), k[0][1]);
      r[2] = u8x16_xor3 (r[2], aes_block_load (src[2] + i), k[0][2]);
      r[3] = u8x16_xor3 (r[3], aes_block_load (src[3] + i), k[0][3]);
      for (j = 1; j < rounds; j++)
        {
          r[0] = aes_enc_round (r[0], k[j][0]);
          r[1] = aes_enc_round (r[1], k[j][1]);
          r[2] = aes_enc_round (r[2], k[j][2]);
          r[3] = aes_enc_round (r[3], k[j][3]);
        }

      r[0] = aes_enc_last_round (r[0], k[j][0]);
      r[1] = aes_enc_last_round (r[1], k[j][1]);
      r[2] = aes_enc_last_round (r[2], k[j][2]);
      r[3] = aes_enc_last_round (r[3], k[j][3]);
      aes_block_store (dst[0] + i, r[0]);
      aes_block_store (dst[1] + i, r[1]);
      aes_block_store (dst[2] + i, r[2]);
      aes_block_store (dst[3] + i, r[3]);
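
      /* Arm Neon path: AESE/AESMC rounds for the four lanes */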
      r[0] ^= aes_block_load (src[0] + i);
      r[1] ^= aes_block_load (src[1] + i);
      r[2] ^= aes_block_load (src[2] + i);
      r[3] ^= aes_block_load (src[3] + i);
      for (j = 0; j < rounds - 1; j++)
        {
          r[0] = vaesmcq_u8 (vaeseq_u8 (r[0], k[j][0]));
          r[1] = vaesmcq_u8 (vaeseq_u8 (r[1], k[j][1]));
          r[2] = vaesmcq_u8 (vaeseq_u8 (r[2], k[j][2]));
          r[3] = vaesmcq_u8 (vaeseq_u8 (r[3], k[j][3]));
        }
      r[0] = vaeseq_u8 (r[0], k[j][0]) ^ k[rounds][0];
      r[1] = vaeseq_u8 (r[1], k[j][1]) ^ k[rounds][1];
      r[2] = vaeseq_u8 (r[2], k[j][2]) ^ k[rounds][2];
      r[3] = vaeseq_u8 (r[3], k[j][3]) ^ k[rounds][3];
      aes_block_store (dst[0] + i, r[0]);
      aes_block_store (dst[1] + i, r[1]);
      aes_block_store (dst[2] + i, r[2]);
      aes_block_store (dst[3] + i, r[3]);
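
  /* advance by the bytes just processed; lanes that run out of data pick
     up a new op (or the dummy buffer) on the next pass */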
  len -= u32xN_splat (count);

  for (i = 0; i < N; i++)

  if (!u32xN_is_all_zero (len & dummy_mask))
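
/*
 * Decryption handles one operation at a time; the parallelism comes from
 * the four-block (or 16-block VAES) inner loops in the decrypt routines
 * above.
 */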
static_always_inline u32
aes_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                     u32 n_ops, aes_key_size_t ks)
{
  crypto_native_main_t *cm = &crypto_native_main;
  int rounds = AES_KEY_ROUNDS (ks);
  vnet_crypto_op_t *op = ops[0];
  aes_cbc_key_data_t *kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
#ifdef __VAES__
  vaes_cbc_dec (kd->decrypt_key, (u8x64u *) op->src, (u8x64u *) op->dst,
                (u8x16u *) op->iv, op->len, rounds);
#else
  aes_cbc_dec (kd->decrypt_key, (u8x16u *) op->src, (u8x16u *) op->dst,
               (u8x16u *) op->iv, op->len, rounds);
#endif
  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

      kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
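
/*
 * Expand the AES key once at key-registration time: encryption round keys
 * plus the derived decryption round keys, the latter broadcast to 512-bit
 * registers when built with VAES.
 */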
static_always_inline void *
aes_cbc_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks)
{
  u8x16 e[15], d[15];
  aes_cbc_key_data_t *kd;
  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
  aes_key_expand (e, key->data, ks);
  aes_key_enc_to_dec (e, d, ks);
  for (int i = 0; i < AES_KEY_ROUNDS (ks) + 1; i++)
    {
#ifdef __VAES__
      kd->decrypt_key[i] = (u8x64) _mm512_broadcast_i64x2 ((__m128i) d[i]);
#else
      kd->decrypt_key[i] = d[i];
#endif
      kd->encrypt_key[i] = e[i];
    }
  return kd;
}
#define foreach_aes_cbc_handler_type _(128) _(192) _(256)
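
/* generate per-key-size wrappers around the generic enc/dec/key-expansion
   routines */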
#define _(x) \
static u32 aes_ops_dec_aes_cbc_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
static u32 aes_ops_enc_aes_cbc_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
static void * aes_cbc_key_exp_##x (vnet_crypto_key_t *key) \
{ return aes_cbc_key_exp (key, AES_KEY_##x); }
foreach_aes_cbc_handler_type;
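
/*
 * One init entry point is emitted per instruction-set build of this file
 * (VAES, AVX512, Neon, AVX2, SSE4.2); the crypto_native plugin registers
 * the variant that matches the running CPU.
 */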
crypto_native_aes_cbc_init_vaes (vlib_main_t * vm)
crypto_native_aes_cbc_init_avx512 (vlib_main_t * vm)
crypto_native_aes_cbc_init_neon (vlib_main_t * vm)
crypto_native_aes_cbc_init_avx2 (vlib_main_t * vm)
crypto_native_aes_cbc_init_sse42 (vlib_main_t * vm)
  crypto_native_main_t *cm = &crypto_native_main;
  crypto_native_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");
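
  /* seed each worker's CBC IV generator state from /dev/urandom */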
  vec_foreach (ptd, cm->per_thread_data)
    {
      for (int i = 0; i < 4; i++)
        if (read (fd, ptd->cbc_iv, sizeof (ptd->cbc_iv)) !=
            sizeof (ptd->cbc_iv))
          err = clib_error_return_unix (0, "'/dev/urandom' read failure");
    }
#define _(x) \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
                                    VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
                                    aes_ops_enc_aes_cbc_##x); \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
                                    VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
                                    aes_ops_dec_aes_cbc_##x); \
  cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_CBC] = aes_cbc_key_exp_##x;
  foreach_aes_cbc_handler_type;
#undef _
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */