/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <crypto_native/crypto_native.h>
#include <crypto_native/aes.h>
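
/*
 * AES-CBC encryption/decryption using AES-NI, with wider 512-bit code
 * paths used when the VAES/AVX512 instructions are available.  The helpers
 * below operate either on one 16-byte AES block (u8x16) or on four blocks
 * packed into a single 512-bit register (__m512i).
 */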
static_always_inline u8x16 __clib_unused
xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __AVX512F__
  /* ternary-logic immediate 0x96 encodes the truth table of a ^ b ^ c */
  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
                                         (__m128i) c, 0x96);
#else
  return a ^ b ^ c;
#endif
}

#ifdef __VAES__
static_always_inline __m512i
xor3_x4 (__m512i a, __m512i b, __m512i c)
{
  return _mm512_ternarylogic_epi32 (a, b, c, 0x96);
}

static_always_inline __m512i
aes_block_load_x4 (u8 * src[], int i)
{
  /* gather the block at offset i from each of the 4 buffers into one zmm */
  __m512i r = { };
  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[0] + i), 0);
  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[1] + i), 1);
  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[2] + i), 2);
  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[3] + i), 3);
  return r;
}

static_always_inline void
aes_block_store_x4 (u8 * dst[], int i, __m512i r)
{
  /* scatter the 4 blocks back to offset i of each of the 4 buffers */
  aes_block_store (dst[0] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 0));
  aes_block_store (dst[1] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 1));
  aes_block_store (dst[2] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 2));
  aes_block_store (dst[3] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 3));
}
#endif /* __VAES__ */
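
/*
 * CBC decryption: every plaintext block depends only on its own ciphertext
 * block and the previous ciphertext block, so four blocks can be decrypted
 * in parallel to hide AESDEC latency.  Leftover blocks are handled one at
 * a time at the end.
 */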
static_always_inline void __clib_unused
aes_cbc_dec (u8x16 * k, u8 * src, u8 * dst, u8 * iv, int count,
             aes_key_size_t rounds)
{
  u8x16 r0, r1, r2, r3, c0, c1, c2, c3, f;
  int i, n_blocks = count >> 4;

  f = aes_block_load (iv);

  while (n_blocks >= 4)
    {
      _mm_prefetch (src + 128, _MM_HINT_T0);
      _mm_prefetch (dst + 128, _MM_HINT_T0);

      c0 = aes_block_load (src);
      c1 = aes_block_load (src + 16);
      c2 = aes_block_load (src + 32);
      c3 = aes_block_load (src + 48);

      r0 = c0 ^ k[0];
      r1 = c1 ^ k[0];
      r2 = c2 ^ k[0];
      r3 = c3 ^ k[0];

      for (i = 1; i < rounds; i++)
        {
          r0 = aes_dec_round (r0, k[i]);
          r1 = aes_dec_round (r1, k[i]);
          r2 = aes_dec_round (r2, k[i]);
          r3 = aes_dec_round (r3, k[i]);
        }

      r0 = aes_dec_last_round (r0, k[i]);
      r1 = aes_dec_last_round (r1, k[i]);
      r2 = aes_dec_last_round (r2, k[i]);
      r3 = aes_dec_last_round (r3, k[i]);

      aes_block_store (dst, r0 ^ f);
      aes_block_store (dst + 16, r1 ^ c0);
      aes_block_store (dst + 32, r2 ^ c1);
      aes_block_store (dst + 48, r3 ^ c2);

      f = c3;
      n_blocks -= 4;
      src += 64;
      dst += 64;
    }

  /* remaining blocks, one at a time */
  while (n_blocks > 0)
    {
      c0 = aes_block_load (src);
      r0 = c0 ^ k[0];
      for (i = 1; i < rounds; i++)
        r0 = aes_dec_round (r0, k[i]);
      r0 = aes_dec_last_round (r0, k[i]);
      aes_block_store (dst, r0 ^ f);
      f = c0;
      n_blocks -= 1;
      src += 16;
      dst += 16;
    }
}
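
/*
 * VAES variant of CBC decryption: each __m512i register holds four
 * consecutive 16-byte blocks, so the main loop decrypts 16 blocks per
 * iteration.  The "previous ciphertext" vector needed for the final XOR is
 * built with a two-source permute driven by the index vector below.
 */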
#ifdef __VAES__
static_always_inline void
vaes_cbc_dec (__m512i * k, u8 * src, u8 * dst, u8 * iv, int count,
              aes_key_size_t rounds)
{
  __m512i permute = { 6, 7, 8, 9, 10, 11, 12, 13 };
  __m512i r0, r1, r2, r3, c0, c1, c2, c3, f = { };
  __mmask8 m;
  int i, n_blocks = count >> 4;

  /* place the 16-byte IV into the two topmost 64-bit lanes of f */
  f = _mm512_mask_loadu_epi64 (f, 0xc0, (__m512i *) (iv - 48));

  while (n_blocks >= 16)
    {
      c0 = _mm512_loadu_si512 ((__m512i *) src);
      c1 = _mm512_loadu_si512 ((__m512i *) (src + 64));
      c2 = _mm512_loadu_si512 ((__m512i *) (src + 128));
      c3 = _mm512_loadu_si512 ((__m512i *) (src + 192));

      r0 = c0 ^ k[0];
      r1 = c1 ^ k[0];
      r2 = c2 ^ k[0];
      r3 = c3 ^ k[0];

      for (i = 1; i < rounds; i++)
        {
          r0 = _mm512_aesdec_epi128 (r0, k[i]);
          r1 = _mm512_aesdec_epi128 (r1, k[i]);
          r2 = _mm512_aesdec_epi128 (r2, k[i]);
          r3 = _mm512_aesdec_epi128 (r3, k[i]);
        }

      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
      r1 = _mm512_aesdeclast_epi128 (r1, k[i]);
      r2 = _mm512_aesdeclast_epi128 (r2, k[i]);
      r3 = _mm512_aesdeclast_epi128 (r3, k[i]);

      /* feedback vector = preceding ciphertext blocks, shifted by one block */
      r0 ^= _mm512_permutex2var_epi64 (f, permute, c0);
      _mm512_storeu_si512 ((__m512i *) dst, r0);
      r1 ^= _mm512_permutex2var_epi64 (c0, permute, c1);
      _mm512_storeu_si512 ((__m512i *) (dst + 64), r1);
      r2 ^= _mm512_permutex2var_epi64 (c1, permute, c2);
      _mm512_storeu_si512 ((__m512i *) (dst + 128), r2);
      r3 ^= _mm512_permutex2var_epi64 (c2, permute, c3);
      _mm512_storeu_si512 ((__m512i *) (dst + 192), r3);

      f = c3;
      n_blocks -= 16;
      src += 256;
      dst += 256;
    }
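
  /*
   * Tail handling: fewer than 16 blocks remain.  The mask has two bits per
   * remaining 16-byte block (the register is addressed as 64-bit lanes),
   * so each pass below covers up to four blocks.
   */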
  while (n_blocks > 0)
    {
      m = (1 << (n_blocks * 2)) - 1;
      c0 = _mm512_mask_loadu_epi64 (c0, m, (__m512i *) src);
      f = _mm512_permutex2var_epi64 (f, permute, c0);
      r0 = c0 ^ k[0];
      for (i = 1; i < rounds; i++)
        r0 = _mm512_aesdec_epi128 (r0, k[i]);
      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
      _mm512_mask_storeu_epi64 ((__m512i *) dst, m, r0 ^ f);
      f = c0;
      n_blocks -= 4;
      src += 64;
      dst += 64;
    }
}
#endif /* __VAES__ */
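
/*
 * The encrypt path below works on N independent CBC streams in lock-step:
 * 16 lanes when 512-bit registers are available, otherwise 4.
 */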
#ifdef __VAES__
#define N 16
#define u32xN u32x16
#define u32xN_min_scalar u32x16_min_scalar
#define u32xN_is_all_zero u32x16_is_all_zero
#else
#define N 4
#define u32xN u32x4
#define u32xN_min_scalar u32x4_min_scalar
#define u32xN_is_all_zero u32x4_is_all_zero
#endif
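
/*
 * CBC encryption is serial within one stream, so throughput comes from
 * interleaving N streams (one vnet_crypto op per lane).  Lanes with no
 * pending op encrypt a throw-away dummy buffer; dummy_mask records which
 * lanes carry real data, and the loop runs until all real lanes are done.
 */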
static_always_inline u32
aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                       u32 n_ops, aes_key_size_t ks)
{
  crypto_native_main_t *cm = &crypto_native_main;
  crypto_native_per_thread_data_t *ptd =
    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  int rounds = AES_KEY_ROUNDS (ks);
  u8 dummy[8192];
  u32 i, j, count, n_left = n_ops;
  u32xN dummy_mask = { };
  u32xN len = { };
  vnet_crypto_key_index_t key_index[N];
  u8 *src[N] = { };
  u8 *dst[N] = { };
  union
  {
    u8x16 x1[N];
#ifdef __VAES__
    __m512i x4[N / 4];
#endif
  } r = { }, k[15] = { };

  for (i = 0; i < N; i++)
    key_index[i] = ~0;

more:
  for (i = 0; i < N; i++)
    if (len[i] == 0)
      {
        if (n_left == 0)
          {
            /* no more work to enqueue, so we are enqueueing dummy buffer */
            src[i] = dst[i] = dummy;
            len[i] = sizeof (dummy);
            dummy_mask[i] = 0;
          }
        else
          {
            if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
              {
                /* derive a fresh IV from the per-thread state, write it into
                   the op, then advance the state by one AES round */
                r.x1[i] = ptd->cbc_iv[i];
                aes_block_store (ops[0]->iv, r.x1[i]);
                ptd->cbc_iv[i] = aes_enc_round (r.x1[i], r.x1[i]);
              }
            else
              r.x1[i] = aes_block_load (ops[0]->iv);

            src[i] = ops[0]->src;
            dst[i] = ops[0]->dst;
            len[i] = ops[0]->len;
            dummy_mask[i] = ~0;
            if (key_index[i] != ops[0]->key_index)
              {
                aes_cbc_key_data_t *kd;
                key_index[i] = ops[0]->key_index;
                kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
                for (j = 0; j < rounds + 1; j++)
                  k[j].x1[i] = kd->encrypt_key[j];
              }
            ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
            n_left--;
            ops++;
          }
      }
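
  /*
   * Process the lanes in lock-step: count is the smallest remaining length
   * across all lanes, always a whole number of 16-byte blocks.
   */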
  count = u32xN_min_scalar (len);

  ASSERT (count % 16 == 0);

  for (i = 0; i < count; i += 16)
    {
#ifdef __VAES__
      r.x4[0] = xor3_x4 (r.x4[0], aes_block_load_x4 (src, i), k[0].x4[0]);
      r.x4[1] = xor3_x4 (r.x4[1], aes_block_load_x4 (src + 4, i), k[0].x4[1]);
      r.x4[2] = xor3_x4 (r.x4[2], aes_block_load_x4 (src + 8, i), k[0].x4[2]);
      r.x4[3] = xor3_x4 (r.x4[3], aes_block_load_x4 (src + 12, i), k[0].x4[3]);

      for (j = 1; j < rounds; j++)
        {
          r.x4[0] = _mm512_aesenc_epi128 (r.x4[0], k[j].x4[0]);
          r.x4[1] = _mm512_aesenc_epi128 (r.x4[1], k[j].x4[1]);
          r.x4[2] = _mm512_aesenc_epi128 (r.x4[2], k[j].x4[2]);
          r.x4[3] = _mm512_aesenc_epi128 (r.x4[3], k[j].x4[3]);
        }
      r.x4[0] = _mm512_aesenclast_epi128 (r.x4[0], k[j].x4[0]);
      r.x4[1] = _mm512_aesenclast_epi128 (r.x4[1], k[j].x4[1]);
      r.x4[2] = _mm512_aesenclast_epi128 (r.x4[2], k[j].x4[2]);
      r.x4[3] = _mm512_aesenclast_epi128 (r.x4[3], k[j].x4[3]);

      aes_block_store_x4 (dst, i, r.x4[0]);
      aes_block_store_x4 (dst + 4, i, r.x4[1]);
      aes_block_store_x4 (dst + 8, i, r.x4[2]);
      aes_block_store_x4 (dst + 12, i, r.x4[3]);
#else
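      /* 128-bit AES-NI fallback: one 16-byte block from each of the 4
         lanes per pass */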
      r.x1[0] = xor3 (r.x1[0], aes_block_load (src[0] + i), k[0].x1[0]);
      r.x1[1] = xor3 (r.x1[1], aes_block_load (src[1] + i), k[0].x1[1]);
      r.x1[2] = xor3 (r.x1[2], aes_block_load (src[2] + i), k[0].x1[2]);
      r.x1[3] = xor3 (r.x1[3], aes_block_load (src[3] + i), k[0].x1[3]);

      for (j = 1; j < rounds; j++)
        {
          r.x1[0] = aes_enc_round (r.x1[0], k[j].x1[0]);
          r.x1[1] = aes_enc_round (r.x1[1], k[j].x1[1]);
          r.x1[2] = aes_enc_round (r.x1[2], k[j].x1[2]);
          r.x1[3] = aes_enc_round (r.x1[3], k[j].x1[3]);
        }

      r.x1[0] = aes_enc_last_round (r.x1[0], k[j].x1[0]);
      r.x1[1] = aes_enc_last_round (r.x1[1], k[j].x1[1]);
      r.x1[2] = aes_enc_last_round (r.x1[2], k[j].x1[2]);
      r.x1[3] = aes_enc_last_round (r.x1[3], k[j].x1[3]);

      aes_block_store (dst[0] + i, r.x1[0]);
      aes_block_store (dst[1] + i, r.x1[1]);
      aes_block_store (dst[2] + i, r.x1[2]);
      aes_block_store (dst[3] + i, r.x1[3]);
#endif
    }

  for (i = 0; i < N; i++)
    {
      src[i] += count;
      dst[i] += count;
      len[i] -= count;
    }

  if (n_left > 0)
    goto more;

  if (!u32xN_is_all_zero (len & dummy_mask))
    goto more;

  return n_ops;
}
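
/*
 * Decryption handler: ops are processed one at a time; the block-level
 * parallelism lives inside aes_cbc_dec / vaes_cbc_dec above.
 */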
static_always_inline u32
aesni_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                       u32 n_ops, aes_key_size_t ks)
{
  crypto_native_main_t *cm = &crypto_native_main;
  int rounds = AES_KEY_ROUNDS (ks);
  vnet_crypto_op_t *op = ops[0];
  aes_cbc_key_data_t *kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
  u32 n_left = n_ops;

decrypt:
#ifdef __VAES__
  vaes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
#else
  aes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
#endif
  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (--n_left)
    {
      op += 1;
      kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
      goto decrypt;
    }

  return n_ops;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */