/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
#define EXPANDED_KEY_N_BYTES (16 * 15)

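/*
 * Per-thread engine state: each VPP worker owns a private MB_MGR instance
 * and its own IV-generation state, so the data-path functions below never
 * need to lock. With intel-ipsec-mb >= 1.3 a per-thread array of jobs is
 * also kept for the burst submission API.
 */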
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  __m128i cbc_iv;
  MB_MGR *mgr;
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
  JOB_AES_HMAC burst_jobs[IMB_MAX_BURST_SIZE];
#endif
} ipsecmb_per_thread_data_t;

typedef struct
{
  u16 data_size;
  u8 block_size;
  aes_gcm_pre_t aes_gcm_pre;
  keyexp_t keyexp;
  hash_one_block_t hash_one_block;
  hash_fn_t hash_fn;
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;
} ipsecmb_main_t;

typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/*
 * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1,   SHA1,    sha1,   64,  20, 20)                      \
  _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
  _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
  _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
  _(SHA512, SHA_512, sha512, 128, 64, 64)

/*
 * (Alg, key-len-bits, JOB_CIPHER_MODE)
 */
#define foreach_ipsecmb_cipher_op                                             \
  _ (AES_128_CBC, 128, CBC)                                                   \
  _ (AES_192_CBC, 192, CBC)                                                   \
  _ (AES_256_CBC, 256, CBC)                                                   \
  _ (AES_128_CTR, 128, CNTR)                                                  \
  _ (AES_192_CTR, 192, CNTR)                                                  \
  _ (AES_256_CTR, 256, CNTR)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128)                                          \
  _(AES_192_GCM, 192)                                          \
  _(AES_256_GCM, 256)

static_always_inline vnet_crypto_op_status_t
ipsecmb_status_job (JOB_STS status)
{
  switch (status)
    {
    case STS_COMPLETED:
      return VNET_CRYPTO_OP_STATUS_COMPLETED;
    case STS_BEING_PROCESSED:
    case STS_COMPLETED_AES:
    case STS_COMPLETED_HMAC:
      return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
    case STS_INVALID_ARGS:
    case STS_INTERNAL_ERROR:
    case STS_ERROR:
      return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
    }
  ASSERT (0);
  return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
}

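/*
 * Jobs compute their digest into per-batch scratch space. On retire, a
 * verify op (VNET_CRYPTO_OP_FLAG_HMAC_CHECK) compares the computed tag
 * against op->digest and flags a mismatch as FAIL_BAD_HMAC; a generate op
 * copies the tag out, truncated to op->digest_len when one was requested.
 */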
always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

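/*
 * intel-ipsec-mb 1.3 introduced a burst API which accepts up to
 * IMB_MAX_BURST_SIZE pre-filled jobs per call and returns them all
 * completed, avoiding the per-job submit/flush dance of older releases.
 * The two implementations below are selected at compile time from the
 * library version.
 */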
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         u32 block_size, u32 hash_size, u32 digest_size,
                         JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0, ops_index = 0;
  u8 scratch[n_ops][digest_size];
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
      /*
       * configure all the jobs first ...
       */
      for (i = 0; i < n; i++, ops_index++)
        {
          vnet_crypto_op_t *op = ops[ops_index];
          const u8 *kd = (u8 *) imbm->key_data[op->key_index];

          job = &ptd->burst_jobs[i];

          job->src = op->src;
          job->hash_start_src_offset_in_bytes = 0;
          job->msg_len_to_hash_in_bytes = op->len;
          job->auth_tag_output_len_in_bytes = digest_size;
          job->auth_tag_output = scratch[ops_index];

          job->u.HMAC._hashed_auth_key_xor_ipad = kd;
          job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
          job->user_data = op;
        }

      /*
       * submit all jobs to be processed and retire completed jobs
       */
      IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);

      for (i = 0; i < n; i++)
        {
          job = &ptd->burst_jobs[i];
          ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
        }

      n_ops -= n;
    }

  return ops_index - n_fail;
}
#else
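/*
 * Pre-1.3 path: jobs are handed to the manager one at a time. Each
 * IMB_SUBMIT_JOB call may return an earlier job that has completed, and
 * IMB_FLUSH_JOB drains whatever is still in flight after the last submit.
 */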
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         u32 block_size, u32 hash_size, u32 digest_size,
                         JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][digest_size];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 *kd = (u8 *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = digest_size;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = NULL_CIPHER;
      job->cipher_direction = DECRYPT;
      job->chain_order = HASH_CIPHER;

      job->u.HMAC._hashed_auth_key_xor_ipad = kd;
      job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_hmac_job (job, &n_fail, digest_size);

  return n_ops - n_fail;
}
#endif

#define _(a, b, c, d, e, f)                                             \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); }        \

foreach_ipsecmb_hmac_op;
#undef _

always_inline void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

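/*
 * AES-CBC/CTR handlers. When an encrypt op carries
 * VNET_CRYPTO_OP_FLAG_INIT_IV, a fresh IV is derived from the per-thread
 * state by AES-encrypting the previous value with itself
 * (_mm_aesenc_si128); that state is seeded from /dev/urandom in
 * crypto_ipsecmb_iv_init() below.
 */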
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction,
                               JOB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0, ops_index = 0;
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;

      for (i = 0; i < n; i++)
        {
          ipsecmb_aes_key_data_t *kd;
          vnet_crypto_op_t *op = ops[ops_index++];
          kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];

          job = &ptd->burst_jobs[i];

          job->src = op->src;
          job->dst = op->dst;
          job->msg_len_to_cipher_in_bytes = op->len;
          job->cipher_start_src_offset_in_bytes = 0;

          job->hash_alg = NULL_HASH;

          if ((direction == ENCRYPT) &&
              (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
            {
              const __m128i iv = ptd->cbc_iv;
              _mm_storeu_si128 ((__m128i *) op->iv, iv);
              ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
            }

          job->aes_enc_key_expanded = kd->enc_key_exp;
          job->aes_dec_key_expanded = kd->dec_key_exp;
          job->iv = op->iv;
          job->iv_len_in_bytes = AES_BLOCK_SIZE;

          job->user_data = op;
        }

      IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
                                       cipher_mode, direction, key_len / 8);
      for (i = 0; i < n; i++)
        {
          job = &ptd->burst_jobs[i];
          ipsecmb_retire_cipher_job (job, &n_fail);
        }

      n_ops -= n;
    }

  return ops_index - n_fail;
}
#else
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction,
                               JOB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;

  for (i = 0; i < n_ops; i++)
    {
      ipsecmb_aes_key_data_t *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
      __m128i iv;

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = NULL_HASH;
      job->cipher_mode = cipher_mode;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          _mm_storeu_si128 ((__m128i *) op->iv, iv);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      job->aes_key_len_in_bytes = key_len / 8;
      job->aes_enc_key_expanded = kd->enc_key_exp;
      job->aes_dec_key_expanded = kd->dec_key_exp;
      job->iv = op->iv;
      job->iv_len_in_bytes = AES_BLOCK_SIZE;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_cipher_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_cipher_job (job, &n_fail);

  return n_ops - n_fail;
}
#endif

#define _(a, b, c)                                                            \
  static_always_inline u32 ipsecmb_ops_cipher_enc_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, ENCRYPT, c);     \
  }                                                                           \
                                                                              \
  static_always_inline u32 ipsecmb_ops_cipher_dec_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, DECRYPT, c);     \
  }

foreach_ipsecmb_cipher_op;
#undef _

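/*
 * AES-GCM uses the library's direct (synchronous) GCM API rather than the
 * job manager: single-buffer ops are one-shot calls, while chained-buffer
 * ops walk the chunk list with INIT / ENC|DEC_UPDATE / FINALIZE. On
 * decrypt the tag is computed into scratch and compared against op->tag.
 */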
#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j;                                                                  \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len);      \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i;                                                                     \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, op->tag, op->tag_len);     \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j, n_failed = 0;                                                    \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len);      \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                 u32 n_ops)                                  \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i, n_failed = 0;                                                       \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, scratch, op->tag_len);     \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}

foreach_ipsecmb_gcm_cipher_op;
#undef _

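/*
 * CHACHA20-POLY1305 support is gated on HAVE_IPSECMB_CHACHA_POLY, which
 * (presumably defined by the build when the library is recent enough)
 * guards the AEAD job fields and the direct INIT / UPDATE / FINALIZE
 * calls used below.
 */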
#ifdef HAVE_IPSECMB_CHACHA_POLY
always_inline void
ipsecmb_retire_aead_job (JOB_AES_HMAC *job, u32 *n_fail)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->tag_len;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if (memcmp (op->tag, job->auth_tag_output, len))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }

  clib_memcpy_fast (op->tag, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

static_always_inline u32
ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  struct IMB_JOB *job;
  MB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 scratch[VLIB_FRAME_SIZE][16];
  u8 iv_data[16];
  u8 *key = 0;

  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      __m128i iv;

      job = IMB_GET_NEXT_JOB (m);
      if (last_key_index != op->key_index)
        {
          vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

          key = kd->data;
          last_key_index = op->key_index;
        }

      job->cipher_direction = dir;
      job->chain_order = IMB_ORDER_HASH_CIPHER;
      job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
      job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
      job->enc_keys = job->dec_keys = key;
      job->key_len_in_bytes = 32;

      job->u.CHACHA20_POLY1305.aad = op->aad;
      job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
      job->src = op->src;
      job->dst = op->dst;

      if ((dir == IMB_DIR_ENCRYPT) &&
          (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          _mm_storeu_si128 ((__m128i *) iv_data, iv);
          clib_memcpy_fast (op->iv, iv_data, 12);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      job->iv = op->iv;
      job->iv_len_in_bytes = 12;
      job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
        op->len;
      job->cipher_start_src_offset_in_bytes =
        job->hash_start_src_offset_in_bytes = 0;

      job->auth_tag_output = scratch[i];
      job->auth_tag_output_len_in_bytes = 16;

      job->user_data = op;

      job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
      if (job)
        ipsecmb_retire_aead_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_aead_job (job, &n_fail);

  return n_ops - n_fail;
}

static_always_inline u32
ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                 vnet_crypto_op_chunk_t *chunks, u32 n_ops,
                                 IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  MB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 iv_data[16];
  u8 *key = 0;

  if (dir == IMB_DIR_ENCRYPT)
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          __m128i iv;
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
            {
              iv = ptd->cbc_iv;
              _mm_storeu_si128 ((__m128i *) iv_data, iv);
              clib_memcpy_fast (op->iv, iv_data, 12);
              ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);

          op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }
  else /* dir == IMB_DIR_DECRYPT */
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          u8 scratch[16];
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);

          if (memcmp (op->tag, scratch, op->tag_len))
            {
              n_fail = n_fail + 1;
              op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
            }
          else
            op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }

  return n_ops - n_fail;
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_DECRYPT);
}
#endif

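/*
 * Seed each thread's IV-generation state with 16 random bytes from
 * /dev/urandom so every worker starts its AES-based IV sequence from an
 * independent point.
 */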
clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
  ipsecmb_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, imbm->per_thread_data)
  {
    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
      {
        err = clib_error_return_unix (0, "'/dev/urandom' read failure");
        close (fd);
        return (err);
      }
  }

  close (fd);
  return (NULL);
}

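/*
 * Key handler: precomputes per-algorithm key material whenever a key is
 * added, modified or deleted. AES-CBC/CTR keys are expanded into encrypt
 * and decrypt schedules, AES-GCM keys into the library's gcm_key_data,
 * and HMAC keys into the hashed ipad/opad blocks consumed by the job API.
 */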
static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                            vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  /* TODO: add linked alg support */
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
        return;

      if (imbm->key_data[idx] == 0)
        return;

      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_mem_free_s (imbm->key_data[idx]);
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
                                                     CLIB_CACHE_LINE_BYTES);

  /* AES CBC/CTR key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
                  ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

  /* HMAC */
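  /*
   * Standard RFC 2104 precomputation: a key longer than one block is
   * hashed first, the (zero-padded) key is XORed with the 0x36/0x5c
   * constants, and one compression-function block of each result is
   * stored. The job API later resumes hashing from these midstates
   * instead of re-hashing the key for every packet.
   */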
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
        clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
        ad->hash_fn (key->data, vec_len (key->data), key_hash);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));

      return;
    }
}

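/*
 * Plugin init: allocate and initialize one MB_MGR per VPP thread, picking
 * the widest SIMD code path the CPU supports (AVX512 / AVX2 / SSE), then
 * register the engine and one handler per supported algorithm with the
 * vnet crypto infrastructure.
 */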
static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  MB_MGR *m = 0;
  u32 eidx;
  u8 *name;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
                 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  /* *INDENT-OFF* */
  vec_foreach (ptd, imbm->per_thread_data)
    {
        ptd->mgr = alloc_mb_mgr (0);
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
        clib_memset_u8 (ptd->burst_jobs, 0,
                        sizeof (JOB_AES_HMAC) * IMB_MAX_BURST_SIZE);
#endif
        if (clib_cpu_supports_avx512f ())
          init_mb_mgr_avx512 (ptd->mgr);
        else if (clib_cpu_supports_avx2 () && clib_cpu_supports_bmi2 ())
          init_mb_mgr_avx2 (ptd->mgr);
        else
          init_mb_mgr_sse (ptd->mgr);

        if (ptd == imbm->per_thread_data)
          m = ptd->mgr;
    }
  /* *INDENT-ON* */

  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
    return (error);

#define _(a, b, c, d, e, f)                                              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a;                        \
  ad->block_size = d;                                                    \
  ad->data_size = e * 2;                                                 \
  ad->hash_one_block = m-> c##_one_block;                                \
  ad->hash_fn = m-> c;                                                   \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b, c)                                                            \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,       \
                                    ipsecmb_ops_cipher_enc_##a);              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,       \
                                    ipsecmb_ops_cipher_dec_##a);              \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                                  \
  ad->data_size = sizeof (ipsecmb_aes_key_data_t);                            \
  ad->keyexp = m->keyexp_##b;

  foreach_ipsecmb_cipher_op;
#undef _
#define _(a, b)                                                         \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a);    \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a);    \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,                              \
       ipsecmb_ops_gcm_cipher_enc_##a##_chained);                       \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,                              \
       ipsecmb_ops_gcm_cipher_dec_##a##_chained);                       \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
  ad->data_size = sizeof (struct gcm_key_data);                         \
  ad->aes_gcm_pre = m->gcm##b##_pre;                                    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

#ifdef HAVE_IPSECMB_CHACHA_POLY
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
                                    ipsecmb_ops_chacha_poly_enc);
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
                                    ipsecmb_ops_chacha_poly_dec);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
    ipsec_mb_ops_chacha_poly_enc_chained);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
    ipsec_mb_ops_chacha_poly_dec_chained);
  ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
  ad->data_size = 0;
#endif

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
  return (NULL);
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
{
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */