crypto-ipsecmb: bump intel-ipsec-mb version to 1.4
vpp.git: src/plugins/crypto_ipsecmb/ipsecmb.c
/*
 * ipsecmb.c - Intel IPsec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

#define HMAC_MAX_BLOCK_SIZE  IMB_SHA_512_BLOCK_SIZE
#define EXPANDED_KEY_N_BYTES (16 * 15)

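/*
 * Per-thread state: every worker owns a private IMB_MGR so the
 * multi-buffer manager can be driven without locking; with
 * intel-ipsec-mb >= 1.3 a preallocated job array is kept for the
 * burst API as well.
 */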
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  IMB_MGR *mgr;
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
  IMB_JOB burst_jobs[IMB_MAX_BURST_SIZE];
#endif
} ipsecmb_per_thread_data_t;

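/*
 * Per-algorithm dispatch data: the size of the preprocessed key
 * material plus the library helpers used to prepare it (AES key
 * expansion, GCM precompute, or HMAC one-block hash).
 */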
typedef struct
{
  u16 data_size;
  u8 block_size;
  aes_gcm_pre_t aes_gcm_pre;
  keyexp_t keyexp;
  hash_one_block_t hash_one_block;
  hash_fn_t hash_fn;
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;
} ipsecmb_main_t;

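/*
 * Expanded AES round keys for both directions; EXPANDED_KEY_N_BYTES
 * (16 * 15) covers the 15 round keys of AES-256, the largest schedule.
 */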
typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/* clang-format off */
/*
 * (Alg, IMB_AUTH_HMAC suffix, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1,   SHA_1,   sha1,   64,  20, 20)                      \
  _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
  _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
  _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
  _(SHA512, SHA_512, sha512, 128, 64, 64)

/*
 * (Alg, key-len-bits, IMB_CIPHER_MODE suffix)
 */
#define foreach_ipsecmb_cipher_op                                             \
  _ (AES_128_CBC, 128, CBC)                                                   \
  _ (AES_192_CBC, 192, CBC)                                                   \
  _ (AES_256_CBC, 256, CBC)                                                   \
  _ (AES_128_CTR, 128, CNTR)                                                  \
  _ (AES_192_CTR, 192, CNTR)                                                  \
  _ (AES_256_CTR, 256, CNTR)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128)                                          \
  _(AES_192_GCM, 192)                                          \
  _(AES_256_GCM, 256)
/* clang-format on */
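
/* Map an intel-ipsec-mb job status onto a vnet crypto op status. */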
static_always_inline vnet_crypto_op_status_t
ipsecmb_status_job (IMB_STATUS status)
{
  switch (status)
    {
    case IMB_STATUS_COMPLETED:
      return VNET_CRYPTO_OP_STATUS_COMPLETED;
    case IMB_STATUS_BEING_PROCESSED:
    case IMB_STATUS_COMPLETED_CIPHER:
    case IMB_STATUS_COMPLETED_AUTH:
      return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
    case IMB_STATUS_INVALID_ARGS:
    case IMB_STATUS_INTERNAL_ERROR:
    case IMB_STATUS_ERROR:
      return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
    }
  ASSERT (0);
  return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
}

always_inline void
ipsecmb_retire_hmac_job (IMB_JOB *job, u32 *n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }
  else if (len == digest_size)
    clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

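/*
 * With intel-ipsec-mb >= 1.3 the hash burst API hands the manager up
 * to IMB_MAX_BURST_SIZE jobs at once and returns them all completed,
 * so no explicit flush pass is needed; older releases use the classic
 * per-job submit/flush scheme in the #else branch below.
 */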
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         u32 block_size, u32 hash_size, u32 digest_size,
                         IMB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_JOB *job;
  u32 i, n_fail = 0, ops_index = 0;
  u8 scratch[n_ops][digest_size];
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
      /*
       * configure all the jobs first ...
       */
      for (i = 0; i < n; i++, ops_index++)
        {
          vnet_crypto_op_t *op = ops[ops_index];
          const u8 *kd = (u8 *) imbm->key_data[op->key_index];

          job = &ptd->burst_jobs[i];

          job->src = op->src;
          job->hash_start_src_offset_in_bytes = 0;
          job->msg_len_to_hash_in_bytes = op->len;
          job->auth_tag_output_len_in_bytes = digest_size;
          job->auth_tag_output = scratch[ops_index];

          job->u.HMAC._hashed_auth_key_xor_ipad = kd;
          job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
          job->user_data = op;
        }

      /*
       * submit all jobs to be processed and retire completed jobs
       */
      IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);

      for (i = 0; i < n; i++)
        {
          job = &ptd->burst_jobs[i];
          ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
        }

      n_ops -= n;
    }

  return ops_index - n_fail;
}
#else
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         u32 block_size, u32 hash_size, u32 digest_size,
                         JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_JOB *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][digest_size];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 *kd = (u8 *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = digest_size;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = IMB_CIPHER_NULL;
      job->cipher_direction = IMB_DIR_DECRYPT;
      job->chain_order = IMB_ORDER_HASH_CIPHER;

      job->u.HMAC._hashed_auth_key_xor_ipad = kd;
      job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_hmac_job (job, &n_fail, digest_size);

  return n_ops - n_fail;
}
#endif

/* clang-format off */
#define _(a, b, c, d, e, f)                                             \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f,              \
                IMB_AUTH_HMAC_##b); }                                   \

foreach_ipsecmb_hmac_op;
#undef _
/* clang-format on */
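
/*
 * For illustration, the SHA1 row of foreach_ipsecmb_hmac_op expands
 * (roughly) to:
 *
 *   static_always_inline u32
 *   ipsecmb_ops_hmac_SHA1 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
 *                          u32 n_ops)
 *   { return ipsecmb_ops_hmac_inline (vm, ops, n_ops, 64, 20, 20,
 *                                     IMB_AUTH_HMAC_SHA_1); }
 */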

always_inline void
ipsecmb_retire_cipher_job (IMB_JOB *job, u32 *n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                               u32 n_ops, u32 key_len,
                               IMB_CIPHER_DIRECTION direction,
                               IMB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_JOB *job;
  u32 i, n_fail = 0, ops_index = 0;
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;

      for (i = 0; i < n; i++)
        {
          ipsecmb_aes_key_data_t *kd;
          vnet_crypto_op_t *op = ops[ops_index++];
          kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];

          job = &ptd->burst_jobs[i];

          job->src = op->src;
          job->dst = op->dst;
          job->msg_len_to_cipher_in_bytes = op->len;
          job->cipher_start_src_offset_in_bytes = 0;

          job->hash_alg = IMB_AUTH_NULL;

          job->enc_keys = kd->enc_key_exp;
          job->dec_keys = kd->dec_key_exp;
          job->iv = op->iv;
          job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;

          job->user_data = op;
        }

      IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
                                       cipher_mode, direction, key_len / 8);
      for (i = 0; i < n; i++)
        {
          job = &ptd->burst_jobs[i];
          ipsecmb_retire_cipher_job (job, &n_fail);
        }

      n_ops -= n;
    }

  return ops_index - n_fail;
}
#else
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction,
                               JOB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_JOB *job;
  u32 i, n_fail = 0;

  for (i = 0; i < n_ops; i++)
    {
      ipsecmb_aes_key_data_t *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = IMB_AUTH_NULL;
      job->cipher_mode = cipher_mode;
      job->cipher_direction = direction;
      job->chain_order =
        (direction == IMB_DIR_ENCRYPT ? IMB_ORDER_CIPHER_HASH :
                                        IMB_ORDER_HASH_CIPHER);

      job->aes_key_len_in_bytes = key_len / 8;
      job->enc_keys = kd->enc_key_exp;
      job->dec_keys = kd->dec_key_exp;
      job->iv = op->iv;
      job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_cipher_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_cipher_job (job, &n_fail);

  return n_ops - n_fail;
}
#endif

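/*
 * Generate thin encrypt/decrypt wrappers that bind the key length and
 * direction for each AES mode listed in foreach_ipsecmb_cipher_op.
 */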
/* clang-format off */
#define _(a, b, c)                                                            \
  static_always_inline u32 ipsecmb_ops_cipher_enc_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (                                    \
                    vm, ops, n_ops, b, IMB_DIR_ENCRYPT, IMB_CIPHER_##c);      \
  }                                                                           \
                                                                              \
  static_always_inline u32 ipsecmb_ops_cipher_dec_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (                                    \
                   vm, ops, n_ops, b, IMB_DIR_DECRYPT, IMB_CIPHER_##c);       \
  }

foreach_ipsecmb_cipher_op;
#undef _

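/*
 * AES-GCM handlers are generated in two flavours per key size: direct
 * ops using the one-shot ENC/DEC entry points, and chained-buffer ops
 * that walk the chunk list with INIT/UPDATE/FINALIZE. Decryption
 * computes the tag into scratch and compares it against the received
 * tag, counting mismatches as failures.
 */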
#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  IMB_MGR *m = ptd->mgr;                                                     \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j;                                                                  \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len);      \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  IMB_MGR *m = ptd->mgr;                                                     \
  u32 i;                                                                     \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, op->tag, op->tag_len);     \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  IMB_MGR *m = ptd->mgr;                                                     \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j, n_failed = 0;                                                    \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len);      \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                 u32 n_ops)                                  \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  IMB_MGR *m = ptd->mgr;                                                     \
  u32 i, n_failed = 0;                                                       \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, scratch, op->tag_len);     \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}
/* clang-format on */
foreach_ipsecmb_gcm_cipher_op;
#undef _

#ifdef HAVE_IPSECMB_CHACHA_POLY
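/*
 * CHACHA20-POLY1305 AEAD through the generic job interface: the
 * Poly1305 tag is computed into per-op scratch and either compared
 * against the received tag (when HMAC_CHECK is set) or copied out.
 */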
always_inline void
ipsecmb_retire_aead_job (IMB_JOB *job, u32 *n_fail)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->tag_len;

  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if (memcmp (op->tag, job->auth_tag_output, len))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }

  clib_memcpy_fast (op->tag, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

static_always_inline u32
ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  struct IMB_JOB *job;
  IMB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 scratch[VLIB_FRAME_SIZE][16];
  u8 *key = 0;

  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];

      job = IMB_GET_NEXT_JOB (m);
      if (last_key_index != op->key_index)
        {
          vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

          key = kd->data;
          last_key_index = op->key_index;
        }

      job->cipher_direction = dir;
      job->chain_order = IMB_ORDER_HASH_CIPHER;
      job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
      job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
      job->enc_keys = job->dec_keys = key;
      job->key_len_in_bytes = 32;

      job->u.CHACHA20_POLY1305.aad = op->aad;
      job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
      job->src = op->src;
      job->dst = op->dst;

      job->iv = op->iv;
      job->iv_len_in_bytes = 12;
      job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
        op->len;
      job->cipher_start_src_offset_in_bytes =
        job->hash_start_src_offset_in_bytes = 0;

      job->auth_tag_output = scratch[i];
      job->auth_tag_output_len_in_bytes = 16;

      job->user_data = op;

      job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
      if (job)
        ipsecmb_retire_aead_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_aead_job (job, &n_fail);

  return n_ops - n_fail;
}

static_always_inline u32
ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                 vnet_crypto_op_chunk_t *chunks, u32 n_ops,
                                 IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 *key = 0;

  if (dir == IMB_DIR_ENCRYPT)
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);

          op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }
  else /* dir == IMB_DIR_DECRYPT */
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          u8 scratch[16];
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);

          if (memcmp (op->tag, scratch, op->tag_len))
            {
              n_fail = n_fail + 1;
              op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
            }
          else
            op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }

  return n_ops - n_fail;
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_DECRYPT);
}
#endif

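/*
 * Key handler: maintains a vector of preprocessed key material indexed
 * by key index (expanded AES round keys, GCM hash keys, or HMAC
 * ipad/opad digests), created on ADD/MODIFY and freed on DEL.
 */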
static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                            vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  /** TODO: add linked alg support **/
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
        return;

      if (imbm->key_data[idx] == 0)
        return;

      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_mem_free_s (imbm->key_data[idx]);
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
                                                     CLIB_CACHE_LINE_BYTES);

  /* AES CBC key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
                  ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

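  /*
   * HMAC (RFC 2104): a key longer than the hash block size is first
   * hashed, a shorter one zero-padded; the result is XORed with the
   * ipad (0x36) and opad (0x5c) patterns and one hash block of each is
   * precomputed, so per-packet jobs start from these intermediate
   * digests instead of rehashing the key.
   */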
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
        clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
        ad->hash_fn (key->data, vec_len (key->data), key_hash);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));

      return;
    }
}

static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  IMB_MGR *m = 0;
  u32 eidx;
  u8 *name;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
                 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

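  /*
   * Pick the widest SIMD implementation the CPU supports: AVX-512,
   * then AVX2 (which also requires BMI2), falling back to SSE.
   */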
  /* *INDENT-OFF* */
  vec_foreach (ptd, imbm->per_thread_data)
    {
        ptd->mgr = alloc_mb_mgr (0);
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
        clib_memset_u8 (ptd->burst_jobs, 0,
                        sizeof (IMB_JOB) * IMB_MAX_BURST_SIZE);
#endif
        if (clib_cpu_supports_avx512f ())
          init_mb_mgr_avx512 (ptd->mgr);
        else if (clib_cpu_supports_avx2 () && clib_cpu_supports_bmi2 ())
          init_mb_mgr_avx2 (ptd->mgr);
        else
          init_mb_mgr_sse (ptd->mgr);

        if (ptd == imbm->per_thread_data)
          m = ptd->mgr;
    }
  /* *INDENT-ON* */

#define _(a, b, c, d, e, f)                                              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a;                        \
  ad->block_size = d;                                                    \
  ad->data_size = e * 2;                                                 \
  ad->hash_one_block = m-> c##_one_block;                                \
  ad->hash_fn = m-> c;                                                   \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b, c)                                                            \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,       \
                                    ipsecmb_ops_cipher_enc_##a);              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,       \
                                    ipsecmb_ops_cipher_dec_##a);              \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                                  \
  ad->data_size = sizeof (ipsecmb_aes_key_data_t);                            \
  ad->keyexp = m->keyexp_##b;

  foreach_ipsecmb_cipher_op;
#undef _
#define _(a, b)                                                         \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a);    \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a);    \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,                              \
       ipsecmb_ops_gcm_cipher_enc_##a##_chained);                       \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,                              \
       ipsecmb_ops_gcm_cipher_dec_##a##_chained);                       \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
  ad->data_size = sizeof (struct gcm_key_data);                         \
  ad->aes_gcm_pre = m->gcm##b##_pre;                                    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

#ifdef HAVE_IPSECMB_CHACHA_POLY
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
                                    ipsecmb_ops_chacha_poly_enc);
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
                                    ipsecmb_ops_chacha_poly_dec);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
    ipsec_mb_ops_chacha_poly_enc_chained);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
    ipsec_mb_ops_chacha_poly_dec_chained);
  ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
  ad->data_size = 0;
#endif

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
  return (NULL);
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
{
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */