crypto-ipsecmb: bump ipsecmb library to v1.3
[vpp.git] src/plugins/crypto_ipsecmb/ipsecmb.c
/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
#define EXPANDED_KEY_N_BYTES (16 * 15)

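/*
 * Per-thread state: each worker owns its own multi-buffer manager and a
 * preallocated job array for the burst API; cbc_iv carries the rolling
 * random-IV state used when VNET_CRYPTO_OP_FLAG_INIT_IV is set.
 */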
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  __m128i cbc_iv;
  MB_MGR *mgr;
  JOB_AES_HMAC burst_jobs[IMB_MAX_BURST_SIZE];
} ipsecmb_per_thread_data_t;

typedef struct
{
  u16 data_size;
  u8 block_size;
  aes_gcm_pre_t aes_gcm_pre;
  keyexp_t keyexp;
  hash_one_block_t hash_one_block;
  hash_fn_t hash_fn;
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;
} ipsecmb_main_t;

typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/*
 * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1,   SHA1,    sha1,   64,  20, 20)                      \
  _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
  _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
  _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
  _(SHA512, SHA_512, sha512, 128, 64, 64)

/*
 * (Alg, key-len-bits, JOB_CIPHER_MODE)
 */
#define foreach_ipsecmb_cipher_op                                             \
  _ (AES_128_CBC, 128, CBC)                                                   \
  _ (AES_192_CBC, 192, CBC)                                                   \
  _ (AES_256_CBC, 256, CBC)                                                   \
  _ (AES_128_CTR, 128, CNTR)                                                  \
  _ (AES_192_CTR, 192, CNTR)                                                  \
  _ (AES_256_CTR, 256, CNTR)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128)                                          \
  _(AES_192_GCM, 192)                                          \
  _(AES_256_GCM, 256)

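/* Map an ipsec-mb JOB_STS to the corresponding VPP crypto op status. */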
static_always_inline vnet_crypto_op_status_t
ipsecmb_status_job (JOB_STS status)
{
  switch (status)
    {
    case STS_COMPLETED:
      return VNET_CRYPTO_OP_STATUS_COMPLETED;
    case STS_BEING_PROCESSED:
    case STS_COMPLETED_AES:
    case STS_COMPLETED_HMAC:
      return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
    case STS_INVALID_ARGS:
    case STS_INTERNAL_ERROR:
    case STS_ERROR:
      return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
    }
  ASSERT (0);
  return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
}

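/*
 * Collect a finished HMAC job: on the verify path compare the computed
 * tag against op->digest, otherwise copy out the (possibly truncated)
 * digest. An op->digest_len of 0 means "use the algorithm's full size".
 */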
always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }
  else if (len == digest_size)
    clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

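/*
 * Process HMAC ops in bursts of up to IMB_MAX_BURST_SIZE: fill the
 * per-thread job array, submit the whole burst, then retire every job.
 * Full-size digests land in a scratch area first so a truncated
 * digest copy can never overrun op->digest.
 */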
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                         u32 n_ops, u32 block_size, u32 hash_size,
                         u32 digest_size, JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0, ops_index = 0;
  u8 scratch[n_ops][digest_size];
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
      /*
       * configure all the jobs first ...
       */
      for (i = 0; i < n; i++, ops_index++)
        {
          vnet_crypto_op_t *op = ops[ops_index];
          const u8 *kd = (u8 *) imbm->key_data[op->key_index];

          job = &ptd->burst_jobs[i];

          job->src = op->src;
          job->hash_start_src_offset_in_bytes = 0;
          job->msg_len_to_hash_in_bytes = op->len;
          job->auth_tag_output_len_in_bytes = digest_size;
          job->auth_tag_output = scratch[ops_index];

          job->u.HMAC._hashed_auth_key_xor_ipad = kd;
          job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
          job->user_data = op;
        }

      /*
       * submit all jobs to be processed and retire completed jobs
       */
      IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);

      for (i = 0; i < n; i++)
        {
          job = &ptd->burst_jobs[i];
          ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
        }

      n_ops -= n;
    }

  return ops_index - n_fail;
}

#define _(a, b, c, d, e, f)                                             \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); }        \

foreach_ipsecmb_hmac_op;
#undef _
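
/*
 * For illustration, the SHA1 row of foreach_ipsecmb_hmac_op expands to:
 *
 *   static_always_inline u32
 *   ipsecmb_ops_hmac_SHA1 (vlib_main_t * vm, vnet_crypto_op_t * ops[],
 *                          u32 n_ops)
 *   { return ipsecmb_ops_hmac_inline (vm, ops, n_ops, 64, 20, 20, SHA1); }
 */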

always_inline void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

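/*
 * AES CBC/CTR via the cipher burst API. On encrypt, when the op carries
 * VNET_CRYPTO_OP_FLAG_INIT_IV, a fresh IV is produced from the
 * per-thread rolling state with a single AES round.
 */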
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction,
                               JOB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0, ops_index = 0;
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;

      for (i = 0; i < n; i++)
        {
          ipsecmb_aes_key_data_t *kd;
          vnet_crypto_op_t *op = ops[ops_index++];
          kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];

          job = &ptd->burst_jobs[i];

          job->src = op->src;
          job->dst = op->dst;
          job->msg_len_to_cipher_in_bytes = op->len;
          job->cipher_start_src_offset_in_bytes = 0;

          job->hash_alg = NULL_HASH;

          if ((direction == ENCRYPT) &&
              (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
            {
              const __m128i iv = ptd->cbc_iv;
              _mm_storeu_si128 ((__m128i *) op->iv, iv);
              ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
            }

          job->aes_enc_key_expanded = kd->enc_key_exp;
          job->aes_dec_key_expanded = kd->dec_key_exp;
          job->iv = op->iv;
          job->iv_len_in_bytes = AES_BLOCK_SIZE;

          job->user_data = op;
        }

      IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
                                       cipher_mode, direction, key_len / 8);
      for (i = 0; i < n; i++)
        {
          job = &ptd->burst_jobs[i];
          ipsecmb_retire_cipher_job (job, &n_fail);
        }

      n_ops -= n;
    }

  return ops_index - n_fail;
}

#define _(a, b, c)                                                            \
  static_always_inline u32 ipsecmb_ops_cipher_enc_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, ENCRYPT, c);     \
  }                                                                           \
                                                                              \
  static_always_inline u32 ipsecmb_ops_cipher_dec_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, DECRYPT, c);     \
  }

foreach_ipsecmb_cipher_op;
#undef _

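/*
 * AES-GCM does not go through the job manager; it uses the library's
 * direct GCM calls. The chained variants drive INIT / UPDATE / FINALIZE
 * across the op's buffer chunks; decrypt finalizes into a scratch tag
 * and compares it against op->tag.
 */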
#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j;                                                                  \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len);      \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i;                                                                     \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, op->tag, op->tag_len);     \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j, n_failed = 0;                                                    \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len);      \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                 u32 n_ops)                                  \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i, n_failed = 0;                                                       \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, scratch, op->tag_len);     \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}

foreach_ipsecmb_gcm_cipher_op;
#undef _

#ifdef HAVE_IPSECMB_CHACHA_POLY
always_inline void
ipsecmb_retire_aead_job (JOB_AES_HMAC *job, u32 *n_fail)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->tag_len;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if (memcmp (op->tag, job->auth_tag_output, len))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }

  clib_memcpy_fast (op->tag, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

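/*
 * CHACHA20-POLY1305 through the job API: 32-byte key, 12-byte IV,
 * 16-byte tag computed into a per-frame scratch area and copied out
 * (or compared, on the verify path) by ipsecmb_retire_aead_job().
 */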
static_always_inline u32
ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  struct JOB_AES_HMAC *job;
  MB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 scratch[VLIB_FRAME_SIZE][16];
  u8 iv_data[16];
  u8 *key = 0;

  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      __m128i iv;

      job = IMB_GET_NEXT_JOB (m);
      if (last_key_index != op->key_index)
        {
          vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

          key = kd->data;
          last_key_index = op->key_index;
        }

      job->cipher_direction = dir;
      job->chain_order = IMB_ORDER_HASH_CIPHER;
      job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
      job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
      job->enc_keys = job->dec_keys = key;
      job->key_len_in_bytes = 32;

      job->u.CHACHA20_POLY1305.aad = op->aad;
      job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
      job->src = op->src;
      job->dst = op->dst;

      if ((dir == IMB_DIR_ENCRYPT) &&
          (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          _mm_storeu_si128 ((__m128i *) iv_data, iv);
          clib_memcpy_fast (op->iv, iv_data, 12);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      job->iv = op->iv;
      job->iv_len_in_bytes = 12;
      job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
        op->len;
      job->cipher_start_src_offset_in_bytes =
        job->hash_start_src_offset_in_bytes = 0;

      job->auth_tag_output = scratch[i];
      job->auth_tag_output_len_in_bytes = 16;

      job->user_data = op;

      job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
      if (job)
        ipsecmb_retire_aead_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_aead_job (job, &n_fail);

  return n_ops - n_fail;
}

static_always_inline u32
ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                 vnet_crypto_op_chunk_t *chunks, u32 n_ops,
                                 IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  MB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 iv_data[16];
  u8 *key = 0;

  if (dir == IMB_DIR_ENCRYPT)
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          __m128i iv;
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
            {
              iv = ptd->cbc_iv;
              _mm_storeu_si128 ((__m128i *) iv_data, iv);
              clib_memcpy_fast (op->iv, iv_data, 12);
              ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);

          op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }
  else /* dir == IMB_DIR_DECRYPT */
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          u8 scratch[16];
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);

          if (memcmp (op->tag, scratch, op->tag_len))
            {
              n_fail = n_fail + 1;
              op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
            }
          else
            op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }

  return n_ops - n_fail;
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_DECRYPT);
}
#endif

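/*
 * Seed every thread's rolling IV state once at init time from
 * /dev/urandom; the ops handlers then evolve it with one AES round
 * per generated IV.
 */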
clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
  ipsecmb_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, imbm->per_thread_data)
  {
    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
      {
        err = clib_error_return_unix (0, "'/dev/urandom' read failure");
        close (fd);
        return (err);
      }
  }

  close (fd);
  return (NULL);
}

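/*
 * Key add/modify/delete hook: precompute whatever the algorithm needs
 * up front (AES encrypt/decrypt key schedules, GCM hash keys, or the
 * HMAC ipad/opad partial hashes) and cache it in imbm->key_data, so
 * the per-packet path never touches the raw key.
 */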
static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                            vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  /** TODO: add linked alg support **/
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
        return;

      if (imbm->key_data[idx] == 0)
        return;

      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_mem_free_s (imbm->key_data[idx]);
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
                                                     CLIB_CACHE_LINE_BYTES);

  /* AES CBC key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
                  ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

  /* HMAC */
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
        clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
        ad->hash_fn (key->data, vec_len (key->data), key_hash);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));

      return;
    }
}

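/*
 * Engine init: allocate one MB_MGR per VPP thread and pick the widest
 * SIMD code path the CPU supports (AVX-512, then AVX2, then SSE), then
 * register the ops handlers and fill per-algorithm metadata from the
 * manager's function pointers.
 */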
static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  MB_MGR *m = 0;
  u32 eidx;
  u8 *name;
  const u32 burst_jobs_sz = sizeof (JOB_AES_HMAC) * IMB_MAX_BURST_SIZE;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
                 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  /* *INDENT-OFF* */
  vec_foreach (ptd, imbm->per_thread_data)
    {
        ptd->mgr = alloc_mb_mgr (0);
        memset (ptd->burst_jobs, 0, burst_jobs_sz);

        if (clib_cpu_supports_avx512f ())
          init_mb_mgr_avx512 (ptd->mgr);
        else if (clib_cpu_supports_avx2 ())
          init_mb_mgr_avx2 (ptd->mgr);
        else
          init_mb_mgr_sse (ptd->mgr);

        if (ptd == imbm->per_thread_data)
          m = ptd->mgr;
    }
  /* *INDENT-ON* */

  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
    return (error);

#define _(a, b, c, d, e, f)                                              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a;                        \
  ad->block_size = d;                                                    \
  ad->data_size = e * 2;                                                 \
  ad->hash_one_block = m-> c##_one_block;                                \
  ad->hash_fn = m-> c;                                                   \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b, c)                                                            \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,       \
                                    ipsecmb_ops_cipher_enc_##a);              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,       \
                                    ipsecmb_ops_cipher_dec_##a);              \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                                  \
  ad->data_size = sizeof (ipsecmb_aes_key_data_t);                            \
  ad->keyexp = m->keyexp_##b;

  foreach_ipsecmb_cipher_op;
#undef _
#define _(a, b)                                                         \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a);    \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a);    \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,                              \
       ipsecmb_ops_gcm_cipher_enc_##a##_chained);                       \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,                              \
       ipsecmb_ops_gcm_cipher_dec_##a##_chained);                       \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
  ad->data_size = sizeof (struct gcm_key_data);                         \
  ad->aes_gcm_pre = m->gcm##b##_pre;                                    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

#ifdef HAVE_IPSECMB_CHACHA_POLY
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
                                    ipsecmb_ops_chacha_poly_enc);
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
                                    ipsecmb_ops_chacha_poly_dec);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
    ipsec_mb_ops_chacha_poly_enc_chained);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
    ipsec_mb_ops_chacha_poly_dec_chained);
  ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
  ad->data_size = 0;
#endif

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
  return (NULL);
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
{
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */