build: Allow ipsec-mb plugin to build with libipsec_mb 0.55
vpp.git: src/plugins/crypto_ipsecmb/ipsecmb.c
/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

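/*
 * Sizing notes: HMAC pad state is bounded by the SHA-512 block size, and an
 * expanded AES key schedule occupies at most 15 round keys of 16 bytes each
 * (AES-256 has 14 rounds plus the initial key).
 */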
#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
#define EXPANDED_KEY_N_BYTES (16 * 15)

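/*
 * Each worker thread owns its own multi-buffer manager plus a 128-bit random
 * state (seeded from /dev/urandom in crypto_ipsecmb_iv_init, advanced by one
 * AES round per use) from which fresh CBC IVs are generated.
 */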
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  MB_MGR *mgr;
  __m128i cbc_iv;
} ipsecmb_per_thread_data_t;

typedef struct
{
  u16 data_size;
  u8 block_size;
  aes_gcm_pre_t aes_gcm_pre;
  keyexp_t keyexp;
  hash_one_block_t hash_one_block;
  hash_fn_t hash_fn;
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;
} ipsecmb_main_t;

typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/*
 * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1,   SHA1,    sha1,   64,  20, 20)                      \
  _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
  _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
  _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
  _(SHA512, SHA_512, sha512, 128, 64, 64)

/*
 * (Alg, key-len-bits, JOB_CIPHER_MODE)
 */
#define foreach_ipsecmb_cipher_op                                             \
  _ (AES_128_CBC, 128, CBC)                                                   \
  _ (AES_192_CBC, 192, CBC)                                                   \
  _ (AES_256_CBC, 256, CBC)                                                   \
  _ (AES_128_CTR, 128, CNTR)                                                  \
  _ (AES_192_CTR, 192, CNTR)                                                  \
  _ (AES_256_CTR, 256, CNTR)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128)                                          \
  _(AES_192_GCM, 192)                                          \
  _(AES_256_GCM, 256)

static_always_inline vnet_crypto_op_status_t
ipsecmb_status_job (JOB_STS status)
{
  switch (status)
    {
    case STS_COMPLETED:
      return VNET_CRYPTO_OP_STATUS_COMPLETED;
    case STS_BEING_PROCESSED:
    case STS_COMPLETED_AES:
    case STS_COMPLETED_HMAC:
      return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
    case STS_INVALID_ARGS:
    case STS_INTERNAL_ERROR:
    case STS_ERROR:
      return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
    }
  ASSERT (0);
  return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
}

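/*
 * Post-process a finished HMAC job: propagate failures, verify the computed
 * tag against op->digest when the HMAC_CHECK flag is set, otherwise copy the
 * (possibly truncated) digest out of the scratch buffer.
 */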
always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }
  else if (len == digest_size)
    clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

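/*
 * Multi-buffer pattern: each op becomes a job and is submitted; the manager
 * may hand back an earlier job that completed as a side effect, which is
 * retired immediately. Once all ops are queued, IMB_FLUSH_JOB drains
 * whatever is still in flight.
 */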
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                         u32 n_ops, u32 block_size, u32 hash_size,
                         u32 digest_size, JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][digest_size];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 *kd = (u8 *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = digest_size;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = NULL_CIPHER;
      job->cipher_direction = DECRYPT;
      job->chain_order = HASH_CIPHER;

      job->u.HMAC._hashed_auth_key_xor_ipad = kd;
      job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_hmac_job (job, &n_fail, digest_size);

  return n_ops - n_fail;
}

#define _(a, b, c, d, e, f)                                             \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); }        \

foreach_ipsecmb_hmac_op;
#undef _

always_inline void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

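/*
 * AES-CBC/CTR via the job API. For encrypt ops that request it, a fresh IV
 * is produced from the per-thread random state and written back into the op
 * before the job is submitted.
 */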
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction,
                               JOB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;

  for (i = 0; i < n_ops; i++)
    {
      ipsecmb_aes_key_data_t *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
      __m128i iv;

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = NULL_HASH;
      job->cipher_mode = cipher_mode;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          _mm_storeu_si128 ((__m128i *) op->iv, iv);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      job->aes_key_len_in_bytes = key_len / 8;
      job->aes_enc_key_expanded = kd->enc_key_exp;
      job->aes_dec_key_expanded = kd->dec_key_exp;
      job->iv = op->iv;
      job->iv_len_in_bytes = AES_BLOCK_SIZE;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_cipher_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_cipher_job (job, &n_fail);

  return n_ops - n_fail;
}

#define _(a, b, c)                                                            \
  static_always_inline u32 ipsecmb_ops_cipher_enc_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, ENCRYPT, c);     \
  }                                                                           \
                                                                              \
  static_always_inline u32 ipsecmb_ops_cipher_dec_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, DECRYPT, c);     \
  }

foreach_ipsecmb_cipher_op;
#undef _

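/*
 * AES-GCM does not go through the job manager; it uses the library's direct
 * GCM entry points. The chained variants stream scattered buffers through
 * INIT / UPDATE / FINALIZE, while the flat variants make a single call.
 * Decrypt computes the tag into a scratch buffer and compares it against
 * op->tag.
 */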
#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j;                                                                  \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len);      \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i;                                                                     \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, op->tag, op->tag_len);     \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j, n_failed = 0;                                                    \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len);      \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                 u32 n_ops)                                  \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i, n_failed = 0;                                                       \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, scratch, op->tag_len);     \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}

foreach_ipsecmb_gcm_cipher_op;
#undef _

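/*
 * CHACHA20-POLY1305 support is compiled in only when the underlying
 * intel-ipsec-mb library provides it; the build system is assumed to define
 * HAVE_IPSECMB_CHACHA_POLY accordingly (newer library releases expose the
 * IMB_CIPHER_CHACHA20_POLY1305 job interface).
 */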
#ifdef HAVE_IPSECMB_CHACHA_POLY
always_inline void
ipsecmb_retire_aead_job (JOB_AES_HMAC *job, u32 *n_fail)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->tag_len;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if (memcmp (op->tag, job->auth_tag_output, len))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }

  clib_memcpy_fast (op->tag, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

static_always_inline u32
ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  struct IMB_JOB *job;
  MB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 scratch[VLIB_FRAME_SIZE][16];
  u8 iv_data[16];
  u8 *key = 0;

  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      __m128i iv;

      job = IMB_GET_NEXT_JOB (m);
      if (last_key_index != op->key_index)
        {
          vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

          key = kd->data;
          last_key_index = op->key_index;
        }

      job->cipher_direction = dir;
      job->chain_order = IMB_ORDER_HASH_CIPHER;
      job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
      job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
      job->enc_keys = job->dec_keys = key;
      job->key_len_in_bytes = 32;

      job->u.CHACHA20_POLY1305.aad = op->aad;
      job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
      job->src = op->src;
      job->dst = op->dst;

      if ((dir == IMB_DIR_ENCRYPT) &&
          (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          _mm_storeu_si128 ((__m128i *) iv_data, iv);
          clib_memcpy_fast (op->iv, iv_data, 12);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      job->iv = op->iv;
      job->iv_len_in_bytes = 12;
      job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
        op->len;
      job->cipher_start_src_offset_in_bytes =
        job->hash_start_src_offset_in_bytes = 0;

      job->auth_tag_output = scratch[i];
      job->auth_tag_output_len_in_bytes = 16;

      job->user_data = op;

      job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
      if (job)
        ipsecmb_retire_aead_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_aead_job (job, &n_fail);

  return n_ops - n_fail;
}

static_always_inline u32
ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                 vnet_crypto_op_chunk_t *chunks, u32 n_ops,
                                 IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  MB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 iv_data[16];
  u8 *key = 0;

  if (dir == IMB_DIR_ENCRYPT)
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          __m128i iv;
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
            {
              iv = ptd->cbc_iv;
              _mm_storeu_si128 ((__m128i *) iv_data, iv);
              clib_memcpy_fast (op->iv, iv_data, 12);
              ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);

          op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }
  else /* dir == IMB_DIR_DECRYPT */
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          u8 scratch[16];
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);

          if (memcmp (op->tag, scratch, op->tag_len))
            {
              n_fail = n_fail + 1;
              op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
            }
          else
            op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }

  return n_ops - n_fail;
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_DECRYPT);
}
#endif

clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
  ipsecmb_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, imbm->per_thread_data)
  {
    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
      {
        err = clib_error_return_unix (0, "'/dev/urandom' read failure");
        close (fd);
        return (err);
      }
  }

  close (fd);
  return (NULL);
}

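/*
 * Key add/modify/delete callback. On add or modify, per-key material is
 * precomputed once so the data path never touches the raw key: AES keys are
 * expanded for both directions, GCM keys get their hash subkey derived, and
 * HMAC keys are turned into the hashed ipad/opad blocks the job API consumes.
 */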
static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                            vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  /** TODO: add linked alg support **/
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
        return;

      if (imbm->key_data[idx] == 0)
        return;

      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_mem_free_s (imbm->key_data[idx]);
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
                                                     CLIB_CACHE_LINE_BYTES);

  /* AES CBC key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
                  ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

  /* HMAC */
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
        clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
        ad->hash_fn (key->data, vec_len (key->data), key_hash);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));

      return;
    }
}

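/*
 * Engine init: allocate one multi-buffer manager per VPP thread, initialized
 * for the best instruction set the CPU supports (AVX-512, AVX2, else SSE),
 * then register op handlers and alg data for each supported algorithm.
 */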
static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  MB_MGR *m = 0;
  u32 eidx;
  u8 *name;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
                 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  /* *INDENT-OFF* */
  vec_foreach (ptd, imbm->per_thread_data)
    {
        ptd->mgr = alloc_mb_mgr (0);
        if (clib_cpu_supports_avx512f ())
          init_mb_mgr_avx512 (ptd->mgr);
        else if (clib_cpu_supports_avx2 ())
          init_mb_mgr_avx2 (ptd->mgr);
        else
          init_mb_mgr_sse (ptd->mgr);

        if (ptd == imbm->per_thread_data)
          m = ptd->mgr;
    }
  /* *INDENT-ON* */

  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
    return (error);

#define _(a, b, c, d, e, f)                                              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a;                        \
  ad->block_size = d;                                                    \
  ad->data_size = e * 2;                                                 \
  ad->hash_one_block = m-> c##_one_block;                                \
  ad->hash_fn = m-> c;                                                   \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b, c)                                                            \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,       \
                                    ipsecmb_ops_cipher_enc_##a);              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,       \
                                    ipsecmb_ops_cipher_dec_##a);              \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                                  \
  ad->data_size = sizeof (ipsecmb_aes_key_data_t);                            \
  ad->keyexp = m->keyexp_##b;

  foreach_ipsecmb_cipher_op;
#undef _
#define _(a, b)                                                         \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a);    \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a);    \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,                              \
       ipsecmb_ops_gcm_cipher_enc_##a##_chained);                       \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,                              \
       ipsecmb_ops_gcm_cipher_dec_##a##_chained);                       \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
  ad->data_size = sizeof (struct gcm_key_data);                         \
  ad->aes_gcm_pre = m->gcm##b##_pre;                                    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

#ifdef HAVE_IPSECMB_CHACHA_POLY
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
                                    ipsecmb_ops_chacha_poly_enc);
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
                                    ipsecmb_ops_chacha_poly_dec);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
    ipsec_mb_ops_chacha_poly_enc_chained);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
    ipsec_mb_ops_chacha_poly_dec_chained);
  ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
  ad->data_size = 0;
#endif

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
  return (NULL);
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
{
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */