crypto-ipsecmb: enable GCM
src/plugins/crypto_ipsecmb/ipsecmb.c
/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

typedef struct
{
  MB_MGR *mgr;
  __m128i cbc_iv;
} ipsecmb_per_thread_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
} ipsecmb_main_t;
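
/*
 * Each worker thread owns its own MB_MGR instance so jobs can be queued,
 * submitted and flushed without locking; cbc_iv is that thread's running
 * IV state, advanced by one AES round each time a fresh IV is minted.
 */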

/**
 * AES GCM key expansion VFT
 */
typedef void (*ase_gcm_pre_t) (const void *key,
                               struct gcm_key_data * key_data);

typedef struct ipsecmb_gcm_pre_vft_t_
{
  ase_gcm_pre_t ase_gcm_pre_128;
  ase_gcm_pre_t ase_gcm_pre_192;
  ase_gcm_pre_t ase_gcm_pre_256;
} ipsecmb_gcm_pre_vft_t;

static ipsecmb_gcm_pre_vft_t ipsecmb_gcm_pre_vft;

#define INIT_IPSEC_MB_GCM_PRE(_arch)                                    \
  ipsecmb_gcm_pre_vft.ase_gcm_pre_128 = aes_gcm_pre_128_##_arch;        \
  ipsecmb_gcm_pre_vft.ase_gcm_pre_192 = aes_gcm_pre_192_##_arch;        \
  ipsecmb_gcm_pre_vft.ase_gcm_pre_256 = aes_gcm_pre_256_##_arch;
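
/*
 * The VFT above is filled in at init time with the arch-specific GCM
 * key-expansion routines from intel-ipsec-mb (sse, avx_gen2 or avx_gen4),
 * matching the MB_MGR variant selected for the running CPU.
 */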

static ipsecmb_main_t ipsecmb_main;

/*
 * (Alg, ipsec-mb alg name, one-block hash fn)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1, SHA1, sha1)                                          \
  _(SHA256, SHA_256, sha256)                                   \
  _(SHA384, SHA_384, sha384)                                   \
  _(SHA512, SHA_512, sha512)

/*
 * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
 */
#define foreach_ipsecmb_cbc_cipher_op                          \
  _(AES_128_CBC, 128, 16, 16)                                  \
  _(AES_192_CBC, 192, 24, 16)                                  \
  _(AES_256_CBC, 256, 32, 16)

/*
 * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128, 16, 12)                                  \
  _(AES_192_GCM, 192, 24, 12)                                  \
  _(AES_256_GCM, 256, 32, 12)

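/*
 * HMAC key expansion per RFC 2104: ipad = key ^ 0x36.., opad = key ^ 0x5c..,
 * each pre-hashed with a single compression-function call. Keys longer than
 * the hash block size are not handled here (the function returns without
 * writing ipad/opad), so callers are expected to pass keys of at most one
 * block.
 */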
always_inline void
hash_expand_keys (const MB_MGR * mgr,
                  const u8 * key,
                  u32 length,
                  u8 block_size,
                  u8 ipad[256], u8 opad[256], hash_one_block_t fn)
{
  u8 buf[block_size];
  int i = 0;

  if (length > block_size)
    {
      return;
    }

  memset (buf, 0x36, sizeof (buf));
  for (i = 0; i < length; i++)
    {
      buf[i] ^= key[i];
    }
  fn (buf, ipad);

  memset (buf, 0x5c, sizeof (buf));

  for (i = 0; i < length; i++)
    {
      buf[i] ^= key[i];
    }
  fn (buf, opad);
}

always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, op->digest_len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
        }
    }
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, op->digest_len);
}

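/*
 * The multi-buffer pattern used below: IMB_GET_NEXT_JOB hands out the next
 * free job slot, IMB_SUBMIT_JOB either returns NULL (the job is held until
 * enough work is queued to fill the SIMD lanes) or returns a pointer to some
 * earlier job that has now completed, and IMB_FLUSH_JOB forces out whatever
 * is still queued. Hence every buffer a job points at must stay valid until
 * the final flush, not just for one loop iteration.
 */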
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t * vm,
                         const ipsecmb_per_thread_data_t * ptd,
                         vnet_crypto_op_t * ops[],
                         u32 n_ops,
                         u32 block_size,
                         hash_one_block_t fn, JOB_HASH_ALG alg)
{
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][64];
  /* expanded keys are referenced by queued jobs, so they must stay
   * valid until the final flush */
  u8 ipad[n_ops][256], opad[n_ops][256];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];

      hash_expand_keys (ptd->mgr, op->key, op->key_len,
                        block_size, ipad[i], opad[i], fn);

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = op->digest_len;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = NULL_CIPHER;
      job->cipher_direction = DECRYPT;
      job->chain_order = HASH_CIPHER;

      job->aes_key_len_in_bytes = op->key_len;

      job->u.HMAC._hashed_auth_key_xor_ipad = ipad[i];
      job->u.HMAC._hashed_auth_key_xor_opad = opad[i];
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_hmac_job (job, &n_fail);
    }

  /*
   * ... then flush (i.e. complete) them.
   * We will have queued enough to satisfy the 'multi' buffer.
   */
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    {
      ipsecmb_retire_hmac_job (job, &n_fail);
    }

  return n_ops - n_fail;
}
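/*
 * Instantiate one handler per hash algorithm; each binds the library's
 * block-size constant and the per-thread manager's one-block hash function
 * for that algorithm.
 */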
#define _(a, b, c)                                                      \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_hmac_inline (vm, ptd, ops, n_ops,                  \
                                  b##_BLOCK_SIZE,                       \
                                  ptd->mgr->c##_one_block,              \
                                  b);                                   \
}
foreach_ipsecmb_hmac_op;
#undef _

#define EXPANDED_KEY_N_BYTES (16 * 15)
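/* 15 round keys of 16 bytes each is the largest schedule, AES-256
 * (14 rounds plus the initial key); AES-128 and AES-192 need less. */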

always_inline void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

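/*
 * For encrypt ops flagged VNET_CRYPTO_OP_FLAG_INIT_IV, a fresh unpredictable
 * IV is minted from the per-thread state: the current value is written back
 * to op->iv and the state is advanced by one AES round (_mm_aesenc_si128).
 */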
static_always_inline u32
ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
                               ipsecmb_per_thread_data_t * ptd,
                               vnet_crypto_op_t * ops[],
                               u32 n_ops, u32 key_len, u32 iv_len,
                               keyexp_t fn, JOB_CIPHER_DIRECTION direction)
{
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  /* expanded keys are referenced by queued jobs, so they must stay
   * valid until the final flush */
  u8 aes_enc_key_expanded[n_ops][EXPANDED_KEY_N_BYTES];
  u8 aes_dec_key_expanded[n_ops][EXPANDED_KEY_N_BYTES];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      __m128i iv;

      fn (op->key, aes_enc_key_expanded[i], aes_dec_key_expanded[i]);

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = NULL_HASH;
      job->cipher_mode = CBC;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          _mm_storeu_si128 ((__m128i *) op->iv, iv);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      job->aes_key_len_in_bytes = key_len;
      job->aes_enc_key_expanded = aes_enc_key_expanded[i];
      job->aes_dec_key_expanded = aes_dec_key_expanded[i];
      job->iv = op->iv;
      job->iv_len_in_bytes = iv_len;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_cipher_job (job, &n_fail);
    }

  /*
   * ... then flush (i.e. complete) them.
   * We will have queued enough to satisfy the 'multi' buffer.
   */
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    {
      ipsecmb_retire_cipher_job (job, &n_fail);
    }

  return n_ops - n_fail;
}

#define _(a, b, c, d)                                                   \
static_always_inline u32                                                \
ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm,                       \
                                vnet_crypto_op_t * ops[],               \
                                u32 n_ops)                              \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
                                        ptd->mgr->keyexp_##b,           \
                                        ENCRYPT);                       \
}
foreach_ipsecmb_cbc_cipher_op;
#undef _

#define _(a, b, c, d)                                                   \
static_always_inline u32                                                \
ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm,                       \
                                vnet_crypto_op_t * ops[],               \
                                u32 n_ops)                              \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
                                        ptd->mgr->keyexp_##b,           \
                                        DECRYPT);                       \
}
foreach_ipsecmb_cbc_cipher_op;
#undef _

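/*
 * On encrypt the tag is written straight to op->tag; on decrypt it is
 * computed into a scratch buffer and compared against the received tag.
 */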
always_inline void
ipsecmb_retire_gcm_cipher_job (JOB_AES_HMAC * job,
                               u32 * n_fail, JOB_CIPHER_DIRECTION direction)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (DECRYPT == direction)
    {
      if ((memcmp (op->tag, job->auth_tag_output, op->tag_len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
        }
    }
}

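/*
 * The GCM nonce handed to the library is the 4 byte salt followed by the
 * 8 byte per-packet IV, i.e. the 12 byte layout of RFC 4106; it is
 * assembled per job and, like the expanded key data, must outlive the
 * submit loop.
 */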
static_always_inline u32
ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
                               ipsecmb_per_thread_data_t * ptd,
                               vnet_crypto_op_t * ops[],
                               u32 n_ops, u32 key_len, u32 iv_len,
                               ase_gcm_pre_t fn,
                               JOB_CIPHER_DIRECTION direction)
{
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][64];
  /* per-job key data and nonces are referenced by queued jobs, so they
   * must stay valid until the final flush */
  struct gcm_key_data key_data[n_ops];
  u32 nonce[n_ops][3];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      __m128i iv;

      fn (op->key, &key_data[i]);

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = AES_GMAC;
      job->cipher_mode = GCM;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          /* only use 8 bytes of the IV */
          clib_memcpy_fast (op->iv, &iv, 8);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      /* nonce = salt || IV, used for both directions */
      nonce[i][0] = op->salt;
      clib_memcpy_fast (nonce[i] + 1, op->iv, 8);
      job->iv = (u8 *) nonce[i];

      job->aes_key_len_in_bytes = key_len;
      job->aes_enc_key_expanded = &key_data[i];
      job->aes_dec_key_expanded = &key_data[i];
      job->iv_len_in_bytes = iv_len;

      job->u.GCM.aad = op->aad;
      job->u.GCM.aad_len_in_bytes = op->aad_len;
      job->auth_tag_output_len_in_bytes = op->tag_len;
      if (DECRYPT == direction)
        job->auth_tag_output = scratch[i];
      else
        job->auth_tag_output = op->tag;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
    }

  /*
   * ... then flush (i.e. complete) them.
   * We will have queued enough to satisfy the 'multi' buffer.
   */
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    {
      ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
    }

  return n_ops - n_fail;
}

#define _(a, b, c, d)                                                        \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_per_thread_data_t *ptd;                                            \
  ipsecmb_main_t *imbm;                                                      \
                                                                             \
  imbm = &ipsecmb_main;                                                      \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);          \
                                                                             \
  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d,           \
                                        ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
                                        ENCRYPT);                            \
}
foreach_ipsecmb_gcm_cipher_op;
#undef _

#define _(a, b, c, d)                                                        \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_per_thread_data_t *ptd;                                            \
  ipsecmb_main_t *imbm;                                                      \
                                                                             \
  imbm = &ipsecmb_main;                                                      \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);          \
                                                                             \
  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d,           \
                                        ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
                                        DECRYPT);                            \
}
foreach_ipsecmb_gcm_cipher_op;
#undef _

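/*
 * Seed each thread's IV state with random bytes; the state is afterwards
 * only ever advanced with the AES round primitive, which is why the call
 * below is gated on AES-NI support at init time.
 */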
clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
  ipsecmb_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, imbm->per_thread_data)
  {
    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
      {
        err = clib_error_return_unix (0, "'/dev/urandom' read failure");
        close (fd);
        return (err);
      }
  }

  close (fd);
  return (NULL);
}

static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  u32 eidx;

  if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
    return error;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80,
                                      "Intel IPSEC multi-buffer");

  vec_validate (imbm->per_thread_data, tm->n_vlib_mains - 1);

  if (clib_cpu_supports_avx512f ())
    {
      vec_foreach (ptd, imbm->per_thread_data)
      {
        ptd->mgr = alloc_mb_mgr (0);
        init_mb_mgr_avx512 (ptd->mgr);
        INIT_IPSEC_MB_GCM_PRE (avx_gen4);
      }
    }
  else if (clib_cpu_supports_avx2 ())
    {
      vec_foreach (ptd, imbm->per_thread_data)
      {
        ptd->mgr = alloc_mb_mgr (0);
        init_mb_mgr_avx2 (ptd->mgr);
        INIT_IPSEC_MB_GCM_PRE (avx_gen2);
      }
    }
  else
    {
      vec_foreach (ptd, imbm->per_thread_data)
      {
        ptd->mgr = alloc_mb_mgr (0);
        init_mb_mgr_sse (ptd->mgr);
        INIT_IPSEC_MB_GCM_PRE (sse);
      }
    }

  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
    return (error);

#define _(a, b, c)                                                       \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_cbc_cipher_enc_##a);    \

  foreach_ipsecmb_cbc_cipher_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_cbc_cipher_dec_##a);    \

  foreach_ipsecmb_cbc_cipher_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a);    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a);    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

  return (NULL);
}

VLIB_INIT_FUNCTION (crypto_ipsecmb_init);

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC multi-buffer",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */