crypto: improve key handling
[vpp.git] / src / plugins / crypto_ipsecmb / ipsecmb.c
1 /*
2  * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
3  *
4  * Copyright (c) 2019 Cisco Systems
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 #include <fcntl.h>
19
20 #include <intel-ipsec-mb.h>
21
22 #include <vnet/vnet.h>
23 #include <vnet/plugin/plugin.h>
24 #include <vpp/app/version.h>
25 #include <vnet/crypto/crypto.h>
26 #include <vppinfra/cpu.h>
27
/* Per-worker-thread state: each thread owns its own multi-buffer manager
 * instance plus a rolling IV seeded from /dev/urandom and advanced with
 * one AES round per use (see crypto_ipsecmb_iv_init and the CBC path). */
typedef struct
{
  MB_MGR *mgr;
  __m128i cbc_iv;
} ipsecmb_per_thread_data_t;
33
/* Plugin-global state: one per_thread_data entry per VPP main/worker. */
typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
} ipsecmb_main_t;
38
/**
 * AES GCM key-expansion VFT
 *
 * Function-pointer type matching the intel-ipsec-mb aes_gcm_pre_*()
 * routines, which expand a raw AES key into a gcm_key_data schedule.
 * (NB: "ase" appears to be a typo for "aes"; names kept as-is since
 * other parts of the file reference them.)
 */
typedef void (*ase_gcm_pre_t) (const void *key,
                               struct gcm_key_data * key_data);

/* One key-expansion routine per AES key size; filled in at plugin init
 * with the best arch-specific variant via INIT_IPSEC_MB_GCM_PRE(). */
typedef struct ipsecmb_gcm_pre_vft_t_
{
  ase_gcm_pre_t ase_gcm_pre_128;
  ase_gcm_pre_t ase_gcm_pre_192;
  ase_gcm_pre_t ase_gcm_pre_256;
} ipsecmb_gcm_pre_vft_t;
51
52 static ipsecmb_gcm_pre_vft_t ipsecmb_gcm_pre_vft;
53
54 #define INIT_IPSEC_MB_GCM_PRE(_arch)                                    \
55   ipsecmb_gcm_pre_vft.ase_gcm_pre_128 = aes_gcm_pre_128_##_arch;        \
56   ipsecmb_gcm_pre_vft.ase_gcm_pre_192 = aes_gcm_pre_192_##_arch;        \
57   ipsecmb_gcm_pre_vft.ase_gcm_pre_256 = aes_gcm_pre_256_##_arch;
58
59 static ipsecmb_main_t ipsecmb_main;
60
/*
 * Supported HMAC variants:
 * (VNET crypto alg suffix, ipsecmb alg / *_BLOCK_SIZE prefix, mgr member)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1, SHA1, sha1)                                          \
  _(SHA256, SHA_256, sha256)                                   \
  _(SHA384, SHA_384, sha384)                                   \
  _(SHA512, SHA_512, sha512)

/*
 * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
 */
#define foreach_ipsecmb_cbc_cipher_op                          \
  _(AES_128_CBC, 128, 16, 16)                                  \
  _(AES_192_CBC, 192, 24, 16)                                  \
  _(AES_256_CBC, 256, 32, 16)

/*
 * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
 * GCM uses a 12-byte IV (4-byte salt + 8-byte per-packet IV).
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128, 16, 12)                                  \
  _(AES_192_GCM, 192, 24, 12)                                  \
  _(AES_256_GCM, 256, 32, 12)
82
/**
 * Derive the HMAC inner/outer pads from a raw key.
 *
 * XORs the key into the standard 0x36 (inner) / 0x5c (outer) pad bytes
 * and runs one block of the underlying hash over each, leaving the
 * pre-hashed pads in ipad/opad as the intel-ipsec-mb HMAC jobs expect.
 *
 * NOTE(review): a key longer than block_size makes this return early,
 * leaving ipad/opad UNINITIALIZED while the caller still submits the
 * job with them.  RFC 2104 requires such keys to first be hashed down
 * to digest size — confirm callers never pass over-long keys.
 */
always_inline void
hash_expand_keys (const MB_MGR * mgr,
                  const u8 * key,
                  u32 length,
                  u8 block_size,
                  u8 ipad[256], u8 opad[256], hash_one_block_t fn)
{
  u8 buf[block_size];
  int i = 0;

  if (length > block_size)
    {
      return;
    }

  /* inner pad: H(key ^ 0x36..36) */
  memset (buf, 0x36, sizeof (buf));
  for (i = 0; i < length; i++)
    {
      buf[i] ^= key[i];
    }
  fn (buf, ipad);

  /* outer pad: H(key ^ 0x5c..5c) */
  memset (buf, 0x5c, sizeof (buf));

  for (i = 0; i < length; i++)
    {
      buf[i] ^= key[i];
    }
  fn (buf, opad);
}
113
114 always_inline void
115 ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail)
116 {
117   vnet_crypto_op_t *op = job->user_data;
118
119   if (STS_COMPLETED != job->status)
120     {
121       op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
122       *n_fail = *n_fail + 1;
123     }
124   else
125     op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
126
127   if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
128     {
129       if ((memcmp (op->digest, job->auth_tag_output, op->digest_len)))
130         {
131           *n_fail = *n_fail + 1;
132           op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
133         }
134     }
135   else
136     clib_memcpy_fast (op->digest, job->auth_tag_output, op->digest_len);
137 }
138
139 static_always_inline u32
140 ipsecmb_ops_hmac_inline (vlib_main_t * vm,
141                          const ipsecmb_per_thread_data_t * ptd,
142                          vnet_crypto_op_t * ops[],
143                          u32 n_ops,
144                          u32 block_size,
145                          hash_one_block_t fn, JOB_HASH_ALG alg)
146 {
147   JOB_AES_HMAC *job;
148   u32 i, n_fail = 0;
149   u8 scratch[n_ops][64];
150
151   /*
152    * queue all the jobs first ...
153    */
154   for (i = 0; i < n_ops; i++)
155     {
156       vnet_crypto_op_t *op = ops[i];
157       vnet_crypto_key_t *key = vnet_crypto_get_key (op->key_index);
158       u8 ipad[256], opad[256];
159
160       hash_expand_keys (ptd->mgr, key->data, vec_len (key->data),
161                         block_size, ipad, opad, fn);
162
163       job = IMB_GET_NEXT_JOB (ptd->mgr);
164
165       job->src = op->src;
166       job->hash_start_src_offset_in_bytes = 0;
167       job->msg_len_to_hash_in_bytes = op->len;
168       job->hash_alg = alg;
169       job->auth_tag_output_len_in_bytes = op->digest_len;
170       job->auth_tag_output = scratch[i];
171
172       job->cipher_mode = NULL_CIPHER;
173       job->cipher_direction = DECRYPT;
174       job->chain_order = HASH_CIPHER;
175
176       job->aes_key_len_in_bytes = vec_len (key->data);
177
178       job->u.HMAC._hashed_auth_key_xor_ipad = ipad;
179       job->u.HMAC._hashed_auth_key_xor_opad = opad;
180       job->user_data = op;
181
182       job = IMB_SUBMIT_JOB (ptd->mgr);
183
184       if (job)
185         ipsecmb_retire_hmac_job (job, &n_fail);
186     }
187
188   /*
189    * .. then flush (i.e. complete) them
190    *  We will have queued enough to satisfy the 'multi' buffer
191    */
192   while ((job = IMB_FLUSH_JOB (ptd->mgr)))
193     {
194       ipsecmb_retire_hmac_job (job, &n_fail);
195     }
196
197   return n_ops - n_fail;
198 }
199
/*
 * Expand one handler per HMAC algorithm; each resolves the per-thread
 * state and dispatches to ipsecmb_ops_hmac_inline with the matching
 * block size, one-block hash routine and ipsecmb JOB_HASH_ALG value.
 */
#define _(a, b, c)                                                      \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_hmac_inline (vm, ptd, ops, n_ops,                  \
                                  b##_BLOCK_SIZE,                       \
                                  ptd->mgr->c##_one_block,              \
                                  b);                                   \
  }
foreach_ipsecmb_hmac_op;
#undef _
219
/* Worst-case AES key schedule: 15 round keys of 16 bytes (AES-256). */
#define EXPANDED_KEY_N_BYTES (16 * 15)
221
222 always_inline void
223 ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
224 {
225   vnet_crypto_op_t *op = job->user_data;
226
227   if (STS_COMPLETED != job->status)
228     {
229       op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
230       *n_fail = *n_fail + 1;
231     }
232   else
233     op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
234 }
235
236 static_always_inline u32
237 ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
238                                ipsecmb_per_thread_data_t * ptd,
239                                vnet_crypto_op_t * ops[],
240                                u32 n_ops, u32 key_len, u32 iv_len,
241                                keyexp_t fn, JOB_CIPHER_DIRECTION direction)
242 {
243   JOB_AES_HMAC *job;
244   u32 i, n_fail = 0;
245
246   /*
247    * queue all the jobs first ...
248    */
249   for (i = 0; i < n_ops; i++)
250     {
251       u8 aes_enc_key_expanded[EXPANDED_KEY_N_BYTES];
252       u8 aes_dec_key_expanded[EXPANDED_KEY_N_BYTES];
253       vnet_crypto_op_t *op = ops[i];
254       vnet_crypto_key_t *key = vnet_crypto_get_key (op->key_index);
255       __m128i iv;
256
257       fn (key->data, aes_enc_key_expanded, aes_dec_key_expanded);
258
259       job = IMB_GET_NEXT_JOB (ptd->mgr);
260
261       job->src = op->src;
262       job->dst = op->dst;
263       job->msg_len_to_cipher_in_bytes = op->len;
264       job->cipher_start_src_offset_in_bytes = 0;
265
266       job->hash_alg = NULL_HASH;
267       job->cipher_mode = CBC;
268       job->cipher_direction = direction;
269       job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
270
271       if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
272         {
273           iv = ptd->cbc_iv;
274           _mm_storeu_si128 ((__m128i *) op->iv, iv);
275           ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
276         }
277
278       job->aes_key_len_in_bytes = key_len;
279       job->aes_enc_key_expanded = aes_enc_key_expanded;
280       job->aes_dec_key_expanded = aes_dec_key_expanded;
281       job->iv = op->iv;
282       job->iv_len_in_bytes = iv_len;
283
284       job->user_data = op;
285
286       job = IMB_SUBMIT_JOB (ptd->mgr);
287
288       if (job)
289         ipsecmb_retire_cipher_job (job, &n_fail);
290     }
291
292   /*
293    * .. then flush (i.e. complete) them
294    *  We will have queued enough to satisfy the 'multi' buffer
295    */
296   while ((job = IMB_FLUSH_JOB (ptd->mgr)))
297     {
298       ipsecmb_retire_cipher_job (job, &n_fail);
299     }
300
301   return n_ops - n_fail;
302 }
303
/*
 * Expand one ENCRYPT handler per CBC variant; each resolves the
 * per-thread mgr and dispatches to ipsecmb_ops_cbc_cipher_inline with
 * the matching key length, IV length and key-expansion routine.
 */
#define _(a, b, c, d)                                                   \
static_always_inline u32                                                \
ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm,                       \
                                vnet_crypto_op_t * ops[],               \
                                u32 n_ops)                              \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
                                        ptd->mgr->keyexp_##b,           \
                                        ENCRYPT);                       \
  }
foreach_ipsecmb_cbc_cipher_op;
#undef _
322
/*
 * Expand one DECRYPT handler per CBC variant (mirror of the encrypt
 * expansion above, differing only in the cipher direction).
 */
#define _(a, b, c, d)                                                   \
static_always_inline u32                                                \
ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm,                       \
                                vnet_crypto_op_t * ops[],               \
                                u32 n_ops)                              \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
                                        ptd->mgr->keyexp_##b,           \
                                        DECRYPT);                       \
  }
foreach_ipsecmb_cbc_cipher_op;
#undef _
341
342 always_inline void
343 ipsecmb_retire_gcm_cipher_job (JOB_AES_HMAC * job,
344                                u32 * n_fail, JOB_CIPHER_DIRECTION direction)
345 {
346   vnet_crypto_op_t *op = job->user_data;
347
348   if (STS_COMPLETED != job->status)
349     {
350       op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
351       *n_fail = *n_fail + 1;
352     }
353   else
354     op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
355
356   if (DECRYPT == direction)
357     {
358       if ((memcmp (op->tag, job->auth_tag_output, op->tag_len)))
359         {
360           *n_fail = *n_fail + 1;
361           op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
362         }
363     }
364 }
365
366 static_always_inline u32
367 ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
368                                ipsecmb_per_thread_data_t * ptd,
369                                vnet_crypto_op_t * ops[],
370                                u32 n_ops, u32 key_len, u32 iv_len,
371                                ase_gcm_pre_t fn,
372                                JOB_CIPHER_DIRECTION direction)
373 {
374   JOB_AES_HMAC *job;
375   u32 i, n_fail = 0;
376   u8 scratch[n_ops][64];
377
378   /*
379    * queue all the jobs first ...
380    */
381   for (i = 0; i < n_ops; i++)
382     {
383       struct gcm_key_data key_data;
384       vnet_crypto_op_t *op = ops[i];
385       vnet_crypto_key_t *key = vnet_crypto_get_key (op->key_index);
386       u32 nonce[3];
387       __m128i iv;
388
389       fn (key->data, &key_data);
390
391       job = IMB_GET_NEXT_JOB (ptd->mgr);
392
393       job->src = op->src;
394       job->dst = op->dst;
395       job->msg_len_to_cipher_in_bytes = op->len;
396       job->cipher_start_src_offset_in_bytes = 0;
397
398       job->hash_alg = AES_GMAC;
399       job->cipher_mode = GCM;
400       job->cipher_direction = direction;
401       job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
402
403       if (direction == ENCRYPT)
404         {
405           if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
406             {
407               iv = ptd->cbc_iv;
408               // only use 8 bytes of the IV
409               clib_memcpy_fast (op->iv, &iv, 8);
410               ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
411             }
412           nonce[0] = op->salt;
413           clib_memcpy_fast (nonce + 1, op->iv, 8);
414           job->iv = (u8 *) nonce;
415         }
416       else
417         {
418           nonce[0] = op->salt;
419           clib_memcpy_fast (nonce + 1, op->iv, 8);
420           job->iv = op->iv;
421         }
422
423       job->aes_key_len_in_bytes = key_len;
424       job->aes_enc_key_expanded = &key_data;
425       job->aes_dec_key_expanded = &key_data;
426       job->iv_len_in_bytes = iv_len;
427
428       job->u.GCM.aad = op->aad;
429       job->u.GCM.aad_len_in_bytes = op->aad_len;
430       job->auth_tag_output_len_in_bytes = op->tag_len;
431       if (DECRYPT == direction)
432         job->auth_tag_output = scratch[i];
433       else
434         job->auth_tag_output = op->tag;
435       job->user_data = op;
436
437       job = IMB_SUBMIT_JOB (ptd->mgr);
438
439       if (job)
440         ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
441     }
442
443   /*
444    * .. then flush (i.e. complete) them
445    *  We will have queued enough to satisfy the 'multi' buffer
446    */
447   while ((job = IMB_FLUSH_JOB (ptd->mgr)))
448     {
449       ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
450     }
451
452   return n_ops - n_fail;
453 }
454
/*
 * Expand one ENCRYPT handler per GCM variant; each resolves the
 * per-thread mgr and dispatches to ipsecmb_ops_gcm_cipher_inline with
 * the arch-selected key-expansion routine from the VFT.
 */
#define _(a, b, c, d)                                                        \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_per_thread_data_t *ptd;                                            \
  ipsecmb_main_t *imbm;                                                      \
                                                                             \
  imbm = &ipsecmb_main;                                                      \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);          \
                                                                             \
  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d,           \
                                        ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
                                        ENCRYPT);                            \
  }
foreach_ipsecmb_gcm_cipher_op;
#undef _
473
/*
 * Expand one DECRYPT handler per GCM variant (mirror of the encrypt
 * expansion above, differing only in the cipher direction).
 */
#define _(a, b, c, d)                                                        \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_per_thread_data_t *ptd;                                            \
  ipsecmb_main_t *imbm;                                                      \
                                                                             \
  imbm = &ipsecmb_main;                                                      \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);          \
                                                                             \
  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d,           \
                                        ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
                                        DECRYPT);                            \
  }
foreach_ipsecmb_gcm_cipher_op;
#undef _
492
493 clib_error_t *
494 crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
495 {
496   ipsecmb_per_thread_data_t *ptd;
497   clib_error_t *err = 0;
498   int fd;
499
500   if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
501     return clib_error_return_unix (0, "failed to open '/dev/urandom'");
502
503   vec_foreach (ptd, imbm->per_thread_data)
504   {
505     if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
506       {
507         err = clib_error_return_unix (0, "'/dev/urandom' read failure");
508         close (fd);
509         return (err);
510       }
511   }
512
513   close (fd);
514   return (NULL);
515 }
516
/**
 * Plugin init: register the engine with the vnet crypto layer, allocate
 * and initialize one MB manager per VPP thread (choosing the best SIMD
 * variant the CPU supports), seed the per-thread IVs, and register the
 * HMAC/CBC/GCM op handlers generated by the foreach macros above.
 */
static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  u32 eidx;
  u8 *name;

  /* the crypto layer must exist before we can register into it */
  if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
    return error;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
                 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate (imbm->per_thread_data, tm->n_vlib_mains - 1);

  /* pick the best SIMD implementation once; the GCM key-expansion VFT
   * is (redundantly but harmlessly) re-assigned per thread */
  if (clib_cpu_supports_avx512f ())
    {
      vec_foreach (ptd, imbm->per_thread_data)
      {
        ptd->mgr = alloc_mb_mgr (0);
        init_mb_mgr_avx512 (ptd->mgr);
        INIT_IPSEC_MB_GCM_PRE (avx_gen4);
      }
    }
  else if (clib_cpu_supports_avx2 ())
    {
      vec_foreach (ptd, imbm->per_thread_data)
      {
        ptd->mgr = alloc_mb_mgr (0);
        init_mb_mgr_avx2 (ptd->mgr);
        INIT_IPSEC_MB_GCM_PRE (avx_gen2);
      }
    }
  else
    {
      vec_foreach (ptd, imbm->per_thread_data)
      {
        ptd->mgr = alloc_mb_mgr (0);
        init_mb_mgr_sse (ptd->mgr);
        INIT_IPSEC_MB_GCM_PRE (sse);
      }
    }

  /* seed the per-thread rolling IVs (only used on AES-capable CPUs) */
  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
    return (error);


#define _(a, b, c)                                                       \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_cbc_cipher_enc_##a);    \

  foreach_ipsecmb_cbc_cipher_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_cbc_cipher_dec_##a);    \

  foreach_ipsecmb_cbc_cipher_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a);    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a);    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

  return (NULL);
}
604
/* Run crypto_ipsecmb_init during VPP startup. */
VLIB_INIT_FUNCTION (crypto_ipsecmb_init);

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC multi-buffer",
};
/* *INDENT-ON* */
614
615 /*
616  * fd.io coding-style-patch-verification: ON
617  *
618  * Local Variables:
619  * eval: (c-set-style "gnu")
620  * End:
621  */