crypto: Intel IPSEC-MB engine
[vpp.git] / src / plugins / crypto_ipsecmb / ipsecmb.c
1 /*
2  * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
3  *
 * Copyright (c) 2019 Cisco Systems
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 #include <intel-ipsec-mb.h>
19
20 #include <vnet/vnet.h>
21 #include <vnet/plugin/plugin.h>
22 #include <vpp/app/version.h>
23 #include <vnet/crypto/crypto.h>
24 #include <vppinfra/cpu.h>
25
26 typedef struct
27 {
28   MB_MGR *mgr;
29 } ipsecmb_per_thread_data_t;
30
31 typedef struct ipsecmb_main_t_
32 {
33   ipsecmb_per_thread_data_t *per_thread_data;
34 } ipsecmb_main_t;
35
36 static ipsecmb_main_t ipsecmb_main;
37
38 #define foreach_ipsecmb_hmac_op                                \
39   _(SHA1, SHA1, sha1)                                          \
40   _(SHA256, SHA_256, sha256)                                   \
41   _(SHA384, SHA_384, sha384)                                   \
42   _(SHA512, SHA_512, sha512)
43
44 #define foreach_ipsecmb_cipher_op                              \
45   _(AES_128_CBC, 128)                                          \
46   _(AES_192_CBC, 192)                                          \
47   _(AES_256_CBC, 256)
48
/*
 * Derive the HMAC inner/outer pads for a key (RFC 2104 step 1-2):
 * ipad = H(key XOR 0x36..36), opad = H(key XOR 0x5c..5c), each computed
 * over exactly one hash block via the supplied single-block function.
 *
 * @param mgr        multi-buffer manager (currently unused here)
 * @param key        raw HMAC key bytes
 * @param length     key length in bytes; must be <= block_size
 * @param block_size hash block size in bytes (e.g. 64 for SHA-1/256)
 * @param ipad       out: hashed (key XOR ipad-constant) block
 * @param opad       out: hashed (key XOR opad-constant) block
 * @param fn         single-block compression function for the hash alg
 *
 * NOTE(review): keys longer than block_size are silently rejected and
 * ipad/opad are left UNINITIALIZED -- RFC 2104 says such keys should be
 * hashed down first.  Callers must guarantee length <= block_size;
 * confirm against the key-length validation done by the crypto layer.
 */
always_inline void
hash_expand_keys (const MB_MGR * mgr,
                  const u8 * key,
                  u32 length,
                  u8 block_size,
                  u8 ipad[256], u8 opad[256], hash_one_block_t fn)
{
  u8 buf[block_size];
  int i = 0;

  if (length > block_size)
    {
      return;
    }

  /* key XOR 0x36 repeated, zero-padded to a full block, then hashed */
  memset (buf, 0x36, sizeof (buf));
  for (i = 0; i < length; i++)
    {
      buf[i] ^= key[i];
    }
  fn (buf, ipad);

  /* key XOR 0x5c repeated, zero-padded to a full block, then hashed */
  memset (buf, 0x5c, sizeof (buf));

  for (i = 0; i < length; i++)
    {
      buf[i] ^= key[i];
    }
  fn (buf, opad);
}
79
80 always_inline void
81 ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail)
82 {
83   vnet_crypto_op_t *op = job->user_data;
84
85   if (STS_COMPLETED != job->status)
86     {
87       op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
88       *n_fail = *n_fail + 1;
89     }
90   else
91     op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
92
93   if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
94     {
95       if ((memcmp (op->digest, job->auth_tag_output, op->digest_len)))
96         {
97           *n_fail = *n_fail + 1;
98           op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
99         }
100     }
101   else
102     clib_memcpy_fast (op->digest, job->auth_tag_output, op->digest_len);
103 }
104
105 static_always_inline u32
106 ipsecmb_ops_hmac_inline (vlib_main_t * vm,
107                          const ipsecmb_per_thread_data_t * ptd,
108                          vnet_crypto_op_t * ops[],
109                          u32 n_ops,
110                          u32 block_size,
111                          hash_one_block_t fn, JOB_HASH_ALG alg)
112 {
113   JOB_AES_HMAC *job;
114   u32 i, n_fail = 0;
115   u8 scratch[n_ops][64];
116
117   /*
118    * queue all the jobs first ...
119    */
120   for (i = 0; i < n_ops; i++)
121     {
122       vnet_crypto_op_t *op = ops[i];
123       u8 ipad[256], opad[256];
124
125       hash_expand_keys (ptd->mgr, op->key, op->key_len,
126                         block_size, ipad, opad, fn);
127
128       job = IMB_GET_NEXT_JOB (ptd->mgr);
129
130       job->src = op->src;
131       job->hash_start_src_offset_in_bytes = 0;
132       job->msg_len_to_hash_in_bytes = op->len;
133       job->hash_alg = alg;
134       job->auth_tag_output_len_in_bytes = op->digest_len;
135       job->auth_tag_output = scratch[i];
136
137       job->cipher_mode = NULL_CIPHER;
138       job->cipher_direction = DECRYPT;
139       job->chain_order = HASH_CIPHER;
140
141       job->aes_key_len_in_bytes = op->key_len;
142
143       job->u.HMAC._hashed_auth_key_xor_ipad = ipad;
144       job->u.HMAC._hashed_auth_key_xor_opad = opad;
145       job->user_data = op;
146
147       job = IMB_SUBMIT_JOB (ptd->mgr);
148
149       if (job)
150         ipsecmb_retire_hmac_job (job, &n_fail);
151     }
152
153   /*
154    * .. then flush (i.e. complete) them
155    *  We will have queued enough to satisfy the 'multi' buffer
156    */
157   while ((job = IMB_FLUSH_JOB (ptd->mgr)))
158     {
159       ipsecmb_retire_hmac_job (job, &n_fail);
160     }
161
162   return n_ops - n_fail;
163 }
164
/*
 * Generate one thin per-algorithm entry point (ipsecmb_ops_hmac_SHA1,
 * ...) per row of foreach_ipsecmb_hmac_op.  Each wrapper looks up this
 * thread's MB_MGR and forwards to ipsecmb_ops_hmac_inline with the
 * algorithm's block size, single-block hash function and JOB_HASH_ALG:
 *   a = vnet crypto suffix, b = IPsec-MB alg token, c = mgr member stem.
 */
#define _(a, b, c)                                                      \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_hmac_inline (vm, ptd, ops, n_ops,                  \
                                  b##_BLOCK_SIZE,                       \
                                  ptd->mgr->c##_one_block,              \
                                  b);                                   \
  }
foreach_ipsecmb_hmac_op;
#undef _
184
185 #define EXPANDED_KEY_N_BYTES (16 * 15)
186
187 always_inline void
188 ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
189 {
190   vnet_crypto_op_t *op = job->user_data;
191
192   if (STS_COMPLETED != job->status)
193     {
194       op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
195       *n_fail = *n_fail + 1;
196     }
197   else
198     op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
199 }
200
201 static_always_inline u32
202 ipsecmb_ops_cipher_inline (vlib_main_t * vm,
203                            const ipsecmb_per_thread_data_t * ptd,
204                            vnet_crypto_op_t * ops[],
205                            u32 n_ops,
206                            keyexp_t fn, JOB_CIPHER_DIRECTION direction)
207 {
208   JOB_AES_HMAC *job;
209   u32 i, n_fail = 0;
210
211   /*
212    * queue all the jobs first ...
213    */
214   for (i = 0; i < n_ops; i++)
215     {
216       u8 aes_enc_key_expanded[EXPANDED_KEY_N_BYTES];
217       u8 aes_dec_key_expanded[EXPANDED_KEY_N_BYTES];
218       vnet_crypto_op_t *op = ops[i];
219
220       fn (op->key, aes_enc_key_expanded, aes_dec_key_expanded);
221
222       job = IMB_GET_NEXT_JOB (ptd->mgr);
223
224       job->src = op->src;
225       job->dst = op->dst;
226       job->msg_len_to_cipher_in_bytes = op->len;
227       job->cipher_start_src_offset_in_bytes = 0;
228
229       job->hash_alg = NULL_HASH;
230       job->cipher_mode = CBC;
231       job->cipher_direction = direction;
232       job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
233
234       job->aes_key_len_in_bytes = op->key_len;
235       job->aes_enc_key_expanded = aes_enc_key_expanded;
236       job->aes_dec_key_expanded = aes_dec_key_expanded;
237       job->iv = op->iv;
238       job->iv_len_in_bytes = op->iv_len;
239
240       job->user_data = op;
241
242       job = IMB_SUBMIT_JOB (ptd->mgr);
243
244       if (job)
245         ipsecmb_retire_cipher_job (job, &n_fail);
246     }
247
248   /*
249    * .. then flush (i.e. complete) them
250    *  We will have queued enough to satisfy the 'multi' buffer
251    */
252   while ((job = IMB_FLUSH_JOB (ptd->mgr)))
253     {
254       ipsecmb_retire_cipher_job (job, &n_fail);
255     }
256
257   return n_ops - n_fail;
258 }
259
/*
 * Generate one thin per-key-size encrypt entry point
 * (ipsecmb_ops_cipher_enc_AES_128_CBC, ...) per row of
 * foreach_ipsecmb_cipher_op.  Each wrapper looks up this thread's
 * MB_MGR and forwards to ipsecmb_ops_cipher_inline with the matching
 * key-expansion routine:  a = vnet crypto suffix, b = AES key bits.
 */
#define _(a, b)                                                         \
static_always_inline u32                                                \
ipsecmb_ops_cipher_enc_##a (vlib_main_t * vm,                           \
                            vnet_crypto_op_t * ops[],                   \
                            u32 n_ops)                                  \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops,                \
                                    ptd->mgr->keyexp_##b,               \
                                    ENCRYPT);                           \
  }
foreach_ipsecmb_cipher_op;
#undef _

/* Same as above but for the decrypt direction
 * (ipsecmb_ops_cipher_dec_AES_128_CBC, ...). */
#define _(a, b)                                                         \
static_always_inline u32                                                \
ipsecmb_ops_cipher_dec_##a (vlib_main_t * vm,                           \
                            vnet_crypto_op_t * ops[],                   \
                            u32 n_ops)                                  \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops,                \
                                    ptd->mgr->keyexp_##b,               \
                                    DECRYPT);                           \
  }
foreach_ipsecmb_cipher_op;
#undef _
297
298 static clib_error_t *
299 crypto_ipsecmb_init (vlib_main_t * vm)
300 {
301   ipsecmb_main_t *imbm = &ipsecmb_main;
302   ipsecmb_per_thread_data_t *ptd;
303   vlib_thread_main_t *tm = vlib_get_thread_main ();
304   clib_error_t *error;
305   u32 eidx;
306
307   if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
308     return error;
309
310   /*
311    * A priority that is better than OpenSSL but worse than VPP natvie
312    */
313   eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80,
314                                       "Intel IPSEC multi-buffer");
315
316   vec_validate (imbm->per_thread_data, tm->n_vlib_mains - 1);
317
318   if (clib_cpu_supports_avx512f ())
319     {
320       vec_foreach (ptd, imbm->per_thread_data)
321       {
322         ptd->mgr = alloc_mb_mgr (0);
323         init_mb_mgr_avx512 (ptd->mgr);
324       }
325     }
326   else if (clib_cpu_supports_avx2 ())
327     {
328       vec_foreach (ptd, imbm->per_thread_data)
329       {
330         ptd->mgr = alloc_mb_mgr (0);
331         init_mb_mgr_avx2 (ptd->mgr);
332       }
333     }
334   else
335     {
336       vec_foreach (ptd, imbm->per_thread_data)
337       {
338         ptd->mgr = alloc_mb_mgr (0);
339         init_mb_mgr_sse (ptd->mgr);
340       }
341     }
342
343 #define _(a, b, c)                                                       \
344   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
345                                     ipsecmb_ops_hmac_##a);               \
346
347   foreach_ipsecmb_hmac_op;
348 #undef _
349 #define _(a, b)                                                         \
350   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
351                                     ipsecmb_ops_cipher_enc_##a);        \
352
353   foreach_ipsecmb_cipher_op;
354 #undef _
355 #define _(a, b)                                                         \
356   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
357                                     ipsecmb_ops_cipher_dec_##a);        \
358
359   foreach_ipsecmb_cipher_op;
360 #undef _
361
362   return 0;
363 }
364
/* Run crypto_ipsecmb_init during VPP's init sequence */
VLIB_INIT_FUNCTION (crypto_ipsecmb_init);

/* Declare this shared object as a VPP plugin so vlib loads it */
/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC multi-buffer",
};
/* *INDENT-ON* */
374
375 /*
376  * fd.io coding-style-patch-verification: ON
377  *
378  * Local Variables:
379  * eval: (c-set-style "gnu")
380  * End:
381  */