crypto: align per thread data to cache line
src/plugins/crypto_ipsecmb/ipsecmb.c
/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
#define EXPANDED_KEY_N_BYTES (16 * 15)

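/* Per-thread state: each worker owns a private multi-buffer manager and a
   rolling CBC IV.  The struct is cache-line aligned so that threads do not
   false-share a cache line. */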
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  MB_MGR *mgr;
  __m128i cbc_iv;
} ipsecmb_per_thread_data_t;

typedef struct
{
  u16 data_size;
  u8 block_size;
  aes_gcm_pre_t aes_gcm_pre;
  keyexp_t keyexp;
  hash_one_block_t hash_one_block;
  hash_fn_t hash_fn;
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;
} ipsecmb_main_t;

typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_cbc_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/*
 * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1,   SHA1,    sha1,   64,  20, 20)                      \
  _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
  _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
  _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
  _(SHA512, SHA_512, sha512, 128, 64, 64)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_cbc_cipher_op                          \
  _(AES_128_CBC, 128)                                          \
  _(AES_192_CBC, 192)                                          \
  _(AES_256_CBC, 256)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128)                                          \
  _(AES_192_GCM, 192)                                          \
  _(AES_256_GCM, 256)

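/* Complete a finished HMAC job: flag failures, then either verify the
   (possibly truncated) digest against op->digest or copy it out. */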
always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }
  else if (len == digest_size)
    clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                         u32 n_ops, u32 block_size, u32 hash_size,
                         u32 digest_size, JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
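  /* per-op scratch for the full-size digest; jobs write here so the
     verify path can compare without touching op->digest */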
  u8 scratch[n_ops][digest_size];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 *kd = (u8 *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = digest_size;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = NULL_CIPHER;
      job->cipher_direction = DECRYPT;
      job->chain_order = HASH_CIPHER;

      job->u.HMAC._hashed_auth_key_xor_ipad = kd;
      job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
    }

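  /* ... then flush the manager to drain any jobs it is still holding
     back for batching */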
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_hmac_job (job, &n_fail, digest_size);

  return n_ops - n_fail;
}

#define _(a, b, c, d, e, f)                                             \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); }        \

foreach_ipsecmb_hmac_op;
#undef _

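/* Complete a finished cipher job; CBC has no tag to verify, so only the
   job status is checked. */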
always_inline void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

static_always_inline u32
ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;

  for (i = 0; i < n_ops; i++)
    {
      ipsecmb_aes_cbc_key_data_t *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (ipsecmb_aes_cbc_key_data_t *) imbm->key_data[op->key_index];
      __m128i iv;

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = NULL_HASH;
      job->cipher_mode = CBC;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

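      /* on encrypt, derive a fresh unpredictable IV by running one AES
         round over the previous one, and hand it back to the caller via
         op->iv */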
      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          _mm_storeu_si128 ((__m128i *) op->iv, iv);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      job->aes_key_len_in_bytes = key_len / 8;
      job->aes_enc_key_expanded = kd->enc_key_exp;
      job->aes_dec_key_expanded = kd->dec_key_exp;
      job->iv = op->iv;
      job->iv_len_in_bytes = AES_BLOCK_SIZE;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_cipher_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_cipher_job (job, &n_fail);

  return n_ops - n_fail;
}

#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, ENCRYPT); }       \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, DECRYPT); }       \

foreach_ipsecmb_cbc_cipher_op;
#undef _

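/* GCM uses the library's direct (synchronous) API rather than the job
   manager, so there is no submit/flush cycle.  Each key width gets four
   handlers: single-buffer and chained-buffer variants for encrypt and
   decrypt.  Decrypt computes the tag into a scratch buffer and compares
   it against op->tag. */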
#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j;                                                                  \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len);      \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i;                                                                     \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, op->tag, op->tag_len);     \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j, n_failed = 0;                                                    \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len);      \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                 u32 n_ops)                                  \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i, n_failed = 0;                                                       \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, scratch, op->tag_len);     \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}

foreach_ipsecmb_gcm_cipher_op;
#undef _

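/* Seed each thread's rolling CBC IV from /dev/urandom once at plugin
   init. */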
clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
  ipsecmb_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, imbm->per_thread_data)
  {
    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
      {
        err = clib_error_return_unix (0, "'/dev/urandom' read failure");
        close (fd);
        return (err);
      }
  }

  close (fd);
  return (NULL);
}

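/* Key add/modify/delete callback from the crypto framework: precompute
   per-key material (expanded AES keys, GCM hash keys, HMAC pad states)
   so the data path never has to process the raw key. */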
static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                            vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
        return;

      if (imbm->key_data[idx] == 0)
        return;

      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_mem_free_s (imbm->key_data[idx]);
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
                                                     CLIB_CACHE_LINE_BYTES);

  /* AES CBC key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_cbc_key_data_t *) kd)->enc_key_exp,
                  ((ipsecmb_aes_cbc_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

  /* HMAC: hash an over-long key first, then precompute the inner and
     outer block states H(key XOR ipad) and H(key XOR opad), stored back
     to back in the key data */
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
        clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
        ad->hash_fn (key->data, vec_len (key->data), key_hash);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));

      return;
    }
}

static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  MB_MGR *m = 0;
  u32 eidx;
  u8 *name;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
                 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  /* *INDENT-OFF* */
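  /* allocate one MB_MGR per thread and initialize it with the widest
     SIMD code path this CPU supports */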
  vec_foreach (ptd, imbm->per_thread_data)
    {
        ptd->mgr = alloc_mb_mgr (0);
        if (clib_cpu_supports_avx512f ())
          init_mb_mgr_avx512 (ptd->mgr);
        else if (clib_cpu_supports_avx2 ())
          init_mb_mgr_avx2 (ptd->mgr);
        else
          init_mb_mgr_sse (ptd->mgr);

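        /* remember the first thread's manager; its per-arch function
           pointers are used below to fill in the alg_data tables */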
        if (ptd == imbm->per_thread_data)
          m = ptd->mgr;
    }
  /* *INDENT-ON* */

  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
    return (error);

#define _(a, b, c, d, e, f)                                              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a;                        \
  ad->block_size = d;                                                    \
  ad->data_size = e * 2;                                                 \
  ad->hash_one_block = m-> c##_one_block;                                \
  ad->hash_fn = m-> c;                                                   \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b)                                                         \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_cbc_cipher_enc_##a);    \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_cbc_cipher_dec_##a);    \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
  ad->data_size = sizeof (ipsecmb_aes_cbc_key_data_t);                  \
  ad->keyexp = m->keyexp_##b;                                           \

  foreach_ipsecmb_cbc_cipher_op;
#undef _
#define _(a, b)                                                         \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a);    \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a);    \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,                              \
       ipsecmb_ops_gcm_cipher_enc_##a##_chained);                       \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,                              \
       ipsecmb_ops_gcm_cipher_dec_##a##_chained);                       \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
  ad->data_size = sizeof (struct gcm_key_data);                         \
  ad->aes_gcm_pre = m->gcm##b##_pre;                                    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
  return (NULL);
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
{
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */