crypto: add chained buffer support in ipsecmb (AES-GCM)
[vpp.git] src/plugins/crypto_ipsecmb/ipsecmb.c
/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
#define EXPANDED_KEY_N_BYTES (16 * 15)
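/* one 16-byte round key per AES round: AES-256 uses 14 rounds plus the
 * initial AddRoundKey, i.e. 15 round keys */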

typedef struct
{
  MB_MGR *mgr;
  __m128i cbc_iv;
} ipsecmb_per_thread_data_t;

typedef struct
{
  u16 data_size;
  u8 block_size;
  aes_gcm_pre_t aes_gcm_pre;
  keyexp_t keyexp;
  hash_one_block_t hash_one_block;
  hash_fn_t hash_fn;
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;
} ipsecmb_main_t;

typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_cbc_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/*
 * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1,   SHA1,    sha1,   64,  20, 20)                      \
  _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
  _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
  _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
  _(SHA512, SHA_512, sha512, 128, 64, 64)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_cbc_cipher_op                          \
  _(AES_128_CBC, 128)                                          \
  _(AES_192_CBC, 192)                                          \
  _(AES_256_CBC, 256)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128)                                          \
  _(AES_192_GCM, 192)                                          \
  _(AES_256_GCM, 256)
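/* the key-length field is token-pasted into the IMB_AES<len>_GCM_* macro
 * names below, selecting the 128/192/256-bit code paths */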

always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }
  else if (len == digest_size)
    clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}
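
/*
 * Submit one HMAC job per op. The multi-buffer manager may hold jobs back
 * to batch them across ops, so completed jobs are retired both as
 * IMB_SUBMIT_JOB returns them and in the final IMB_FLUSH_JOB loop.
 */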

static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                         u32 n_ops, u32 block_size, u32 hash_size,
                         u32 digest_size, JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][digest_size];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 *kd = (u8 *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = digest_size;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = NULL_CIPHER;
      job->cipher_direction = DECRYPT;
      job->chain_order = HASH_CIPHER;

      job->u.HMAC._hashed_auth_key_xor_ipad = kd;
      job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_hmac_job (job, &n_fail, digest_size);

  return n_ops - n_fail;
}
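
/* expand one thin wrapper per HMAC algorithm, baking the block, hash and
 * digest sizes from foreach_ipsecmb_hmac_op into the inline above */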

#define _(a, b, c, d, e, f)                                             \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); }        \

foreach_ipsecmb_hmac_op;
#undef _

always_inline void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}
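
/*
 * AES-CBC via the job API. For encrypt ops flagged with
 * VNET_CRYPTO_OP_FLAG_INIT_IV, a fresh IV is taken from the per-thread
 * seed and written back into the op; the seed is then advanced by one
 * AES round so consecutive IVs differ.
 */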

static_always_inline u32
ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;

  for (i = 0; i < n_ops; i++)
    {
      ipsecmb_aes_cbc_key_data_t *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (ipsecmb_aes_cbc_key_data_t *) imbm->key_data[op->key_index];
      __m128i iv;

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = NULL_HASH;
      job->cipher_mode = CBC;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          _mm_storeu_si128 ((__m128i *) op->iv, iv);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      job->aes_key_len_in_bytes = key_len / 8;
      job->aes_enc_key_expanded = kd->enc_key_exp;
      job->aes_dec_key_expanded = kd->dec_key_exp;
      job->iv = op->iv;
      job->iv_len_in_bytes = AES_BLOCK_SIZE;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_cipher_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_cipher_job (job, &n_fail);

  return n_ops - n_fail;
}

#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, ENCRYPT); }       \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, DECRYPT); }       \

foreach_ipsecmb_cbc_cipher_op;
#undef _
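
/*
 * AES-GCM. The single-buffer variants use the one-shot
 * IMB_AES<len>_GCM_ENC / _DEC macros; the *_chained variants handle ops
 * spread across multiple vnet_crypto_op_chunk_t buffers by streaming each
 * chunk through IMB_AES<len>_GCM_INIT / _ENC_UPDATE / _ENC_FINALIZE (and
 * the _DEC equivalents), so a chained buffer never needs to be linearized.
 *
 * Illustrative layout (not real traffic): an op with n_chunks = 2
 * references chunks[op->chunk_index] and chunks[op->chunk_index + 1];
 * each chunk carries its own src, dst and len, and the on-stack
 * gcm_context_data carries GHASH/counter state between UPDATE calls.
 */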

#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j;                                                                  \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len);      \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i;                                                                     \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, op->tag, op->tag_len);     \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j, n_failed = 0;                                                    \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len);      \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                 u32 n_ops)                                  \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i, n_failed = 0;                                                       \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, scratch, op->tag_len);     \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}

foreach_ipsecmb_gcm_cipher_op;
#undef _
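
/* seed each worker thread's CBC IV generator from /dev/urandom */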

clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
  ipsecmb_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, imbm->per_thread_data)
  {
    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
      {
        err = clib_error_return_unix (0, "'/dev/urandom' read failure");
        close (fd);
        return (err);
      }
  }

  close (fd);
  return (NULL);
}
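
/*
 * Precompute per-key material on add/modify and free it on delete:
 * expanded AES round keys for CBC, hash subkeys for GCM, and partially
 * hashed ipad/opad blocks for HMAC.
 */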

static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                            vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
        return;

      if (imbm->key_data[idx] == 0)
        return;

      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_mem_free_s (imbm->key_data[idx]);
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
                                                     CLIB_CACHE_LINE_BYTES);

  /* AES CBC key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_cbc_key_data_t *) kd)->enc_key_exp,
                  ((ipsecmb_aes_cbc_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

  /* HMAC */
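  /*
   * RFC 2104 precompute: hash one block of (key XOR 0x36 ipad) and one
   * block of (key XOR 0x5c opad); keys longer than the block size are
   * first hashed down with hash_fn.
   */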
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
        clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
        ad->hash_fn (key->data, vec_len (key->data), key_hash);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));

      return;
    }
}
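
/*
 * Plugin init: allocate one MB_MGR per VPP thread, picking the widest
 * SIMD code path the CPU supports, then register the per-algorithm
 * handlers with the crypto infra.
 */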

static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  MB_MGR *m = 0;
  u32 eidx;
  u8 *name;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
                 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate (imbm->per_thread_data, tm->n_vlib_mains - 1);

  /* *INDENT-OFF* */
  vec_foreach (ptd, imbm->per_thread_data)
    {
        ptd->mgr = alloc_mb_mgr (0);
        if (clib_cpu_supports_avx512f ())
          init_mb_mgr_avx512 (ptd->mgr);
        else if (clib_cpu_supports_avx2 ())
          init_mb_mgr_avx2 (ptd->mgr);
        else
          init_mb_mgr_sse (ptd->mgr);

        if (ptd == imbm->per_thread_data)
          m = ptd->mgr;
    }
  /* *INDENT-ON* */

  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
    return (error);

#define _(a, b, c, d, e, f)                                              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a;                        \
  ad->block_size = d;                                                    \
  ad->data_size = e * 2;                                                 \
  ad->hash_one_block = m-> c##_one_block;                                \
  ad->hash_fn = m-> c;                                                   \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b)                                                         \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_cbc_cipher_enc_##a);    \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_cbc_cipher_dec_##a);    \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
  ad->data_size = sizeof (ipsecmb_aes_cbc_key_data_t);                  \
  ad->keyexp = m->keyexp_##b;                                           \

  foreach_ipsecmb_cbc_cipher_op;
#undef _
#define _(a, b)                                                         \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a);    \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a);    \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,                              \
       ipsecmb_ops_gcm_cipher_enc_##a##_chained);                       \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,                              \
       ipsecmb_ops_gcm_cipher_dec_##a##_chained);                       \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
  ad->data_size = sizeof (struct gcm_key_data);                         \
  ad->aes_gcm_pre = m->gcm##b##_pre;                                    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
  return (NULL);
}
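
/* must run after vnet_crypto_init so the engine and handler registration
 * APIs above are available */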

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
{
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */