/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_MAX_INFLIGHT  (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_AAD_MASK      (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_DEQ_CACHE_SZ  32
#define CRYPTODEV_NB_SESSION    10240
#define CRYPTODEV_MAX_AAD_SIZE  16
#define CRYPTODEV_MAX_N_SGL     8 /**< maximum number of segments */
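
/* CRYPTODEV_NB_CRYPTO_OPS is a power of two: CRYPTODEV_MAX_INFLIGHT caps
 * outstanding ops at one less than the queue depth, and CRYPTODEV_AAD_MASK
 * wraps the per-thread AAD index over a staging buffer laid out as
 * CRYPTODEV_NB_CRYPTO_OPS slots of CRYPTODEV_MAX_AAD_SIZE (16) bytes each
 * (see the aad_offset computation in cryptodev_frame_gcm_enqueue). */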

/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN, KEY_LEN
 */
#define foreach_vnet_aead_crypto_conversion                                   \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 8, 16)                               \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 12, 16)                              \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 8, 24)                               \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 12, 24)                              \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 8, 32)                               \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 12, 32)

/**
 * crypto (alg, cryptodev_alg, key_size), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg                                      \
  _ (AES_128_CBC, AES_CBC, 16, SHA1, 12)                                      \
  _ (AES_192_CBC, AES_CBC, 24, SHA1, 12)                                      \
  _ (AES_256_CBC, AES_CBC, 32, SHA1, 12)                                      \
  _ (AES_128_CBC, AES_CBC, 16, SHA224, 14)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA224, 14)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA224, 14)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA256, 16)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA256, 16)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA256, 16)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA384, 24)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA384, 24)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA384, 24)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA512, 32)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA512, 32)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA512, 32)
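
/* Example expansion: the entry _ (AES_128_CBC, AES_CBC, 16, SHA1, 12) maps
 * VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12, i.e. AES-128-CBC (16-byte key)
 * linked with HMAC-SHA1 truncated to a 12-byte digest, to its DPDK
 * counterparts (see the switch in prepare_linked_xform below). */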

typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES,
} cryptodev_op_type_t;

typedef struct
{
  union rte_cryptodev_session_ctx **keys;
} cryptodev_key_t;

/* Mirror of the DPDK rte_cryptodev_sym_capability structure, with each
 * supported-size range expanded into a vpp vector of discrete sizes */
typedef struct
{
  enum rte_crypto_sym_xform_type xform_type;
  union
  {
    struct
    {
      enum rte_crypto_auth_algorithm algo; /* auth algo */
      u32 *digest_sizes;                   /* vector of auth digest sizes */
    } auth;
    struct
    {
      enum rte_crypto_cipher_algorithm algo; /* cipher algo */
      u32 *key_sizes;                        /* vector of cipher key sizes */
    } cipher;
    struct
    {
      enum rte_crypto_aead_algorithm algo; /* aead algo */
      u32 *key_sizes;                      /* vector of aead key sizes */
      u32 *aad_sizes;                      /* vector of aad sizes */
      u32 *digest_sizes;                   /* vector of aead digest sizes */
    } aead;
  };
} cryptodev_capability_t;

typedef struct
{
  u32 dev_id;
  u32 q_id;
  char *desc;
} cryptodev_inst_t;

typedef struct
{
  struct rte_mempool *sess_pool;
  struct rte_mempool *sess_priv_pool;
} cryptodev_numa_data_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  vlib_buffer_t *b[VNET_CRYPTO_FRAME_SIZE]; /* buffers of current frame */
  struct rte_crypto_raw_dp_ctx *ctx;	    /* DPDK raw data-path context */
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL]; /* scratch SGL */
  struct rte_ring *cached_frame; /* partially dequeued frames */
  u16 aad_index;		 /* rolling index into aad_buf */
  u8 *aad_buf;			 /* staging buffer for AAD copies */
  u64 aad_phy_addr;		 /* physical address of aad_buf */
  u16 cryptodev_id;		 /* assigned cryptodev */
  u16 cryptodev_q;		 /* assigned queue pair */
  u16 inflight;			 /* ops enqueued but not yet dequeued */
  union rte_cryptodev_session_ctx reset_sess; /* session data for reset ctx */
} cryptodev_engine_thread_t;

typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
  cryptodev_capability_t *supported_caps;
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;

static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
                    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
                    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = 0;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
                      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = 0;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}

static_always_inline void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}

static_always_inline int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                          u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
  u32 numa_node = vm->numa_node;
  int ret;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
  sess_pool = numa_data->sess_pool;
  sess_priv_pool = numa_data->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
                              aad_len);
  if (ret)
    goto clear_key;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
  {
    u32 dev_id = dev_inst->dev_id;
    struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);

    /* if the session is already initialized for this driver type, skip
       re-initializing it, which would only bump the session data's refcnt */
    if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[cdev->driver_id].data &&
        sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[cdev->driver_id].data)
      continue;

    ret = rte_cryptodev_sym_session_init (
      dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc, sess_priv_pool);
    if (ret < 0)
      goto clear_key;

    ret = rte_cryptodev_sym_session_init (
      dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec, sess_priv_pool);
    if (ret < 0)
      goto clear_key;
  }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  return ret;
}

static int
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;
  vec_foreach (value, params)
    {
      if (*value == param_value)
        return 1;
    }
  return 0;
}

static int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                             u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;
  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
        continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
          cap->auth.algo == idx->algo.auth &&
          cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
          cap->cipher.algo == idx->algo.cipher &&
          cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
          cap->aead.algo == idx->algo.aead &&
          cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
          cryptodev_supports_param_value (cap->aead.digest_sizes,
                                          digest_size) &&
          cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
        return 1;
    }
  return 0;
}

static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f, g)                                                \
  if (alg == VNET_CRYPTO_ALG_##a)                                             \
    return 0;

  foreach_vnet_aead_crypto_conversion
#undef _
    return -1;
}

static_always_inline void
cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
        return;

      vec_foreach_index (i, cmt->per_numa_data)
        {
          if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess)
            {
              cryptodev_session_del (
                ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess);
              cryptodev_session_del (
                ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess);

              CLIB_MEMORY_STORE_BARRIER ();
              ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess = 0;
              ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess = 0;
            }
        }
      return;
    }

  /* create key */

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key))
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

/*static*/ void
cryptodev_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}

static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t * f,
                                 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
  f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
}

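/* Fill data_vec[1..] from the chained buffers of b; the caller has already
 * filled data_vec[0] with the first segment. Fails if the chain needs more
 * than CRYPTODEV_MAX_N_SGL segments or holds fewer than size bytes. */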
static_always_inline int
cryptodev_frame_build_sgl (vlib_main_t * vm, enum rte_iova_mode iova_mode,
                           struct rte_crypto_vec *data_vec,
                           u16 * n_seg, vlib_buffer_t * b, u32 size)
{
  struct rte_crypto_vec *vec = data_vec + 1;
  if (vlib_buffer_chain_linearize (vm, b) > CRYPTODEV_MAX_N_SGL)
    return -1;

  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
    {
      u32 len;
      b = vlib_get_buffer (vm, b->next_buffer);
      len = clib_min (b->current_length, size);
      vec->base = (void *) vlib_buffer_get_current (b);
      if (iova_mode == RTE_IOVA_VA)
        vec->iova = pointer_to_uword (vec->base);
      else
        vec->iova = vlib_buffer_get_current_pa (vm, b);
      vec->len = len;
      size -= len;
      vec++;
      *n_seg += 1;
    }

  if (size)
    return -1;

  return 0;
}

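/* Derive DPDK's cipher/auth head and tail offsets from vnet's absolute
 * offsets. Worked example (hypothetical ESP-like layout): integ_start_offset
 * 0, crypto_start_offset 8, crypto_total_length 100 and integ_length_adj 8
 * give min_ofs 0 and max_end 108, hence cipher.head = 8 and the remaining
 * head/tail offsets 0. */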
static_always_inline u64
compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t * fe, i16 * min_ofs,
                        u32 * max_end)
{
  union rte_crypto_sym_ofs ofs;
  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
  u32 integ_end = fe->integ_start_offset + fe->crypto_total_length +
    fe->integ_length_adj;

  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
  *max_end = clib_max (crypto_end, integ_end);

  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
  ofs.ofs.cipher.tail = *max_end - crypto_end;
  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
  ofs.ofs.auth.tail = *max_end - integ_end;

  return ofs.raw;
}

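/* Re-arm the raw data-path context with the per-thread dummy reset session,
 * e.g. after an enqueue error left the context in an undefined state. */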
static_always_inline void
cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
{
  rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q,
                                      cet->ctx, RTE_CRYPTO_OP_WITH_SESSION,
                                      cet->reset_sess, 0);
}

static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
                                     vnet_crypto_async_frame_t * frame,
                                     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  struct rte_crypto_vec *vec;
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
  vlib_buffer_t **b;
  u32 n_elts;
  u32 last_key_index = ~0;
  i16 min_ofs;
  u32 max_end;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  vec = cet->vec;
  b = cet->b;
  fe = frame->elts;

  while (n_elts)
    {
      union rte_crypto_sym_ofs cofs;
      u16 n_seg = 1;

      if (n_elts > 2)
        {
          CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
          vlib_prefetch_buffer_header (b[1], LOAD);
          vlib_prefetch_buffer_header (b[2], LOAD);
        }

      if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

          if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
                             0))
            {
              status = cryptodev_session_create (vm, fe->key_index, 0);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          status = rte_cryptodev_configure_raw_dp_ctx (
            cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
            RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
            /*is_update */ 1);
          if (PREDICT_FALSE (status < 0))
            goto error_exit;

          last_key_index = fe->key_index;
        }

      cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

      vec->len = max_end - min_ofs;
      if (cmt->iova_mode == RTE_IOVA_VA)
        {
          vec[0].base = (void *) (b[0]->data + min_ofs);
          vec[0].iova = pointer_to_uword (b[0]->data) + min_ofs;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
        }
      else
        {
          vec[0].base = (void *) (b[0]->data + min_ofs);
          vec[0].iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
        }

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
          vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs;
          if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
                                         max_end - min_ofs - vec->len) < 0)
            goto error_exit;
        }

      status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
                                          &digest_vec, 0, (void *) frame);
      if (PREDICT_FALSE (status < 0))
        goto error_exit;

      b++;
      fe++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    {
      cryptodev_reset_ctx (cet);
      return -1;
    }

  cet->inflight += frame->n_elts;
  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}

static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
                             vnet_crypto_async_frame_t * frame,
                             cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  u32 n_elts;
  union rte_crypto_sym_ofs cofs;
  struct rte_crypto_vec *vec;
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
  u32 last_key_index = ~0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  vec = cet->vec;
  fe = frame->elts;
  b = cet->b;
  cofs.raw = 0;

  while (n_elts)
    {
      u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
      u16 n_seg = 1;

      if (n_elts > 1)
        {
          CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
          vlib_prefetch_buffer_header (b[1], LOAD);
        }

      if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

          if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
                             0))
            {
              status = cryptodev_session_create (vm, fe->key_index, aad_len);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          if (PREDICT_FALSE ((u8) key->keys[vm->numa_node][op_type]
                               .crypto_sess->opaque_data != aad_len))
            {
              cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
                                      fe->key_index, aad_len);
              status = cryptodev_session_create (vm, fe->key_index, aad_len);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          status = rte_cryptodev_configure_raw_dp_ctx (
            cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
            RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
            /*is_update */ 1);
          if (PREDICT_FALSE (status < 0))
            goto error_exit;

          last_key_index = fe->key_index;
        }

      if (cmt->iova_mode == RTE_IOVA_VA)
        {
          vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
          vec[0].iova = pointer_to_uword (vec[0].base);
          vec[0].len = fe->crypto_total_length;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
          aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
        }
      else
        {
          vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
          vec[0].iova =
            vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
          vec[0].len = fe->crypto_total_length;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
          aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
        }

      if (aad_len == 8)
        *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
      else
        {
          /* aad_len == 12 */
          *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
          *(u32 *) (cet->aad_buf + aad_offset + 8) = *(u32 *) (fe->aad + 8);
        }

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
          vec[0].len = b[0]->current_data + b[0]->current_length -
                       fe->crypto_start_offset;
          status =
            cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
                                       fe->crypto_total_length - vec[0].len);
          if (status < 0)
            goto error_exit;
        }

      status =
        rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
                                   &digest_vec, &aad_vec, (void *) frame);
      if (PREDICT_FALSE (status < 0))
        goto error_exit;

      fe++;
      b++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    goto error_exit;

  cet->inflight += frame->n_elts;

  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}

static u32
cryptodev_get_frame_n_elts (void *frame)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
  return f->n_elts;
}

static void
cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;

  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
    VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
}

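/* Peek at the frame cached at cons.head + pos without dequeuing it. This
 * relies on the DPDK ring layout, where the object table immediately
 * follows the struct rte_ring header. */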
#define GET_RING_OBJ(r, pos, f) do { \
        vnet_crypto_async_frame_t **ring = (void *)&r[1];     \
        f = ring[(r->cons.head + pos) & r->mask]; \
} while (0)

static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
                         u32 * enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame, *frame_ret = 0;
  u32 n_deq, n_success;
  u32 n_cached_frame = rte_ring_count (cet->cached_frame), n_room_left;
  u8 no_job_to_deq = 0;
  u16 inflight = cet->inflight;
  int dequeue_status;

  n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;

  if (n_cached_frame)
    {
      u32 i;
      for (i = 0; i < n_cached_frame; i++)
        {
          vnet_crypto_async_frame_t *f;
          void *f_ret;
          enum rte_crypto_op_status op_status;
          u8 n_left, err, j;

          GET_RING_OBJ (cet->cached_frame, i, f);

          if (i < n_cached_frame - 2)
            {
              vnet_crypto_async_frame_t *f1, *f2;
              GET_RING_OBJ (cet->cached_frame, i + 1, f1);
              GET_RING_OBJ (cet->cached_frame, i + 2, f2);
              CLIB_PREFETCH (f1, CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (f2, CLIB_CACHE_LINE_BYTES, LOAD);
            }

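          /* while a frame sits in the cache ring its state field is reused:
           * bits 0-6 hold the number of elements still pending, bit 7 is
           * set if any element has failed */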
          n_left = f->state & 0x7f;
          err = f->state & 0x80;

          for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
            {
              int ret;
              f_ret = rte_cryptodev_raw_dequeue (cet->ctx, &ret, &op_status);

              if (!f_ret)
                break;

              switch (op_status)
                {
                case RTE_CRYPTO_OP_STATUS_SUCCESS:
                  f->elts[j].status = VNET_CRYPTO_OP_STATUS_COMPLETED;
                  break;
                default:
                  f->elts[j].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
                  err |= 1 << 7;
                }

              inflight--;
            }

          if (j == f->n_elts)
            {
              if (i == 0)
                {
                  frame_ret = f;
                  f->state = err ? VNET_CRYPTO_FRAME_STATE_ELT_ERROR :
                    VNET_CRYPTO_FRAME_STATE_SUCCESS;
                }
              else
                {
                  f->state = f->n_elts - j;
                  f->state |= err;
                }
              if (inflight)
                continue;
            }

          /* if we get here, f is not fully dequeued and no more jobs can
           * be dequeued
           */
          f->state = f->n_elts - j;
          f->state |= err;
          no_job_to_deq = 1;
          break;
        }

      if (frame_ret)
        {
          rte_ring_sc_dequeue (cet->cached_frame, (void **) &frame_ret);
          n_room_left++;
        }
    }

  /* no point in dequeuing further */
  if (!inflight || no_job_to_deq || !n_room_left)
    goto end_deq;

  n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
                                           cryptodev_get_frame_n_elts,
                                           cryptodev_post_dequeue,
                                           (void **) &frame, 0, &n_success,
                                           &dequeue_status);
  if (!n_deq)
    goto end_deq;

  inflight -= n_deq;
  no_job_to_deq = n_deq < frame->n_elts;
  /* we have to cache the frame */
  if (frame_ret || n_cached_frame || no_job_to_deq)
    {
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }
  else
    {
      frame->state = n_success == frame->n_elts ?
        VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
      frame_ret = frame;
    }

  /* see if we can dequeue more */
  while (inflight && n_room_left && !no_job_to_deq)
    {
      n_deq = rte_cryptodev_raw_dequeue_burst (cet->ctx,
                                               cryptodev_get_frame_n_elts,
                                               cryptodev_post_dequeue,
                                               (void **) &frame, 0,
                                               &n_success, &dequeue_status);
      if (!n_deq)
        break;
      inflight -= n_deq;
      no_job_to_deq = n_deq < frame->n_elts;
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }

end_deq:
  if (inflight < cet->inflight)
    {
      int res =
        rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
      ASSERT (res == 0);
      cet->inflight = inflight;
    }

  if (frame_ret)
    {
      *nb_elts_processed = frame_ret->n_elts;
      *enqueue_thread_idx = frame_ret->enqueue_thread_index;
    }

  return frame_ret;
}

/* *INDENT-OFF* */
static_always_inline int
cryptodev_enqueue_gcm_aad_8_enc (vlib_main_t * vm,
                                 vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
                                      CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}
static_always_inline int
cryptodev_enqueue_gcm_aad_12_enc (vlib_main_t * vm,
                                  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
                                      CRYPTODEV_OP_TYPE_ENCRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_8_dec (vlib_main_t * vm,
                                 vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
                                      CRYPTODEV_OP_TYPE_DECRYPT, 8);
}
static_always_inline int
cryptodev_enqueue_gcm_aad_12_dec (vlib_main_t * vm,
                                  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
                                      CRYPTODEV_OP_TYPE_DECRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t * vm,
                                  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t * vm,
                                  vnet_crypto_async_frame_t * frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_DECRYPT);
}

typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 *  assign a cryptodev resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: ignored when op is "ASSIGN_AUTO".
 *  @param op: the assignment method.
 *  @return: 0 on success, a negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* resource assignment is only allowed when no inflight ops are in the
   * queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      cryptodev_reset_ctx (cet);
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;
      vec_foreach_index (idx, cmt->cryptodev_inst)
      {
        cinst = cmt->cryptodev_inst + idx;
        if (cinst->dev_id == cet->cryptodev_id &&
            cinst->q_id == cet->cryptodev_q)
          break;
      }
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
        return -EINVAL;
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      cryptodev_reset_ctx (cet);
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}

static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
        s = format (s, "%u (%v)\n", thread_index,
                    vlib_worker_threads[thread_index].name);
        break;
      }
  }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                              vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
                   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};

static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                             vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
        thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
        inst_present = 1;
      else
        {
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, line_input);
          return error;
        }
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error = clib_error_return (0, "cannot assign crypto resource to the "
                                 "master thread");
      return error;
    }

  if (thread_index >= vec_len (cmt->per_thread_data) ||
      inst_index >= vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
                                   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error = clib_error_return (0, "cryptodev_assign_resource returned %i",
                                 ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
        "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};

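/* Count the queue pairs available across all cryptodevs. Note the numa
 * argument is currently unused: queues on every node are counted. */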
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 i, q_count = 0;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
      qp_cfg.mp_session_private = 0;
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
                                            info.device->numa_node);
      if (ret)
        {
          clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
                        cryptodev_id, i, ret);
          break;
        }
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
                "%s_q%u", info.device->name, i);
    }

  return 0;
}

static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;
  return 0;
}

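/* Intersect the vector of candidate sizes with the device capability range
 * [param_size_min, param_size_max] stepped by increment; an increment of 0
 * means the range is the single size param_size_min. */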
static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* this size is not in the cap range, so drop it from param_sizes */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}

static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}

static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
        switch (cap->sym.xform_type)
          {
          case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (cap->sym.cipher.algo == temp_cap->cipher.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
                  cap->sym.cipher.key_size.max,
                  cap->sym.cipher.key_size.increment);
                if (vec_len (temp_cap->cipher.key_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (cap->sym.auth.algo == temp_cap->auth.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
                  cap->sym.auth.digest_size.max,
                  cap->sym.auth.digest_size.increment);
                if (vec_len (temp_cap->auth.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AEAD:
            if (cap->sym.aead.algo == temp_cap->aead.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
                  cap->sym.aead.key_size.max,
                  cap->sym.aead.key_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
                  cap->sym.aead.aad_size.max,
                  cap->sym.aead.aad_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
                  cap->sym.aead.digest_size.max,
                  cap->sym.aead.digest_size.increment);
                if (vec_len (temp_cap->aead.key_sizes) > 0 &&
                    vec_len (temp_cap->aead.aad_sizes) > 0 &&
                    vec_len (temp_cap->aead.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          default:
            break;
          }
      if (cap_found)
        break;
      cap++;
    }

  return cap_found;
}

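/* Compute the capability set common to all configured cryptodevs: seed the
 * vector from the first device, then prune sizes (and whole capabilities)
 * that any other device does not support. */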
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /* init capabilities vector */
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      ASSERT (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC);
      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
        {
        case RTE_CRYPTO_SYM_XFORM_CIPHER:
          tmp_cap.cipher.key_sizes = 0;
          tmp_cap.cipher.algo = cap->sym.cipher.algo;
          for (param = cap->sym.cipher.key_size.min;
               param <= cap->sym.cipher.key_size.max;
               param += cap->sym.cipher.key_size.increment)
            {
              vec_add1 (tmp_cap.cipher.key_sizes, param);
              if (cap->sym.cipher.key_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AUTH:
          tmp_cap.auth.algo = cap->sym.auth.algo;
          tmp_cap.auth.digest_sizes = 0;
          for (param = cap->sym.auth.digest_size.min;
               param <= cap->sym.auth.digest_size.max;
               param += cap->sym.auth.digest_size.increment)
            {
              vec_add1 (tmp_cap.auth.digest_sizes, param);
              if (cap->sym.auth.digest_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AEAD:
          tmp_cap.aead.key_sizes = 0;
          tmp_cap.aead.aad_sizes = 0;
          tmp_cap.aead.digest_sizes = 0;
          tmp_cap.aead.algo = cap->sym.aead.algo;
          for (param = cap->sym.aead.key_size.min;
               param <= cap->sym.aead.key_size.max;
               param += cap->sym.aead.key_size.increment)
            {
              vec_add1 (tmp_cap.aead.key_sizes, param);
              if (cap->sym.aead.key_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.aad_size.min;
               param <= cap->sym.aead.aad_size.max;
               param += cap->sym.aead.aad_size.increment)
            {
              vec_add1 (tmp_cap.aead.aad_sizes, param);
              if (cap->sym.aead.aad_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.digest_size.min;
               param <= cap->sym.aead.digest_size.max;
               param += cap->sym.aead.digest_size.increment)
            {
              vec_add1 (tmp_cap.aead.digest_sizes, param);
              if (cap->sym.aead.digest_size.increment == 0)
                break;
            }
          break;
        default:
          break;
        }

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
        {
          dev_id = dev_inst->dev_id;
          if (previous_dev_id != dev_id)
            {
              previous_dev_id = dev_id;
              rte_cryptodev_info_get (dev_id, &dev_info);
              dev_caps = &dev_info.capabilities[0];
              cap_is_supported = cryptodev_remove_unsupported_param_sizes (
                &cmt->supported_caps[cap_id], dev_caps);
              if (!cap_is_supported)
                {
                  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
                  /* no need to check other devices as this one doesn't
                   * support this temp_cap */
                  break;
                }
            }
        }
      if (cap_is_supported)
        cap_id++;
    }
}

static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough cryptodev queues for the workers, give up */
1580   if (vec_len (cmt->cryptodev_inst) < n_workers)
1581     return -1;
1582
1583   return 0;
1584 }
1585
static void
cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst;
  u32 max_sess = 0, max_dp = 0;

  vec_foreach (cinst, cmt->cryptodev_inst)
    {
      u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id);
      u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);

      max_sess = clib_max (sess_sz, max_sess);
      max_dp = clib_max (dp_sz, max_dp);
    }

  *max_sess_sz = max_sess;
  *max_dp_sz = max_dp;
}

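/* Undo dpdk_cryptodev_init: free the local NUMA node's session pools and
 * every worker's AAD buffer, cached-frame ring and reset-session pool.
 * Used on the init error path. */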
static void
dpdk_disable_cryptodev_engine (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_engine_thread_t *ptd;

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);

  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);

  vec_foreach (ptd, cmt->per_thread_data)
    {
      if (ptd->aad_buf)
        rte_free (ptd->aad_buf);
      if (ptd->cached_frame)
        rte_ring_free (ptd->cached_frame);
      if (ptd->reset_sess.crypto_sess)
        {
          struct rte_mempool *mp =
            rte_mempool_from_obj ((void *) ptd->reset_sess.crypto_sess);

          rte_mempool_free (mp);
          ptd->reset_sess.crypto_sess = 0;
        }
    }
}

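/* Create a per-worker AES-GCM session with an all-zero key, held in its
 * own two-element session pool; the engine uses it to put a queue pair's
 * raw data-path context back into a known state. */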
static clib_error_t *
create_reset_sess (cryptodev_engine_thread_t *ptd, u32 lcore, u32 numa,
                   u32 sess_sz)
{
  struct rte_crypto_sym_xform xform = { 0 };
  struct rte_crypto_aead_xform *aead_xform = &xform.aead;
  struct rte_cryptodev_sym_session *sess;
  struct rte_mempool *mp = 0;
  u8 key[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  u8 *name = 0;
  clib_error_t *error = 0;

  /* create a small session pool holding this worker's reset session */
  name = format (0, "vcryptodev_s_reset_%u_%u%c", numa, lcore, 0);
  mp = rte_cryptodev_sym_session_pool_create ((char *) name, 2, sess_sz, 0, 0,
                                              numa);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto error_exit;
    }
  vec_free (name);

  xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
  aead_xform->aad_length = 8;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = 0;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key;
  aead_xform->key.length = 16;

  sess = rte_cryptodev_sym_session_create (mp);
  if (!sess)
    {
      error = clib_error_return (0, "failed to create session");
      goto error_exit;
    }

  if (rte_cryptodev_sym_session_init (ptd->cryptodev_id, sess, &xform, mp) < 0)
    {
      error =
        clib_error_return (0, "failed to initialize session private data");
      goto error_exit;
    }

  ptd->reset_sess.crypto_sess = sess;

  return 0;

error_exit:
  if (mp)
    rte_mempool_free (mp);
  if (name)
    vec_free (name);

  return error;
}

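/* Engine entry point: probe the cryptodev devices, allocate per-thread
 * and per-NUMA resources, then register the engine and its async
 * enqueue/dequeue handlers with the vnet crypto infrastructure. */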
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  u32 sess_sz, dp_sz;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;

  cmt->iova_mode = rte_eal_iova_mode ();

  vec_validate (cmt->per_numa_data, vm->numa_node);

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  cryptodev_get_max_sz (&sess_sz, &dp_sz);

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

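  /* per-thread setup: each worker gets an AAD scratch buffer, a raw
   * data-path context sized for the largest device, and a small ring
   * for caching dequeued frames */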
  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      numa = vlib_get_main_by_index (i)->numa_node;

      ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS *
                                         CRYPTODEV_MAX_AAD_SIZE,
                                         CLIB_CACHE_LINE_BYTES,
                                         numa);

      if (ptd->aad_buf == 0)
        {
          error = clib_error_return (0, "Failed to alloc aad buf");
          goto err_handling;
        }

      ptd->aad_phy_addr = rte_malloc_virt2iova (ptd->aad_buf);

      ptd->ctx = rte_zmalloc_socket (0, dp_sz, CLIB_CACHE_LINE_BYTES, numa);
      if (!ptd->ctx)
        {
          error = clib_error_return (0, "Failed to alloc raw dp ctx");
          goto err_handling;
        }

      name = format (0, "cache_frame_ring_%u_%u%c", numa, i, 0);
      ptd->cached_frame = rte_ring_create ((char *) name,
                                           CRYPTODEV_DEQ_CACHE_SZ, numa,
                                           RING_F_SC_DEQ | RING_F_SP_ENQ);

      if (ptd->cached_frame == 0)
        {
          error = clib_error_return (0, "Failed to alloc frame ring");
          goto err_handling;
        }
      vec_free (name);

      vec_validate (cmt->per_numa_data, numa);
      numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

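      /* lazily create the two pools a NUMA node needs the first time a
       * worker on that node is seen: one for session handles, one for
       * driver-private session data */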
      if (!numa_data->sess_pool)
        {
          /* create session pool for the numa node */
          name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
          mp = rte_cryptodev_sym_session_pool_create (
            (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
          if (!mp)
            {
              error =
                clib_error_return (0, "Not enough memory for mp %s", name);
              goto err_handling;
            }
          vec_free (name);

          numa_data->sess_pool = mp;

          /* create session private pool for the numa node */
          name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
          mp =
            rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
                                0, 0, NULL, NULL, NULL, NULL, numa, 0);
          if (!mp)
            {
              error =
                clib_error_return (0, "Not enough memory for mp %s", name);
              vec_free (name);
              goto err_handling;
            }

          vec_free (name);

          numa_data->sess_priv_pool = mp;
        }

      error = create_reset_sess (ptd, i, numa, sess_sz);
      if (error)
        goto err_handling;

      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
    }


  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
                                      "DPDK Cryptodev Engine");

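  /* for each supported AEAD (AES-GCM) variant, register an encrypt and a
   * decrypt enqueue handler specialized per AAD length, all sharing the
   * same dequeue handler */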
#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
        cryptodev_enqueue_gcm_aad_##f##_enc, cryptodev_frame_dequeue);        \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
        cryptodev_enqueue_gcm_aad_##f##_dec, cryptodev_frame_dequeue);        \
    }

  foreach_vnet_aead_crypto_conversion
#undef _
/* clang-format off */
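  /* likewise for linked cipher + auth (AES-CBC + HMAC) combinations:
   * handlers are registered only when every device supports both the
   * cipher key size and the digest size */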
#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
        cryptodev_enqueue_linked_alg_enc, cryptodev_frame_dequeue);           \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
        cryptodev_enqueue_linked_alg_dec, cryptodev_frame_dequeue);           \
    }

    foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
  /* clang-format on */

  /* This engine is only enabled when cryptodev device(s) are configured in
   * startup.conf. Assume the user wants it used and turn on async crypto
   * mode here.
   */
  vnet_crypto_request_async_mode (1);
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);

  return error;
}
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */