dpdk-cryptodev: enq/deq scheme rework
[vpp.git] / src / plugins / dpdk / cryptodev / cryptodev.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/ipsec/ipsec.h>
22 #include <vpp/app/version.h>
23
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
27 #undef always_inline
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_config.h>
33
34 #include "cryptodev.h"
35
36 #if CLIB_DEBUG > 0
37 #define always_inline static inline
38 #else
39 #define always_inline static inline __attribute__ ((__always_inline__))
40 #endif
41
42 cryptodev_main_t cryptodev_main;
43
44 static_always_inline int
45 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
46                     cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
47                     u32 aad_len)
48 {
49   struct rte_crypto_aead_xform *aead_xform = &xform->aead;
50   memset (xform, 0, sizeof (*xform));
51   xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
52   xform->next = 0;
53
54   if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
55       key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
56       key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
57     {
58       aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
59     }
60   else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
61     {
62       aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
63     }
64   else
65     return -1;
66
67   aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
68     RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
69   aead_xform->aad_length = aad_len;
70   aead_xform->digest_length = 16;
71   aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
72   aead_xform->iv.length = 12;
73   aead_xform->key.data = key->data;
74   aead_xform->key.length = vec_len (key->data);
75
76   return 0;
77 }
78
/* Build a two-element cipher+auth transform chain for a "linked" vnet key
 * (separate crypto and integrity keys, e.g. AES-CBC + SHA-HMAC).
 *
 * For encryption the chain order is cipher -> auth; for decryption it is
 * auth -> cipher (verify the digest before decrypting).
 *
 * @param xforms   caller-provided array of at least two transforms
 * @param op_type  encrypt or decrypt direction
 * @param key      linked vnet key referencing crypto and integ sub-keys
 * @return 0 on success, -1 on unknown sub-key index or unsupported algo
 */
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  /* Map the vnet linked-algo enum to DPDK cipher/auth algos and the digest
     length via the X-macro table in cryptodev.h. */
  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
142
/* Free a cryptodev symmetric session across all devices.
 *
 * DPDK >= 22.11 takes a device id in rte_cryptodev_sym_session_free(); we
 * probe each device until one reports success.  Older DPDK requires clearing
 * per-device private data on every device first, then a single free call.
 * NULL is accepted and ignored.
 */
static_always_inline void
cryptodev_session_del (cryptodev_session_t *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
    if (rte_cryptodev_sym_session_free (i, sess) == 0)
      break;
#else
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
#endif
}
163
164 static int
165 check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
166 {
167   cryptodev_main_t *cmt = &cryptodev_main;
168   cryptodev_capability_t *vcap;
169   u32 *s;
170
171   vec_foreach (vcap, cmt->supported_caps)
172     {
173       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
174         continue;
175       if (vcap->cipher.algo != algo)
176         continue;
177       vec_foreach (s, vcap->cipher.key_sizes)
178         if (*s == key_size)
179           return 1;
180     }
181
182   return 0;
183 }
184
185 static int
186 check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
187 {
188   cryptodev_main_t *cmt = &cryptodev_main;
189   cryptodev_capability_t *vcap;
190   u32 *s;
191
192   vec_foreach (vcap, cmt->supported_caps)
193     {
194       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
195         continue;
196       if (vcap->auth.algo != algo)
197         continue;
198       vec_foreach (s, vcap->auth.digest_sizes)
199         if (*s == digest_size)
200           return 1;
201     }
202
203   return 0;
204 }
205
206 static_always_inline int
207 check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
208                     u32 digest_size, u32 aad_size)
209 {
210   cryptodev_main_t *cmt = &cryptodev_main;
211   cryptodev_capability_t *vcap;
212   u32 *s;
213   u32 key_match = 0, digest_match = 0, aad_match = 0;
214
215   vec_foreach (vcap, cmt->supported_caps)
216     {
217       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
218         continue;
219       if (vcap->aead.algo != algo)
220         continue;
221       vec_foreach (s, vcap->aead.digest_sizes)
222         if (*s == digest_size)
223           {
224             digest_match = 1;
225             break;
226           }
227       vec_foreach (s, vcap->aead.key_sizes)
228         if (*s == key_size)
229           {
230             key_match = 1;
231             break;
232           }
233       vec_foreach (s, vcap->aead.aad_sizes)
234         if (*s == aad_size)
235           {
236             aad_match = 1;
237             break;
238           }
239     }
240
241   if (key_match == 1 && digest_match == 1 && aad_match == 1)
242     return 1;
243
244   return 0;
245 }
246
/* Check whether a vnet crypto key's algorithm is supported by the probed
 * cryptodev capabilities.
 *
 * Linked keys: cipher and HMAC support must both be present for the mapped
 * DPDK algorithms.  AEAD keys: both AAD-size variants from the conversion
 * table must be supported (hence "matched < 2" below), since sessions may
 * be created for either AAD length at runtime.
 *
 * @return 1 if supported, 0 otherwise.
 */
static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
            default : return 0;
        }
      return 0;
    }

#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

    if (matched < 2) return 0;

  return 1;
}
281
/* Handle a vnet crypto key event (add/del/modify) for the cryptodev engine.
 *
 * On ADD: validate that the algorithm is supported and pre-allocate the
 * per-numa / per-op-type session pointer table (sessions themselves are
 * created lazily by cryptodev_session_create()).
 * On DEL/MODIFY: free any existing sessions and clear the pointers so a
 * subsequent use re-creates them.
 *
 * @param aad_len  AAD length used when sizing AEAD session checks
 */
void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      /* NOTE(review): this check can never fire — vec_validate above has
	 already grown cmt->keys to cover idx. */
      if (idx >= vec_len (cmt->keys))
        return;

      vec_foreach_index (i, cmt->per_numa_data)
        {
          if (!ckey->keys)
            continue;
          if (!ckey->keys[i])
            continue;
          if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
            {
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

	      /* ensure readers never observe a freed session pointer */
              CLIB_MEMORY_STORE_BARRIER ();
              ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
              ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
            }
        }
      return;
    }

  /* create key */

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  /* allocate the per-numa table of [encrypt, decrypt] session slots */
  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}
328
/* vnet crypto key-op callback: delegates to cryptodev_sess_handler with an
 * AAD length of 8 (presumably the ESP default — TODO confirm against the
 * registration site). */
/*static*/ void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
335
/* Allocate the DPDK session mempool(s) for one NUMA node.
 *
 * For DPDK >= 22.11 a single session pool (element size cmt->sess_sz) is
 * enough; older DPDK additionally needs a private-data pool sized for every
 * registered driver.  On any failure, everything allocated here is freed
 * and a clib error is returned; on success returns NULL.
 *
 * @param numa_node       NUMA node to allocate the mempool memory on
 * @param sess_pools_elt  output slot whose pool pointers are filled in
 * @param len             index used only to make the pool names unique
 */
clib_error_t *
allocate_session_pools (u32 numa_node,
                        cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
#else
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
#endif

  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);

  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);
#endif

  /* success also falls through here; the error check below keeps the
     cleanup on the failure path only */
clear_mempools:
  if (error)
    {
      vec_free (name);
      if (sess_pools_elt->sess_pool)
        rte_mempool_free (sess_pools_elt->sess_pool);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      if (sess_pools_elt->sess_priv_pool)
        rte_mempool_free (sess_pools_elt->sess_priv_pool);
#endif
      return error;
    }
  return 0;
}
388
/* Create the encrypt and decrypt DPDK sessions for key `idx` on the calling
 * thread's NUMA node and publish them in cmt->keys[idx].
 *
 * Serialized by cmt->tlock.  A session pool with at least two free entries
 * is found (or newly allocated) first; then the enc/dec transform chains are
 * built and the sessions created.  DPDK >= 22.11 creates fully-initialized
 * sessions against the first cryptodev instance; older DPDK creates empty
 * sessions and initializes them per device, skipping drivers that already
 * hold session private data (to avoid bumping the refcount twice).
 * aad_len is stashed in the session opaque data for later comparison.
 *
 * @return 0 on success, -1 on any failure (partially created sessions are
 *         freed before returning).
 */
int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                          u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
  struct rte_cryptodev_info dev_info;
#endif
  u32 numa_node = vm->numa_node;
  clib_error_t *error;
  int ret = 0;
  u8 found = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);

  /* find an existing pool with room for two sessions (enc + dec),
     lazily allocating pools that were reserved but never created */
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
        {
          error = allocate_session_pools (numa_node, sess_pools_elt,
                                          vec_len (numa_data->sess_pools) - 1);
          if (error)
            {
              ret = -1;
              goto clear_key;
            }
        }
      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
        {
          found = 1;
          break;
        }
    }

  /* all pools exhausted: grow the pool vector by one and allocate it */
  if (found == 0)
    {
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
                                      vec_len (numa_data->sess_pools) - 1);
      if (error)
        {
          ret = -1;
          goto clear_key;
        }
    }

  sess_pool = sess_pools_elt->sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  /* pre-22.11: sessions are created empty and initialized per device below */
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
#endif

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  if (ret)
    {
      ret = -1;
      goto clear_key;
    }

  /* decrypt chain uses the same key; cannot fail if the encrypt chain
     succeeded, so the return value is not re-checked */
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  /* 22.11+: homogeneous drivers only, so the first instance suffices */
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  u32 dev_id = dev_inst->dev_id;
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
      !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
#else
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;

      /* if the session is already configured for the driver type, avoid
         configuring it again to increase the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
          sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
        continue;

      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
        sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
        sess_priv_pool);
      if (ret < 0)
        goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
#endif

  /* publish both sessions only after they are fully initialized */
  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return ret;
}
533
/* How a cryptodev (device, queue) resource is assigned to a worker. */
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0, /* grab the first free resource */
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,   /* move the worker to a chosen resource */
} cryptodev_resource_assign_op_t;
539
/**
 *  Assign a cryptodev (device, queue) resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: target resource index; ignored when op is
 *         CRYPTODEV_RESOURCE_ASSIGN_AUTO.
 *  @param op: the assignment method (auto-pick or explicit update).
 *  @return: 0 on success; -EBUSY if the worker has inflight ops or the
 *         target resource is taken; -EINVAL on bad op or inconsistent
 *         existing assignment; -1 if no free resource is left.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assign resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;

      /* locate the worker's current resource so its bit can be released */
      vec_foreach_index (idx, cmt->cryptodev_inst)
        {
          cinst = cmt->cryptodev_inst + idx;
          if (cinst->dev_id == cet->cryptodev_id &&
              cinst->q_id == cet->cryptodev_q)
            break;
        }
      /* invalid existing worker resource assignment */
      if (idx >= vec_len (cmt->cryptodev_inst))
        return -EINVAL;
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
605
/* Format one cryptodev instance row: device name, queue id and the worker
 * thread it is assigned to ("free" when unassigned).
 *
 * Relies on vec_foreach_index leaving thread_index == vec_len when the loop
 * completes without a break. */
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    /* slot 0 is the main thread when workers exist; it never owns a queue */
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
        s = format (s, "%u (%v)\n", thread_index,
                    vlib_worker_threads[thread_index].name);
        break;
      }
  }

  /* no worker matched: the resource is unassigned */
  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}
637
638 static clib_error_t *
639 cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
640                               vlib_cli_command_t * cmd)
641 {
642   cryptodev_main_t *cmt = &cryptodev_main;
643   u32 inst;
644
645   vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
646                    "Assigned-to");
647   if (vec_len (cmt->cryptodev_inst) == 0)
648     {
649       vlib_cli_output (vm, "(nil)\n");
650       return 0;
651     }
652
653   vec_foreach_index (inst, cmt->cryptodev_inst)
654     vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
655
656   if (cmt->is_raw_api)
657     vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
658   else
659     vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
660   return 0;
661 }
662
/* CLI registration: "show cryptodev assignment". */
VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
668
669 static clib_error_t *
670 cryptodev_show_sw_rings_fn (vlib_main_t *vm, unformat_input_t *input,
671                             vlib_cli_command_t *cmd)
672 {
673   cryptodev_main_t *cmt = &cryptodev_main;
674   u32 thread_index = 0;
675   vec_foreach_index (thread_index, cmt->per_thread_data)
676     {
677       cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
678       if (vlib_num_workers () > 0 && thread_index == 0)
679         continue;
680       vlib_cli_output (vm, "\n\n");
681       vlib_cli_output (vm, "Frames total: %d", cet->frames_on_ring);
682       vlib_cli_output (vm, "Frames pending in a ring: %d",
683                        cet->frames_on_ring - cet->enqueued_not_dequeueq -
684                          cet->deqeued_not_returned);
685       vlib_cli_output (vm, "Frames enqueued but not dequeued: %d",
686                        cet->enqueued_not_dequeueq);
687       vlib_cli_output (vm, "Frames dequed but not returned: %d",
688                        cet->deqeued_not_returned);
689       vlib_cli_output (vm, "inflight: %d", cet->inflight);
690       vlib_cli_output (vm, "Head: %d", cet->frame_ring.head);
691       vlib_cli_output (vm, "Tail: %d", cet->frame_ring.tail);
692       vlib_cli_output (vm, "\n\n");
693     }
694   return 0;
695 }
696
/* CLI registration: "show cryptodev sw-ring status". */
VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
  .path = "show cryptodev sw-ring status",
  .short_help = "show status of all cryptodev software rings",
  .function = cryptodev_show_sw_rings_fn,
};
702
703 static clib_error_t *
704 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
705                              vlib_cli_command_t * cmd)
706 {
707   cryptodev_main_t *cmt = &cryptodev_main;
708   cryptodev_engine_thread_t *cet;
709   unformat_input_t _line_input, *line_input = &_line_input;
710   u32 thread_index, inst_index;
711   u32 thread_present = 0, inst_present = 0;
712   clib_error_t *error = 0;
713   int ret;
714
715   /* Get a line of input. */
716   if (!unformat_user (input, unformat_line_input, line_input))
717     return 0;
718
719   while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
720     {
721       if (unformat (line_input, "thread %u", &thread_index))
722         thread_present = 1;
723       else if (unformat (line_input, "resource %u", &inst_index))
724         inst_present = 1;
725       else
726         {
727           error = clib_error_return (0, "unknown input `%U'",
728                                      format_unformat_error, line_input);
729           return error;
730         }
731     }
732
733   if (!thread_present || !inst_present)
734     {
735       error = clib_error_return (0, "mandatory argument(s) missing");
736       return error;
737     }
738
739   if (thread_index == 0 && vlib_num_workers () > 0)
740     {
741       error =
742         clib_error_return (0, "assign crypto resource for master thread");
743       return error;
744     }
745
746   if (thread_index > vec_len (cmt->per_thread_data) ||
747       inst_index > vec_len (cmt->cryptodev_inst))
748     {
749       error = clib_error_return (0, "wrong thread id or resource id");
750       return error;
751     }
752
753   cet = cmt->per_thread_data + thread_index;
754   ret = cryptodev_assign_resource (cet, inst_index,
755                                    CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
756   if (ret)
757     {
758       error =
759         clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
760       return error;
761     }
762
763   return 0;
764 }
765
/* CLI registration: "set cryptodev assignment thread <t> resource <r>". */
VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
        "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
772
773 static u32
774 cryptodev_count_queue (u32 numa)
775 {
776   struct rte_cryptodev_info info;
777   u32 n_cryptodev = rte_cryptodev_count ();
778   u32 i, q_count = 0;
779
780   for (i = 0; i < n_cryptodev; i++)
781     {
782       rte_cryptodev_info_get (i, &info);
783       q_count += info.max_nb_queue_pairs;
784     }
785
786   return q_count;
787 }
788
789 static int
790 cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
791 {
792   struct rte_cryptodev_config cfg;
793   struct rte_cryptodev_info info;
794   cryptodev_main_t *cmt = &cryptodev_main;
795   u32 i;
796   int ret;
797
798   rte_cryptodev_info_get (cryptodev_id, &info);
799
800   /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto devices
801      anymore. Only devices that have the same driver type as the first
802      initialized device can be initialized.
803    */
804 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
805   if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
806     return -1;
807 #endif
808
809   if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
810     return -1;
811
812   cfg.socket_id = info.device->numa_node;
813   cfg.nb_queue_pairs = info.max_nb_queue_pairs;
814
815   rte_cryptodev_configure (cryptodev_id, &cfg);
816
817   for (i = 0; i < info.max_nb_queue_pairs; i++)
818     {
819       struct rte_cryptodev_qp_conf qp_cfg;
820
821       qp_cfg.mp_session = 0;
822 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
823       qp_cfg.mp_session_private = 0;
824 #endif
825       qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
826
827       ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
828                                             info.device->numa_node);
829       if (ret)
830         {
831           clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
832                         cryptodev_id, i, ret);
833           break;
834         }
835     }
836
837   if (i != info.max_nb_queue_pairs)
838     return -1;
839
840   /* start the device */
841   rte_cryptodev_start (cryptodev_id);
842
843 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
844   if (cmt->drivers_cnt == 0)
845     {
846       cmt->drivers_cnt = 1;
847       cmt->driver_id = info.driver_id;
848       cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
849     }
850 #endif
851
852   for (i = 0; i < info.max_nb_queue_pairs; i++)
853     {
854       cryptodev_inst_t *cdev_inst;
855 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
856       const char *dev_name = rte_dev_name (info.device);
857 #else
858       const char *dev_name = info.device->name;
859 #endif
860       vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
861       cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
862       cdev_inst->dev_id = cryptodev_id;
863       cdev_inst->q_id = i;
864
865       snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u",
866                 info.device->name, i);
867     }
868
869   return 0;
870 }
871
872 static int
873 cryptodev_cmp (void *v1, void *v2)
874 {
875   cryptodev_inst_t *a1 = v1;
876   cryptodev_inst_t *a2 = v2;
877
878   if (a1->q_id > a2->q_id)
879     return 1;
880   if (a1->q_id < a2->q_id)
881     return -1;
882   return 0;
883 }
884
885 static int
886 cryptodev_supports_param_value (u32 *params, u32 param_value)
887 {
888   u32 *value;
889   vec_foreach (value, params)
890     {
891       if (*value == param_value)
892         return 1;
893     }
894   return 0;
895 }
896
/* Check whether the aggregated capability set supports the algorithm in
 * `idx` with the requested parameter sizes.
 *
 * Only the sizes relevant to the xform type are consulted: digest size for
 * AUTH, key size for CIPHER, and all three for AEAD.
 *
 * @return 1 if a matching capability entry exists, 0 otherwise.
 */
int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                             u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;
  vec_foreach (cap, cmt->supported_caps)
    {

      if (cap->xform_type != idx->type)
        continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
          cap->auth.algo == idx->algo.auth &&
          cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
          cap->cipher.algo == idx->algo.cipher &&
          cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
          cap->aead.algo == idx->algo.aead &&
          cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
          cryptodev_supports_param_value (cap->aead.digest_sizes,
                                          digest_size) &&
          cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
        return 1;
    }
  return 0;
}
929
/* Intersect a vector of candidate parameter sizes with a device capability
 * range [param_size_min, param_size_max] stepped by `increment`.
 *
 * Sizes not representable by the range are deleted from *param_sizes in
 * place.  An increment of 0 means the range is the single value
 * param_size_min.  The index is only advanced when no element was deleted,
 * because vec_delete shifts the remaining elements down.
 */
static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
	  /* zero increment: only the min value is valid; avoid an
	     infinite loop */
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* no such param_size in cap so delete  this size in temp_cap params */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}
958
959 static void
960 cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
961 {
962   cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];
963
964   switch (temp_cap.xform_type)
965     {
966     case RTE_CRYPTO_SYM_XFORM_AUTH:
967       vec_free (temp_cap.auth.digest_sizes);
968       break;
969     case RTE_CRYPTO_SYM_XFORM_CIPHER:
970       vec_free (temp_cap.cipher.key_sizes);
971       break;
972     case RTE_CRYPTO_SYM_XFORM_AEAD:
973       vec_free (temp_cap.aead.key_sizes);
974       vec_free (temp_cap.aead.aad_sizes);
975       vec_free (temp_cap.aead.digest_sizes);
976       break;
977     default:
978       break;
979     }
980   vec_delete (*temp_caps, 1, temp_cap_id);
981 }
982
983 static u32
984 cryptodev_remove_unsupported_param_sizes (
985   cryptodev_capability_t *temp_cap,
986   const struct rte_cryptodev_capabilities *dev_caps)
987 {
988   u32 cap_found = 0;
989   const struct rte_cryptodev_capabilities *cap = &dev_caps[0];
990
991   while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
992     {
993       if (cap->sym.xform_type == temp_cap->xform_type)
994         switch (cap->sym.xform_type)
995           {
996           case RTE_CRYPTO_SYM_XFORM_CIPHER:
997             if (cap->sym.cipher.algo == temp_cap->cipher.algo)
998               {
999                 remove_unsupported_param_size (
1000                   &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
1001                   cap->sym.cipher.key_size.max,
1002                   cap->sym.cipher.key_size.increment);
1003                 if (vec_len (temp_cap->cipher.key_sizes) > 0)
1004                   cap_found = 1;
1005               }
1006             break;
1007           case RTE_CRYPTO_SYM_XFORM_AUTH:
1008             if (cap->sym.auth.algo == temp_cap->auth.algo)
1009               {
1010                 remove_unsupported_param_size (
1011                   &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
1012                   cap->sym.auth.digest_size.max,
1013                   cap->sym.auth.digest_size.increment);
1014                 if (vec_len (temp_cap->auth.digest_sizes) > 0)
1015                   cap_found = 1;
1016               }
1017             break;
1018           case RTE_CRYPTO_SYM_XFORM_AEAD:
1019             if (cap->sym.aead.algo == temp_cap->aead.algo)
1020               {
1021                 remove_unsupported_param_size (
1022                   &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
1023                   cap->sym.aead.key_size.max,
1024                   cap->sym.aead.key_size.increment);
1025                 remove_unsupported_param_size (
1026                   &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
1027                   cap->sym.aead.aad_size.max,
1028                   cap->sym.aead.aad_size.increment);
1029                 remove_unsupported_param_size (
1030                   &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
1031                   cap->sym.aead.digest_size.max,
1032                   cap->sym.aead.digest_size.increment);
1033                 if (vec_len (temp_cap->aead.key_sizes) > 0 &&
1034                     vec_len (temp_cap->aead.aad_sizes) > 0 &&
1035                     vec_len (temp_cap->aead.digest_sizes) > 0)
1036                   cap_found = 1;
1037               }
1038             break;
1039           default:
1040             break;
1041           }
1042       if (cap_found)
1043         break;
1044       cap++;
1045     }
1046
1047   return cap_found;
1048 }
1049
1050 static void
1051 cryptodev_get_common_capabilities ()
1052 {
1053   cryptodev_main_t *cmt = &cryptodev_main;
1054   cryptodev_inst_t *dev_inst;
1055   struct rte_cryptodev_info dev_info;
1056   u32 previous_dev_id, dev_id;
1057   u32 cap_id = 0;
1058   u32 param;
1059   cryptodev_capability_t tmp_cap;
1060   const struct rte_cryptodev_capabilities *cap;
1061   const struct rte_cryptodev_capabilities *dev_caps;
1062
1063   clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
1064   if (vec_len (cmt->cryptodev_inst) == 0)
1065     return;
1066   dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
1067   rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
1068   cap = &dev_info.capabilities[0];
1069
1070   /*init capabilities vector*/
1071   while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
1072     {
1073       if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1074         {
1075           cap++;
1076           continue;
1077         }
1078
1079       tmp_cap.xform_type = cap->sym.xform_type;
1080       switch (cap->sym.xform_type)
1081         {
1082         case RTE_CRYPTO_SYM_XFORM_CIPHER:
1083           tmp_cap.cipher.key_sizes = 0;
1084           tmp_cap.cipher.algo = cap->sym.cipher.algo;
1085           for (param = cap->sym.cipher.key_size.min;
1086                param <= cap->sym.cipher.key_size.max;
1087                param += cap->sym.cipher.key_size.increment)
1088             {
1089               vec_add1 (tmp_cap.cipher.key_sizes, param);
1090               if (cap->sym.cipher.key_size.increment == 0)
1091                 break;
1092             }
1093           break;
1094         case RTE_CRYPTO_SYM_XFORM_AUTH:
1095           tmp_cap.auth.algo = cap->sym.auth.algo;
1096           tmp_cap.auth.digest_sizes = 0;
1097           for (param = cap->sym.auth.digest_size.min;
1098                param <= cap->sym.auth.digest_size.max;
1099                param += cap->sym.auth.digest_size.increment)
1100             {
1101               vec_add1 (tmp_cap.auth.digest_sizes, param);
1102               if (cap->sym.auth.digest_size.increment == 0)
1103                 break;
1104             }
1105           break;
1106         case RTE_CRYPTO_SYM_XFORM_AEAD:
1107           tmp_cap.aead.key_sizes = 0;
1108           tmp_cap.aead.aad_sizes = 0;
1109           tmp_cap.aead.digest_sizes = 0;
1110           tmp_cap.aead.algo = cap->sym.aead.algo;
1111           for (param = cap->sym.aead.key_size.min;
1112                param <= cap->sym.aead.key_size.max;
1113                param += cap->sym.aead.key_size.increment)
1114             {
1115               vec_add1 (tmp_cap.aead.key_sizes, param);
1116               if (cap->sym.aead.key_size.increment == 0)
1117                 break;
1118             }
1119           for (param = cap->sym.aead.aad_size.min;
1120                param <= cap->sym.aead.aad_size.max;
1121                param += cap->sym.aead.aad_size.increment)
1122             {
1123               vec_add1 (tmp_cap.aead.aad_sizes, param);
1124               if (cap->sym.aead.aad_size.increment == 0)
1125                 break;
1126             }
1127           for (param = cap->sym.aead.digest_size.min;
1128                param <= cap->sym.aead.digest_size.max;
1129                param += cap->sym.aead.digest_size.increment)
1130             {
1131               vec_add1 (tmp_cap.aead.digest_sizes, param);
1132               if (cap->sym.aead.digest_size.increment == 0)
1133                 break;
1134             }
1135           break;
1136         default:
1137           break;
1138         }
1139
1140       vec_add1 (cmt->supported_caps, tmp_cap);
1141       cap++;
1142     }
1143
1144   while (cap_id < vec_len (cmt->supported_caps))
1145     {
1146       u32 cap_is_supported = 1;
1147       previous_dev_id = cmt->cryptodev_inst->dev_id;
1148
1149       vec_foreach (dev_inst, cmt->cryptodev_inst)
1150         {
1151           dev_id = dev_inst->dev_id;
1152           if (previous_dev_id != dev_id)
1153             {
1154               previous_dev_id = dev_id;
1155               rte_cryptodev_info_get (dev_id, &dev_info);
1156               dev_caps = &dev_info.capabilities[0];
1157               cap_is_supported = cryptodev_remove_unsupported_param_sizes (
1158                 &cmt->supported_caps[cap_id], dev_caps);
1159               if (!cap_is_supported)
1160                 {
1161                   cryptodev_delete_cap (&cmt->supported_caps, cap_id);
1162                   /*no need to check other devices as this one doesn't support
1163                    * this temp_cap*/
1164                   break;
1165                 }
1166             }
1167         }
1168       if (cap_is_supported)
1169         cap_id++;
1170     }
1171 }
1172
1173 static int
1174 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
1175 {
1176   cryptodev_main_t *cmt = &cryptodev_main;
1177   u32 n_queues = cryptodev_count_queue (vm->numa_node);
1178   u32 i;
1179
1180   if (n_queues < n_workers)
1181     return -1;
1182
1183   for (i = 0; i < rte_cryptodev_count (); i++)
1184     cryptodev_configure (vm, i);
1185
1186   if (vec_len (cmt->cryptodev_inst) == 0)
1187     return -1;
1188   cryptodev_get_common_capabilities ();
1189   vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);
1190
1191   /* if there is not enough device stop cryptodev */
1192   if (vec_len (cmt->cryptodev_inst) < n_workers)
1193     return -1;
1194
1195   return 0;
1196 }
1197
1198 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
1199 static void
1200 is_drv_unique (u32 driver_id, u32 **unique_drivers)
1201 {
1202   u32 *unique_elt;
1203   u8 found = 0;
1204
1205   vec_foreach (unique_elt, *unique_drivers)
1206     {
1207       if (*unique_elt == driver_id)
1208         {
1209           found = 1;
1210           break;
1211         }
1212     }
1213
1214   if (!found)
1215     vec_add1 (*unique_drivers, driver_id);
1216 }
1217 #endif
1218
/**
 * Plugin entry point: probe cryptodev devices, set up per-numa and
 * per-thread state, and register this engine with the vnet crypto
 * infrastructure.
 *
 * Returns 0 on success (including the benign "no usable cryptodev" case,
 * where the engine is simply not registered), or a clib error on a real
 * configuration failure.
 */
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  u32 node;
  u8 nodes = 0;
  /* main thread is skipped only when dedicated workers exist */
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  /* find the highest numa node id present in the cpu socket bitmap */
  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
	nodes = node;
    }

  /* one entry per numa node, each with (at least) one session pool slot */
  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    return 0;

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  /* pre-22.11 DPDK: count unique drivers and track the largest private
   * session size across devices (needed for session mempool sizing) */
  struct rte_cryptodev_info dev_info;
  cryptodev_inst_t *dev_inst;
  u32 *unique_drivers = 0;
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
	rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);
#endif

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1,
		       CLIB_CACHE_LINE_BYTES);
  /* give each worker thread its own cryptodev queue-pair resource */
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
	  0)
	{
	  error = clib_error_return (0, "Failed to configure cryptodev");
	  goto err_handling;
	}
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  /* prefer the raw data-path API when the build/devices provide it;
   * fall back to the crypto-op API otherwise */
  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are presented in
   * startup.conf. Assume it is wanted to be used, turn on async mode here.
   */
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  return error;
}