dpdk-cryptodev: improve cryptodev cache ring implementation
src/plugins/dpdk/cryptodev/cryptodev.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

cryptodev_main_t cryptodev_main;

static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
                    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
                    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
    {
      aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
    }
  else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
    {
      aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
    }
  else
    return -1;

  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
                      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
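
/* Illustrative sketch (not part of the upstream file): for a linked key such
 * as VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12, the macro expansion above
 * selects RTE_CRYPTO_CIPHER_AES_CBC and RTE_CRYPTO_AUTH_SHA1_HMAC with a
 * 12-byte digest, and the encrypt direction chains the xforms roughly as:
 *
 *   xforms[0].type = RTE_CRYPTO_SYM_XFORM_CIPHER;  // cipher first
 *   xforms[0].cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
 *   xforms[0].next = &xforms[1];
 *   xforms[1].type = RTE_CRYPTO_SYM_XFORM_AUTH;    // then auth (GENERATE)
 *
 * For decrypt the roles swap: xforms[0] is the auth (VERIFY) transform and
 * xforms[1] the cipher, matching IPsec's verify-then-decrypt order. */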

static_always_inline void
cryptodev_session_del (cryptodev_session_t *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
    if (rte_cryptodev_sym_session_free (i, sess) == 0)
      break;
#else
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
#endif
}

static int
check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
        continue;
      if (vcap->cipher.algo != algo)
        continue;
      vec_foreach (s, vcap->cipher.key_sizes)
        if (*s == key_size)
          return 1;
    }

  return 0;
}

static int
check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
        continue;
      if (vcap->auth.algo != algo)
        continue;
      vec_foreach (s, vcap->auth.digest_sizes)
        if (*s == digest_size)
          return 1;
    }

  return 0;
}

static_always_inline int
check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
                    u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;
  u32 key_match = 0, digest_match = 0, aad_match = 0;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
        continue;
      if (vcap->aead.algo != algo)
        continue;
      vec_foreach (s, vcap->aead.digest_sizes)
        if (*s == digest_size)
          {
            digest_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.key_sizes)
        if (*s == key_size)
          {
            key_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.aad_sizes)
        if (*s == aad_size)
          {
            aad_match = 1;
            break;
          }
    }

  if (key_match == 1 && digest_match == 1 && aad_match == 1)
    return 1;

  return 0;
}

static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
            default : return 0;
        }
      return 0;
    }

#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

    if (matched < 2) return 0;

  return 1;
}

void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
        return;

      vec_foreach_index (i, cmt->per_numa_data)
        {
          if (!ckey->keys)
            continue;
          if (!ckey->keys[i])
            continue;
          if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
            {
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

              CLIB_MEMORY_STORE_BARRIER ();
              ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
              ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
            }
        }
      return;
    }

  /* create key */

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

/*static*/ void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}

clib_error_t *
allocate_session_pools (u32 numa_node,
                        cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
#else
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
#endif

  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);

  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);
#endif

clear_mempools:
  if (error)
    {
      vec_free (name);
      if (sess_pools_elt->sess_pool)
        rte_mempool_free (sess_pools_elt->sess_pool);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      if (sess_pools_elt->sess_priv_pool)
        rte_mempool_free (sess_pools_elt->sess_priv_pool);
#endif
      return error;
    }
  return 0;
}

int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                          u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
  struct rte_cryptodev_info dev_info;
#endif
  u32 numa_node = vm->numa_node;
  clib_error_t *error;
  int ret = 0;
  u8 found = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
        {
          error = allocate_session_pools (numa_node, sess_pools_elt,
                                          vec_len (numa_data->sess_pools) - 1);
          if (error)
            {
              ret = -1;
              goto clear_key;
            }
        }
      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
        {
          found = 1;
          break;
        }
    }

  if (found == 0)
    {
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
                                      vec_len (numa_data->sess_pools) - 1);
      if (error)
        {
          ret = -1;
          goto clear_key;
        }
    }

  sess_pool = sess_pools_elt->sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
#endif

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  if (ret)
    {
      ret = -1;
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  u32 dev_id = dev_inst->dev_id;
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
      !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
#else
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;

      /* if the session is already configured for the driver type, avoid
         configuring it again, which would bump the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
          sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
        continue;

      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
        sess_priv_pool);
      if (ret < 0)
        goto clear_key;
      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
        sess_priv_pool);
      if (ret < 0)
        goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
#endif

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return ret;
}
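
/* Usage sketch (illustrative, simplified): sessions are created lazily by
 * the data path rather than at key-add time. cryptodev_sess_handler () above
 * only reserves the per-numa session slots; a frame enqueue handler is then
 * expected to do roughly:
 *
 *   cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, key_index);
 *   if (PREDICT_FALSE (!ckey->keys[vm->numa_node][op_type]))
 *     if (cryptodev_session_create (vm, key_index, aad_len) < 0)
 *       return -1;  // no session; the caller marks the frame as failed
 */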

typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 *  assign a cryptodev resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 *  @param op: the assignment method.
 *  @return: 0 if successful, negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no ops are inflight */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
        {
          cinst = cmt->cryptodev_inst + idx;
          if (cinst->dev_id == cet->cryptodev_id &&
              cinst->q_id == cet->cryptodev_q)
            break;
        }
      /* invalid existing worker resource assignment */
      if (idx >= vec_len (cmt->cryptodev_inst))
        return -EINVAL;
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
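
/* Usage sketch (illustrative): the init path auto-assigns each worker the
 * first free {device, queue} pair, while the CLI handler further below uses
 * the explicit update mode:
 *
 *   // at engine init, see dpdk_cryptodev_init ()
 *   cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
 *
 *   // "set cryptodev assignment thread <t> resource <r>"
 *   cryptodev_assign_resource (cet, inst_index,
 *                              CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
 */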

static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
        s = format (s, "%u (%v)\n", thread_index,
                    vlib_worker_threads[thread_index].name);
        break;
      }
  }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                              vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
                   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  if (cmt->is_raw_api)
    vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
  else
    vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};

static clib_error_t *
cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
                               vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 thread_index = 0;
  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      cryptodev_cache_ring_t *ring = &cet->cache_ring;
      u16 head = ring->head;
      u16 tail = ring->tail;
      u16 n_cached = ((head == tail) && (ring->frames[head].f == 0)) ?
                             0 :
                     ((head == tail) && (ring->frames[head].f != 0)) ?
                             (CRYPTODEV_CACHE_QUEUE_MASK + 1) :
                     (head > tail) ?
                             (head - tail) :
                             (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);

      u16 enq_head = ring->enq_head;
      u16 deq_tail = ring->deq_tail;
      u16 n_frames_inflight =
        ((enq_head == deq_tail) && (ring->frames[enq_head].f == 0)) ?
                0 :
        ((enq_head == deq_tail) && (ring->frames[enq_head].f != 0)) ?
                CRYPTODEV_CACHE_QUEUE_MASK + 1 :
        (enq_head > deq_tail) ?
                (enq_head - deq_tail) :
                (CRYPTODEV_CACHE_QUEUE_MASK - deq_tail + enq_head);

      u16 n_frames_processed =
        ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ?
                0 :
        ((tail == deq_tail) && (ring->frames[deq_tail].f != 0)) ?
                                  (CRYPTODEV_CACHE_QUEUE_MASK + 1) :
        (deq_tail > tail) ? (deq_tail - tail) :
                                  (CRYPTODEV_CACHE_QUEUE_MASK - tail + deq_tail);

      if (vlib_num_workers () > 0 && thread_index == 0)
        continue;
      vlib_cli_output (vm, "\n\n");
      vlib_cli_output (vm, "Frames total: %d", n_cached);
      vlib_cli_output (vm, "Frames pending in the ring: %d",
                       n_cached - n_frames_inflight - n_frames_processed);
      vlib_cli_output (vm, "Frames enqueued but not dequeued: %d",
                       n_frames_inflight);
      vlib_cli_output (vm, "Frames dequeued but not returned: %d",
                       n_frames_processed);
      vlib_cli_output (vm, "inflight: %d", cet->inflight);
      vlib_cli_output (vm, "Head: %d", ring->head);
      vlib_cli_output (vm, "Tail: %d", ring->tail);
      vlib_cli_output (vm, "\n\n");
    }
  return 0;
}
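
/* The three counters above share one ring-distance computation; a minimal
 * refactoring sketch (hypothetical helper, assuming the ring holds
 * CRYPTODEV_CACHE_QUEUE_MASK + 1 slots and equal indices with a non-zero
 * frame pointer mean "full"):
 *
 *   static_always_inline u16
 *   ring_distance (cryptodev_cache_ring_t *r, u16 from, u16 to)
 *   {
 *     if (from == to)
 *       return r->frames[from].f ? CRYPTODEV_CACHE_QUEUE_MASK + 1 : 0;
 *     return (to > from) ? to - from :
 *                          CRYPTODEV_CACHE_QUEUE_MASK - from + to;
 *   }
 *
 * n_cached, n_frames_inflight and n_frames_processed would then be
 * ring_distance (ring, tail, head), ring_distance (ring, deq_tail, enq_head)
 * and ring_distance (ring, tail, deq_tail) respectively. */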

VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
  .path = "show cryptodev cache status",
  .short_help = "show status of all cryptodev cache rings",
  .function = cryptodev_show_cache_rings_fn,
};

static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                             vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
        thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
        inst_present = 1;
      else
        {
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, line_input);
          return error;
        }
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
        clib_error_return (0, "cannot assign crypto resource to master thread");
      return error;
    }

  if (thread_index >= vec_len (cmt->per_thread_data) ||
      inst_index >= vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
                                   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error =
        clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
        "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};

static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 i, q_count = 0;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto devices
     anymore. Only devices that have the same driver type as the first
     initialized device can be initialized.
   */
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
    return -1;
#endif

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      qp_cfg.mp_session_private = 0;
#endif
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
                                            info.device->numa_node);
      if (ret)
        {
          clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
                        cryptodev_id, i, ret);
          break;
        }
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 0)
    {
      cmt->drivers_cnt = 1;
      cmt->driver_id = info.driver_id;
      cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
    }
#endif

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
      const char *dev_name = rte_dev_name (info.device);
#else
      const char *dev_name = info.device->name;
#endif
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u", dev_name,
                i);
    }

  return 0;
}

static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;
  return 0;
}

static int
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;
  vec_foreach (value, params)
    {
      if (*value == param_value)
        return 1;
    }
  return 0;
}

int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                             u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;
  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
        continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
          cap->auth.algo == idx->algo.auth &&
          cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
          cap->cipher.algo == idx->algo.cipher &&
          cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
          cap->aead.algo == idx->algo.aead &&
          cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
          cryptodev_supports_param_value (cap->aead.digest_sizes,
                                          digest_size) &&
          cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
        return 1;
    }
  return 0;
}

static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* no such param_size in cap so delete this size in temp_cap params */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}
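
/* Worked example (illustrative): a device capability that advertises
 * digest_size = { .min = 12, .max = 16, .increment = 4 } supports exactly
 * { 12, 16 }; an increment of 0 means only `min' is valid. Given a common
 * vector of { 12, 14, 16 }, remove_unsupported_param_size () above deletes
 * 14 and keeps { 12, 16 }. */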

static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}

static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
        switch (cap->sym.xform_type)
          {
          case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (cap->sym.cipher.algo == temp_cap->cipher.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
                  cap->sym.cipher.key_size.max,
                  cap->sym.cipher.key_size.increment);
                if (vec_len (temp_cap->cipher.key_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (cap->sym.auth.algo == temp_cap->auth.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
                  cap->sym.auth.digest_size.max,
                  cap->sym.auth.digest_size.increment);
                if (vec_len (temp_cap->auth.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AEAD:
            if (cap->sym.aead.algo == temp_cap->aead.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
                  cap->sym.aead.key_size.max,
                  cap->sym.aead.key_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
                  cap->sym.aead.aad_size.max,
                  cap->sym.aead.aad_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
                  cap->sym.aead.digest_size.max,
                  cap->sym.aead.digest_size.increment);
                if (vec_len (temp_cap->aead.key_sizes) > 0 &&
                    vec_len (temp_cap->aead.aad_sizes) > 0 &&
                    vec_len (temp_cap->aead.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          default:
            break;
          }
      if (cap_found)
        break;
      cap++;
    }

  return cap_found;
}

static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /* init capabilities vector */
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
        {
          cap++;
          continue;
        }

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
        {
        case RTE_CRYPTO_SYM_XFORM_CIPHER:
          tmp_cap.cipher.key_sizes = 0;
          tmp_cap.cipher.algo = cap->sym.cipher.algo;
          for (param = cap->sym.cipher.key_size.min;
               param <= cap->sym.cipher.key_size.max;
               param += cap->sym.cipher.key_size.increment)
            {
              vec_add1 (tmp_cap.cipher.key_sizes, param);
              if (cap->sym.cipher.key_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AUTH:
          tmp_cap.auth.algo = cap->sym.auth.algo;
          tmp_cap.auth.digest_sizes = 0;
          for (param = cap->sym.auth.digest_size.min;
               param <= cap->sym.auth.digest_size.max;
               param += cap->sym.auth.digest_size.increment)
            {
              vec_add1 (tmp_cap.auth.digest_sizes, param);
              if (cap->sym.auth.digest_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AEAD:
          tmp_cap.aead.key_sizes = 0;
          tmp_cap.aead.aad_sizes = 0;
          tmp_cap.aead.digest_sizes = 0;
          tmp_cap.aead.algo = cap->sym.aead.algo;
          for (param = cap->sym.aead.key_size.min;
               param <= cap->sym.aead.key_size.max;
               param += cap->sym.aead.key_size.increment)
            {
              vec_add1 (tmp_cap.aead.key_sizes, param);
              if (cap->sym.aead.key_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.aad_size.min;
               param <= cap->sym.aead.aad_size.max;
               param += cap->sym.aead.aad_size.increment)
            {
              vec_add1 (tmp_cap.aead.aad_sizes, param);
              if (cap->sym.aead.aad_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.digest_size.min;
               param <= cap->sym.aead.digest_size.max;
               param += cap->sym.aead.digest_size.increment)
            {
              vec_add1 (tmp_cap.aead.digest_sizes, param);
              if (cap->sym.aead.digest_size.increment == 0)
                break;
            }
          break;
        default:
          break;
        }

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
        {
          dev_id = dev_inst->dev_id;
          if (previous_dev_id != dev_id)
            {
              previous_dev_id = dev_id;
              rte_cryptodev_info_get (dev_id, &dev_info);
              dev_caps = &dev_info.capabilities[0];
              cap_is_supported = cryptodev_remove_unsupported_param_sizes (
                &cmt->supported_caps[cap_id], dev_caps);
              if (!cap_is_supported)
                {
                  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
                  /* no need to check other devices as this one doesn't
                   * support this temp_cap */
                  break;
                }
            }
        }
      if (cap_is_supported)
        cap_id++;
    }
}

static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  if (vec_len (cmt->cryptodev_inst) == 0)
    return -1;
  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough devices, stop cryptodev */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
static void
is_drv_unique (u32 driver_id, u32 **unique_drivers)
{
  u32 *unique_elt;
  u8 found = 0;

  vec_foreach (unique_elt, *unique_drivers)
    {
      if (*unique_elt == driver_id)
        {
          found = 1;
          break;
        }
    }

  if (!found)
    vec_add1 (*unique_drivers, driver_id);
}
#endif

clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  u32 node;
  u8 nodes = 0;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
        nodes = node;
    }

  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    return 0;

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_cryptodev_info dev_info;
  cryptodev_inst_t *dev_inst;
  u32 *unique_drivers = 0;
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
        rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);
#endif

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
          0)
        {
          error = clib_error_return (0, "Failed to configure cryptodev");
          goto err_handling;
        }
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
                                      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are present in
   * startup.conf. Assume the user wants it used and turn on async mode here.
   */
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  return error;
}
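
/* Example (illustrative; the PCI address is a placeholder): this engine is
 * only initialized when crypto-capable devices are given to VPP in
 * startup.conf, e.g. a QAT VF:
 *
 *   dpdk {
 *     dev 0000:3d:01.0
 *   }
 *
 * A software crypto PMD can be created instead with a vdev statement such
 * as `vdev crypto_aesni_mb'. */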