4e8bc026f58c760753da9882c63a6e911c4bde45
[vpp.git] / src / plugins / dpdk / cryptodev / cryptodev.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/ipsec/ipsec.h>
22 #include <vpp/app/version.h>
23
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
27 #undef always_inline
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_config.h>
33
34 #include "cryptodev.h"
35
36 #if CLIB_DEBUG > 0
37 #define always_inline static inline
38 #else
39 #define always_inline static inline __attribute__ ((__always_inline__))
40 #endif
41
42 cryptodev_main_t cryptodev_main;
43
44 static_always_inline int
45 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
46                     cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
47                     u32 aad_len)
48 {
49   struct rte_crypto_aead_xform *aead_xform = &xform->aead;
50   memset (xform, 0, sizeof (*xform));
51   xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
52   xform->next = 0;
53
54   if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
55       key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
56       key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
57     {
58       aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
59     }
60   else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
61     {
62       aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
63     }
64   else
65     return -1;
66
67   aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
68     RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
69   aead_xform->aad_length = aad_len;
70   aead_xform->digest_length = 16;
71   aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
72   aead_xform->iv.length = 12;
73   aead_xform->key.data = key->data;
74   aead_xform->key.length = vec_len (key->data);
75
76   return 0;
77 }
78
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  /* Fill a cipher+auth transform chain (xforms[0] -> xforms[1]) for a
   * linked vnet crypto key.  For encryption the cipher transform comes
   * first; for decryption the auth (verify) transform comes first.
   * Returns 0 on success, -1 when a component key is missing or the
   * algorithm combination is unknown. */
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  /* a linked key references two component keys: crypto + integrity */
  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  /* map the vnet linked-alg enum to DPDK cipher/auth algos + digest size */
  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  /* NOTE(review): IV length hard-coded to 16 — presumably every entry in
   * foreach_cryptodev_link_async_alg uses a 16B IV; confirm in cryptodev.h */
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
142
static_always_inline void
cryptodev_session_del (cryptodev_session_t *sess)
{
  /* Release a symmetric session.  With DPDK >= 22.11 a session belongs to
   * exactly one device, so try each device id until the free succeeds.
   * On older DPDK the session must be cleared on every device first and
   * then freed once. */
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
    if (rte_cryptodev_sym_session_free (i, sess) == 0)
      break;
#else
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
#endif
}
163
164 static int
165 check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
166 {
167   cryptodev_main_t *cmt = &cryptodev_main;
168   cryptodev_capability_t *vcap;
169   u32 *s;
170
171   vec_foreach (vcap, cmt->supported_caps)
172     {
173       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
174         continue;
175       if (vcap->cipher.algo != algo)
176         continue;
177       vec_foreach (s, vcap->cipher.key_sizes)
178         if (*s == key_size)
179           return 1;
180     }
181
182   return 0;
183 }
184
185 static int
186 check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
187 {
188   cryptodev_main_t *cmt = &cryptodev_main;
189   cryptodev_capability_t *vcap;
190   u32 *s;
191
192   vec_foreach (vcap, cmt->supported_caps)
193     {
194       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
195         continue;
196       if (vcap->auth.algo != algo)
197         continue;
198       vec_foreach (s, vcap->auth.digest_sizes)
199         if (*s == digest_size)
200           return 1;
201     }
202
203   return 0;
204 }
205
206 static_always_inline int
207 check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
208                     u32 digest_size, u32 aad_size)
209 {
210   cryptodev_main_t *cmt = &cryptodev_main;
211   cryptodev_capability_t *vcap;
212   u32 *s;
213   u32 key_match = 0, digest_match = 0, aad_match = 0;
214
215   vec_foreach (vcap, cmt->supported_caps)
216     {
217       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
218         continue;
219       if (vcap->aead.algo != algo)
220         continue;
221       vec_foreach (s, vcap->aead.digest_sizes)
222         if (*s == digest_size)
223           {
224             digest_match = 1;
225             break;
226           }
227       vec_foreach (s, vcap->aead.key_sizes)
228         if (*s == key_size)
229           {
230             key_match = 1;
231             break;
232           }
233       vec_foreach (s, vcap->aead.aad_sizes)
234         if (*s == aad_size)
235           {
236             aad_match = 1;
237             break;
238           }
239     }
240
241   if (key_match == 1 && digest_match == 1 && aad_match == 1)
242     return 1;
243
244   return 0;
245 }
246
static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  /* Return 1 when the probed cryptodev capabilities cover this vnet key.
   * Linked keys require both the cipher (key size c) and the HMAC auth
   * (digest size e) to be supported.  AEAD keys must match enough entries
   * of foreach_vnet_aead_crypto_conversion — presumably the two AAD-size
   * variants per algorithm, hence the matched < 2 test; TODO confirm
   * against the conversion list in cryptodev.h. */
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
	{
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
	  foreach_cryptodev_link_async_alg
#undef _
	    default : return 0;
	}
      return 0;
    }

#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

    if (matched < 2) return 0;

  return 1;
}
281
void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  /* vnet crypto key event handler.  On ADD it only validates the key and
   * pre-sizes the per-numa session pointer tables — actual DPDK sessions
   * are created lazily by cryptodev_session_create().  On DEL/MODIFY it
   * tears down any sessions already created for the key. */
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      /* NOTE(review): this check can never fire — vec_validate above has
       * already grown cmt->keys to cover idx */
      if (idx >= vec_len (cmt->keys))
	return;

      /* i indexes numa nodes: ckey->keys is a per-numa session table */
      vec_foreach_index (i, cmt->per_numa_data)
	{
	  if (!ckey->keys)
	    continue;
	  if (!ckey->keys[i])
	    continue;
	  if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
	    {
	      cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
	      cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

	      /* order the teardown before publishing the cleared pointers */
	      CLIB_MEMORY_STORE_BARRIER ();
	      ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
	      ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
	    }
	}
      return;
    }

  /* create key */

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  /* size the per-numa / per-op-type session pointer tables up front */
  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}
328
/*static*/ void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  /* Key add/del/modify callback registered with the vnet crypto layer:
   * delegate to the session handler with an AAD length of 8 bytes —
   * presumably the default for ESP without extended sequence numbers;
   * TODO confirm (sessions for other AAD lengths are created lazily). */
  cryptodev_sess_handler (vm, kop, idx, 8);
}
335
clib_error_t *
allocate_session_pools (u32 numa_node,
			cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  /* Allocate the DPDK session mempool(s) for one numa node.
   * @param numa_node: node to allocate the pool memory on
   * @param sess_pools_elt: pool-list element to fill in
   * @param len: index used only to make the mempool name unique
   * @return NULL on success; on failure a clib error, with any pool
   *   already allocated here freed again. */
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  /* DPDK >= 22.11: driver-private session data lives in the same pool,
   * so the element size must include cmt->sess_sz */
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
#else
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
#endif

  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  /* pre-22.11 keeps per-driver private session data in a separate pool */
  name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);

  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);
#endif

clear_mempools:
  /* success also falls through here: error == NULL, nothing is freed */
  if (error)
    {
      vec_free (name);
      if (sess_pools_elt->sess_pool)
	rte_mempool_free (sess_pools_elt->sess_pool);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      if (sess_pools_elt->sess_priv_pool)
	rte_mempool_free (sess_pools_elt->sess_priv_pool);
#endif
      return error;
    }
  return 0;
}
388
int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
			  u32 aad_len)
{
  /* Lazily create the encrypt + decrypt DPDK sessions for key `idx` on the
   * calling thread's numa node.  Serializes with cmt->tlock.  Returns 0 on
   * success, -1 on any failure (partially created sessions are freed). */
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
  struct rte_cryptodev_info dev_info;
#endif
  u32 numa_node = vm->numa_node;
  clib_error_t *error;
  int ret = 0;
  u8 found = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);

  /* find (allocating lazily) a session pool with room for 2 sessions */
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
	{
	  error = allocate_session_pools (numa_node, sess_pools_elt,
					  vec_len (numa_data->sess_pools) - 1);
	  if (error)
	    {
	      ret = -1;
	      goto clear_key;
	    }
	}
      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
	{
	  found = 1;
	  break;
	}
    }

  if (found == 0)
    {
      /* all existing pools exhausted: grow the list by one and allocate */
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
				      vec_len (numa_data->sess_pools) - 1);
      if (error)
	{
	  ret = -1;
	  goto clear_key;
	}
    }

  sess_pool = sess_pools_elt->sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  /* pre-22.11: allocate session shells now, init per driver further down */
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
#endif

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  if (ret)
    {
      ret = -1;
      goto clear_key;
    }

  /* the encrypt-side prep already validated the key, so the decrypt-side
     return value is not re-checked */
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  /* 22.11+: all configured devices share one driver type (enforced in
     cryptodev_configure), so the first device id works for all */
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  u32 dev_id = dev_inst->dev_id;
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
      !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  /* stash the aad length in the session opaque data */
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
#else
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;

      /* if the session is already configured for the driver type, avoid
	 configuring it again to increase the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
	  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
	continue;

      /* NOTE(review): the first session_init's return value is overwritten
	 by the second before it is checked — an encrypt-side init failure
	 can be silently lost here */
      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
	sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
	sess_priv_pool);
      if (ret < 0)
	goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
#endif

  /* publish the sessions only after they are fully initialized */
  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return ret;
}
533
/** How a cryptodev queue-pair resource is picked for a worker. */
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0, /**< grab the first free instance */
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,   /**< move worker to a chosen instance */
} cryptodev_resource_assign_op_t;
539
540 /**
541  *  assign a cryptodev resource to a worker.
542  *  @param cet: the worker thread data
543  *  @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
544  *  @param op: the assignment method.
545  *  @return: 0 if successfully, negative number otherwise.
546  **/
547 static_always_inline int
548 cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
549                            u32 cryptodev_inst_index,
550                            cryptodev_resource_assign_op_t op)
551 {
552   cryptodev_main_t *cmt = &cryptodev_main;
553   cryptodev_inst_t *cinst = 0;
554   uword idx;
555
556   /* assign resource is only allowed when no inflight op is in the queue */
557   if (cet->inflight)
558     return -EBUSY;
559
560   switch (op)
561     {
562     case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
563       if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
564           vec_len (cmt->cryptodev_inst))
565         return -1;
566
567       clib_spinlock_lock (&cmt->tlock);
568       idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
569       clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
570       cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
571       cet->cryptodev_id = cinst->dev_id;
572       cet->cryptodev_q = cinst->q_id;
573       clib_spinlock_unlock (&cmt->tlock);
574       break;
575     case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
576       /* assigning a used cryptodev resource is not allowed */
577       if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
578           == 1)
579         return -EBUSY;
580
581       vec_foreach_index (idx, cmt->cryptodev_inst)
582         {
583           cinst = cmt->cryptodev_inst + idx;
584           if (cinst->dev_id == cet->cryptodev_id &&
585               cinst->q_id == cet->cryptodev_q)
586             break;
587         }
588       /* invalid existing worker resource assignment */
589       if (idx >= vec_len (cmt->cryptodev_inst))
590         return -EINVAL;
591       clib_spinlock_lock (&cmt->tlock);
592       clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
593       clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
594                                 cryptodev_inst_index, 1);
595       cinst = cmt->cryptodev_inst + cryptodev_inst_index;
596       cet->cryptodev_id = cinst->dev_id;
597       cet->cryptodev_q = cinst->q_id;
598       clib_spinlock_unlock (&cmt->tlock);
599       break;
600     default:
601       return -EINVAL;
602     }
603   return 0;
604 }
605
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  /* vlib format callback: print "<device name> <queue id> <worker|free>"
   * for the cryptodev instance index taken from args. */
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    /* skip the main thread slot when workers exist */
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
	s = format (s, "%u (%v)\n", thread_index,
		    vlib_worker_threads[thread_index].name);
	break;
      }
  }

  /* loop ran to completion without a match: the instance is unassigned */
  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}
637
638 static clib_error_t *
639 cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
640                               vlib_cli_command_t * cmd)
641 {
642   cryptodev_main_t *cmt = &cryptodev_main;
643   u32 inst;
644
645   vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
646                    "Assigned-to");
647   if (vec_len (cmt->cryptodev_inst) == 0)
648     {
649       vlib_cli_output (vm, "(nil)\n");
650       return 0;
651     }
652
653   vec_foreach_index (inst, cmt->cryptodev_inst)
654     vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
655
656   if (cmt->is_raw_api)
657     vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
658   else
659     vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
660   return 0;
661 }
662
663 VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
664     .path = "show cryptodev assignment",
665     .short_help = "show cryptodev assignment",
666     .function = cryptodev_show_assignment_fn,
667 };
668
static clib_error_t *
cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
			       vlib_cli_command_t *cmd)
{
  /* CLI handler: print per-worker cache ring occupancy.  The ring is a
   * circular buffer of CRYPTODEV_CACHE_QUEUE_MASK + 1 slots; head/tail
   * bound the cached frames, enq_head/deq_tail bound the frames handed to
   * and retrieved from the device. */
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 thread_index = 0;
  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      cryptodev_cache_ring_t *ring = &cet->cache_ring;
      u16 head = ring->head;
      u16 tail = ring->tail;
      /* equal indices mean empty (slot has no frame) or completely full
       * (slot holds a frame); otherwise take the modulo distance */
      u16 n_cached = ((head == tail) && (ring->frames[head].f == 0)) ?
			     0 :
		     ((head == tail) && (ring->frames[head].f != 0)) ?
			     (CRYPTODEV_CACHE_QUEUE_MASK + 1) :
		     (head > tail) ?
			     (head - tail) :
			     (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);

      u16 enq_head = ring->enq_head;
      u16 deq_tail = ring->deq_tail;
      /* frames currently enqueued to the cryptodev; same scheme as above */
      u16 n_frames_inflight =
	((enq_head == deq_tail) && (ring->frames[enq_head].f == 0)) ?
		0 :
	((enq_head == deq_tail) && (ring->frames[enq_head].f != 0)) ?
		CRYPTODEV_CACHE_QUEUE_MASK + 1 :
	(enq_head > deq_tail) ?
		(enq_head - deq_tail) :
		(CRYPTODEV_CACHE_QUEUE_MASK - deq_tail + enq_head);

      /* frames dequeued from the device but not yet returned to callers */
      u16 n_frames_processed =
	((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ? 0 :
	((tail == deq_tail) && (ring->frames[deq_tail].f != 0)) ? 1 :
	(deq_tail > tail) ? (deq_tail - tail + 1) :
				  (CRYPTODEV_CACHE_QUEUE_MASK - tail + deq_tail - 1);

      /* skip the main thread slot when workers exist */
      if (vlib_num_workers () > 0 && thread_index == 0)
	continue;

      vlib_cli_output (vm, "\n\n");
      vlib_cli_output (vm, "Frames total: %u", n_cached);
      vlib_cli_output (vm, "Frames pending in the ring: %u",
		       n_cached - n_frames_inflight - n_frames_processed);
      vlib_cli_output (vm, "Frames inflight: %u", n_frames_inflight);
      vlib_cli_output (vm, "Frames dequed but not returned: %u",
		       n_frames_processed);
      vlib_cli_output (vm, "Elements inflight: %u", cet->inflight);
      vlib_cli_output (vm, "Head: %u", head);
      vlib_cli_output (vm, "Tail: %u", tail);
      vlib_cli_output (vm, "\n\n");
    }
  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
  .path = "show cryptodev cache status",
  .short_help = "show status of all cryptodev cache rings",
  .function = cryptodev_show_cache_rings_fn,
};
729
730 static clib_error_t *
731 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
732                              vlib_cli_command_t * cmd)
733 {
734   cryptodev_main_t *cmt = &cryptodev_main;
735   cryptodev_engine_thread_t *cet;
736   unformat_input_t _line_input, *line_input = &_line_input;
737   u32 thread_index, inst_index;
738   u32 thread_present = 0, inst_present = 0;
739   clib_error_t *error = 0;
740   int ret;
741
742   /* Get a line of input. */
743   if (!unformat_user (input, unformat_line_input, line_input))
744     return 0;
745
746   while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
747     {
748       if (unformat (line_input, "thread %u", &thread_index))
749         thread_present = 1;
750       else if (unformat (line_input, "resource %u", &inst_index))
751         inst_present = 1;
752       else
753         {
754           error = clib_error_return (0, "unknown input `%U'",
755                                      format_unformat_error, line_input);
756           return error;
757         }
758     }
759
760   if (!thread_present || !inst_present)
761     {
762       error = clib_error_return (0, "mandatory argument(s) missing");
763       return error;
764     }
765
766   if (thread_index == 0 && vlib_num_workers () > 0)
767     {
768       error =
769         clib_error_return (0, "assign crypto resource for master thread");
770       return error;
771     }
772
773   if (thread_index > vec_len (cmt->per_thread_data) ||
774       inst_index > vec_len (cmt->cryptodev_inst))
775     {
776       error = clib_error_return (0, "wrong thread id or resource id");
777       return error;
778     }
779
780   cet = cmt->per_thread_data + thread_index;
781   ret = cryptodev_assign_resource (cet, inst_index,
782                                    CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
783   if (ret)
784     {
785       error =
786         clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
787       return error;
788     }
789
790   return 0;
791 }
792
793 VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
794     .path = "set cryptodev assignment",
795     .short_help = "set cryptodev assignment thread <thread_index> "
796         "resource <inst_index>",
797     .function = cryptodev_set_assignment_fn,
798 };
799
800 static u32
801 cryptodev_count_queue (u32 numa)
802 {
803   struct rte_cryptodev_info info;
804   u32 n_cryptodev = rte_cryptodev_count ();
805   u32 i, q_count = 0;
806
807   for (i = 0; i < n_cryptodev; i++)
808     {
809       rte_cryptodev_info_get (i, &info);
810       q_count += info.max_nb_queue_pairs;
811     }
812
813   return q_count;
814 }
815
816 static int
817 cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
818 {
819   struct rte_cryptodev_config cfg;
820   struct rte_cryptodev_info info;
821   cryptodev_main_t *cmt = &cryptodev_main;
822   u32 i;
823   int ret;
824
825   rte_cryptodev_info_get (cryptodev_id, &info);
826
827   /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto devices
828      anymore. Only devices that have the same driver type as the first
829      initialized device can be initialized.
830    */
831 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
832   if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
833     return -1;
834 #endif
835
836   if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
837     return -1;
838
839   cfg.socket_id = info.device->numa_node;
840   cfg.nb_queue_pairs = info.max_nb_queue_pairs;
841
842   rte_cryptodev_configure (cryptodev_id, &cfg);
843
844   for (i = 0; i < info.max_nb_queue_pairs; i++)
845     {
846       struct rte_cryptodev_qp_conf qp_cfg;
847
848       qp_cfg.mp_session = 0;
849 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
850       qp_cfg.mp_session_private = 0;
851 #endif
852       qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
853
854       ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
855                                             info.device->numa_node);
856       if (ret)
857         {
858           clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
859                         cryptodev_id, i, ret);
860           break;
861         }
862     }
863
864   if (i != info.max_nb_queue_pairs)
865     return -1;
866
867   /* start the device */
868   rte_cryptodev_start (cryptodev_id);
869
870 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
871   if (cmt->drivers_cnt == 0)
872     {
873       cmt->drivers_cnt = 1;
874       cmt->driver_id = info.driver_id;
875       cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
876     }
877 #endif
878
879   for (i = 0; i < info.max_nb_queue_pairs; i++)
880     {
881       cryptodev_inst_t *cdev_inst;
882 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
883       const char *dev_name = rte_dev_name (info.device);
884 #else
885       const char *dev_name = info.device->name;
886 #endif
887       vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
888       cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
889       cdev_inst->dev_id = cryptodev_id;
890       cdev_inst->q_id = i;
891
892       snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u",
893                 info.device->name, i);
894     }
895
896   return 0;
897 }
898
899 static int
900 cryptodev_cmp (void *v1, void *v2)
901 {
902   cryptodev_inst_t *a1 = v1;
903   cryptodev_inst_t *a2 = v2;
904
905   if (a1->q_id > a2->q_id)
906     return 1;
907   if (a1->q_id < a2->q_id)
908     return -1;
909   return 0;
910 }
911
912 static int
913 cryptodev_supports_param_value (u32 *params, u32 param_value)
914 {
915   u32 *value;
916   vec_foreach (value, params)
917     {
918       if (*value == param_value)
919         return 1;
920     }
921   return 0;
922 }
923
924 int
925 cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
926                              u32 key_size, u32 digest_size, u32 aad_size)
927 {
928   cryptodev_main_t *cmt = &cryptodev_main;
929   cryptodev_capability_t *cap;
930   vec_foreach (cap, cmt->supported_caps)
931     {
932
933       if (cap->xform_type != idx->type)
934         continue;
935
936       if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
937           cap->auth.algo == idx->algo.auth &&
938           cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
939         return 1;
940
941       if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
942           cap->cipher.algo == idx->algo.cipher &&
943           cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
944         return 1;
945
946       if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
947           cap->aead.algo == idx->algo.aead &&
948           cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
949           cryptodev_supports_param_value (cap->aead.digest_sizes,
950                                           digest_size) &&
951           cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
952         return 1;
953     }
954   return 0;
955 }
956
957 static void
958 remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
959                                u32 param_size_max, u32 increment)
960 {
961   u32 i = 0;
962   u32 cap_param_size;
963
964   while (i < vec_len (*param_sizes))
965     {
966       u32 found_param = 0;
967       for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
968            cap_param_size += increment)
969         {
970           if ((*param_sizes)[i] == cap_param_size)
971             {
972               found_param = 1;
973               break;
974             }
975           if (increment == 0)
976             break;
977         }
978       if (!found_param)
979         /* no such param_size in cap so delete  this size in temp_cap params */
980         vec_delete (*param_sizes, 1, i);
981       else
982         i++;
983     }
984 }
985
986 static void
987 cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
988 {
989   cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];
990
991   switch (temp_cap.xform_type)
992     {
993     case RTE_CRYPTO_SYM_XFORM_AUTH:
994       vec_free (temp_cap.auth.digest_sizes);
995       break;
996     case RTE_CRYPTO_SYM_XFORM_CIPHER:
997       vec_free (temp_cap.cipher.key_sizes);
998       break;
999     case RTE_CRYPTO_SYM_XFORM_AEAD:
1000       vec_free (temp_cap.aead.key_sizes);
1001       vec_free (temp_cap.aead.aad_sizes);
1002       vec_free (temp_cap.aead.digest_sizes);
1003       break;
1004     default:
1005       break;
1006     }
1007   vec_delete (*temp_caps, 1, temp_cap_id);
1008 }
1009
/* Intersect temp_cap's explicit size vectors with one device's capability
 * list (dev_caps): sizes not representable by the device's min/max/increment
 * ranges are removed in place.  Returns 1 when the capability remains usable
 * on this device (matching algo found and non-empty size vectors remain),
 * 0 when the device does not support it.
 *
 * NOTE(review): entries are accessed via cap->sym without first checking
 * cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC - presumably asymmetric entries
 * never share an xform_type value with temp_cap; confirm against the PMDs
 * in use. */
static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  /* dev_caps is terminated by an entry with op == RTE_CRYPTO_OP_TYPE_UNDEFINED */
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
	switch (cap->sym.xform_type)
	  {
	  case RTE_CRYPTO_SYM_XFORM_CIPHER:
	    if (cap->sym.cipher.algo == temp_cap->cipher.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
		  cap->sym.cipher.key_size.max,
		  cap->sym.cipher.key_size.increment);
		if (vec_len (temp_cap->cipher.key_sizes) > 0)
		  cap_found = 1;
	      }
	    break;
	  case RTE_CRYPTO_SYM_XFORM_AUTH:
	    if (cap->sym.auth.algo == temp_cap->auth.algo)
	      {
		remove_unsupported_param_size (
		  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
		  cap->sym.auth.digest_size.max,
		  cap->sym.auth.digest_size.increment);
		if (vec_len (temp_cap->auth.digest_sizes) > 0)
		  cap_found = 1;
	      }
	    break;
	  case RTE_CRYPTO_SYM_XFORM_AEAD:
	    if (cap->sym.aead.algo == temp_cap->aead.algo)
	      {
		/* AEAD needs all three size sets to stay non-empty */
		remove_unsupported_param_size (
		  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
		  cap->sym.aead.key_size.max,
		  cap->sym.aead.key_size.increment);
		remove_unsupported_param_size (
		  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
		  cap->sym.aead.aad_size.max,
		  cap->sym.aead.aad_size.increment);
		remove_unsupported_param_size (
		  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
		  cap->sym.aead.digest_size.max,
		  cap->sym.aead.digest_size.increment);
		if (vec_len (temp_cap->aead.key_sizes) > 0 &&
		    vec_len (temp_cap->aead.aad_sizes) > 0 &&
		    vec_len (temp_cap->aead.digest_sizes) > 0)
		  cap_found = 1;
	      }
	    break;
	  default:
	    break;
	  }
      /* first matching capability decides; stop scanning this device */
      if (cap_found)
	break;
      cap++;
    }

  return cap_found;
}
1076
/* Build cmt->supported_caps: the set of symmetric crypto capabilities (with
 * explicit key/digest/aad size vectors) common to ALL configured cryptodev
 * instances.  The vector is seeded from the first device's capability list,
 * then pruned against each subsequent distinct device id. */
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  /* seed from the first configured instance's capability list */
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /*init capabilities vector*/
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      /* only symmetric crypto is of interest here */
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
	{
	  cap++;
	  continue;
	}

      /* expand each DPDK min/max/increment range into an explicit vector
       * of sizes; increment == 0 denotes a single-value range */
      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
	{
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
	  tmp_cap.cipher.key_sizes = 0;
	  tmp_cap.cipher.algo = cap->sym.cipher.algo;
	  for (param = cap->sym.cipher.key_size.min;
	       param <= cap->sym.cipher.key_size.max;
	       param += cap->sym.cipher.key_size.increment)
	    {
	      vec_add1 (tmp_cap.cipher.key_sizes, param);
	      if (cap->sym.cipher.key_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AUTH:
	  tmp_cap.auth.algo = cap->sym.auth.algo;
	  tmp_cap.auth.digest_sizes = 0;
	  for (param = cap->sym.auth.digest_size.min;
	       param <= cap->sym.auth.digest_size.max;
	       param += cap->sym.auth.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.auth.digest_sizes, param);
	      if (cap->sym.auth.digest_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AEAD:
	  tmp_cap.aead.key_sizes = 0;
	  tmp_cap.aead.aad_sizes = 0;
	  tmp_cap.aead.digest_sizes = 0;
	  tmp_cap.aead.algo = cap->sym.aead.algo;
	  for (param = cap->sym.aead.key_size.min;
	       param <= cap->sym.aead.key_size.max;
	       param += cap->sym.aead.key_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.key_sizes, param);
	      if (cap->sym.aead.key_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.aad_size.min;
	       param <= cap->sym.aead.aad_size.max;
	       param += cap->sym.aead.aad_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.aad_sizes, param);
	      if (cap->sym.aead.aad_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.digest_size.min;
	       param <= cap->sym.aead.digest_size.max;
	       param += cap->sym.aead.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.digest_sizes, param);
	      if (cap->sym.aead.digest_size.increment == 0)
		break;
	    }
	  break;
	default:
	  break;
	}

      /* struct copy - the size vectors are now owned by supported_caps */
      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  /* prune: every remaining capability must be supported by every device.
   * cap_id only advances when the capability survives all devices; on
   * deletion the next capability shifts into the current slot. */
  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
	{
	  dev_id = dev_inst->dev_id;
	  /* instances share dev_ids (one per queue pair); only re-check
	   * when the device actually changes */
	  if (previous_dev_id != dev_id)
	    {
	      previous_dev_id = dev_id;
	      rte_cryptodev_info_get (dev_id, &dev_info);
	      dev_caps = &dev_info.capabilities[0];
	      cap_is_supported = cryptodev_remove_unsupported_param_sizes (
		&cmt->supported_caps[cap_id], dev_caps);
	      if (!cap_is_supported)
		{
		  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
		  /*no need to check other devices as this one doesn't support
		   * this temp_cap*/
		  break;
		}
	    }
	}
      if (cap_is_supported)
	cap_id++;
    }
}
1199
1200 static int
1201 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
1202 {
1203   cryptodev_main_t *cmt = &cryptodev_main;
1204   u32 n_queues = cryptodev_count_queue (vm->numa_node);
1205   u32 i;
1206
1207   if (n_queues < n_workers)
1208     return -1;
1209
1210   for (i = 0; i < rte_cryptodev_count (); i++)
1211     cryptodev_configure (vm, i);
1212
1213   if (vec_len (cmt->cryptodev_inst) == 0)
1214     return -1;
1215   cryptodev_get_common_capabilities ();
1216   vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);
1217
1218   /* if there is not enough device stop cryptodev */
1219   if (vec_len (cmt->cryptodev_inst) < n_workers)
1220     return -1;
1221
1222   return 0;
1223 }
1224
1225 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
1226 static void
1227 is_drv_unique (u32 driver_id, u32 **unique_drivers)
1228 {
1229   u32 *unique_elt;
1230   u8 found = 0;
1231
1232   vec_foreach (unique_elt, *unique_drivers)
1233     {
1234       if (*unique_elt == driver_id)
1235         {
1236           found = 1;
1237           break;
1238         }
1239     }
1240
1241   if (!found)
1242     vec_add1 (*unique_drivers, driver_id);
1243 }
1244 #endif
1245
/* Engine entry point: probe and configure DPDK cryptodevs, assign one
 * queue-pair resource to each worker thread, and register the engine with
 * the vnet crypto layer.  Returns 0 (no error) when cryptodev is simply
 * unavailable; returns a clib error only on partial initialization failure. */
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  u32 node;
  u8 nodes = 0;
  /* when workers exist, the main thread does not process crypto frames */
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  /* find the highest NUMA node id to size per-numa state */
  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
	nodes = node;
    }

  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    return 0;

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  /* pre-22.11 DPDK: count distinct PMD drivers and track the largest
   * private session size across all devices */
  struct rte_cryptodev_info dev_info;
  cryptodev_inst_t *dev_inst;
  u32 *unique_drivers = 0;
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
	rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);
#endif

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1,
                       CLIB_CACHE_LINE_BYTES);
  /* give every worker thread its own cryptodev queue-pair resource */
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
	  0)
	{
	  error = clib_error_return (0, "Failed to configure cryptodev");
	  goto err_handling;
	}
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  /* prefer the raw data-path API handler when one was provided */
  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are presented in
   * startup.conf. Assume it is wanted to be used, turn on async mode here.
   */
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  return error;
}