dpdk-cryptodev: improve dequeue behavior, fix cache stats logging
vpp.git: src/plugins/dpdk/cryptodev/cryptodev.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
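/* VPP's clib.h defines always_inline as a macro, which clashes with the DPDK
 * headers below; drop it before including them and restore it afterwards
 * (without forced inlining in debug builds). */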
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

cryptodev_main_t cryptodev_main;

static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
                    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
                    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
      key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
    {
      aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
    }
  else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
    {
      aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
    }
  else
    return -1;

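  /* both AES-GCM and CHACHA20-POLY1305 use a 16-byte tag and a 12-byte IV;
   * the IV is read from a fixed offset (CRYPTODEV_IV_OFFSET) within the
   * crypto op */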
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
                      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

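  /* chain order: on encrypt the cipher xform comes first, then auth
   * (encrypt-then-MAC); on decrypt the auth (verify) xform comes first */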
  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}

static_always_inline void
cryptodev_session_del (cryptodev_session_t *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

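  /* from DPDK 22.11 a session is bound to a single device and
   * rte_cryptodev_sym_session_free () only succeeds (returns 0) on that
   * device, so try each one; older DPDK needs an explicit per-device clear
   * followed by a single free */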
  for (i = 0; i < n_devs; i++)
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
    if (rte_cryptodev_sym_session_free (i, sess) == 0)
      break;
#else
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
#endif
}

static int
check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
        continue;
      if (vcap->cipher.algo != algo)
        continue;
      vec_foreach (s, vcap->cipher.key_sizes)
        if (*s == key_size)
          return 1;
    }

  return 0;
}

static int
check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
        continue;
      if (vcap->auth.algo != algo)
        continue;
      vec_foreach (s, vcap->auth.digest_sizes)
        if (*s == digest_size)
          return 1;
    }

  return 0;
}

static_always_inline int
check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
                    u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;
  u32 key_match = 0, digest_match = 0, aad_match = 0;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
        continue;
      if (vcap->aead.algo != algo)
        continue;
      vec_foreach (s, vcap->aead.digest_sizes)
        if (*s == digest_size)
          {
            digest_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.key_sizes)
        if (*s == key_size)
          {
            key_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.aad_sizes)
        if (*s == aad_size)
          {
            aad_match = 1;
            break;
          }
    }

  if (key_match == 1 && digest_match == 1 && aad_match == 1)
    return 1;

  return 0;
}

static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
        default:
          return 0;
        }
      return 0;
    }

#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

  /* each AEAD algorithm appears in the conversion table once per supported
   * AAD length (8 and 12 bytes); both entries must match */
  if (matched < 2)
    return 0;

  return 1;
}

void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
        return;

      vec_foreach_index (i, cmt->per_numa_data)
        {
          if (!ckey->keys)
            continue;
          if (!ckey->keys[i])
            continue;
          if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
            {
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

              CLIB_MEMORY_STORE_BARRIER ();
              ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
              ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
            }
        }
      return;
    }

  /* create key: only reserve the per-numa session slots here; the actual
   * DPDK sessions are created lazily by cryptodev_session_create () */

  /* do not create a session for an unsupported algorithm */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

/*static*/ void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
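  /* use the 8-byte default AAD length (ESP SPI + sequence number, no ESN);
   * the data path re-creates the session if a frame carries a different AAD
   * length */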
  cryptodev_sess_handler (vm, kop, idx, 8);
}

clib_error_t *
allocate_session_pools (u32 numa_node,
                        cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
#else
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
#endif

  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);

  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);
#endif

clear_mempools:
  if (error)
    {
      vec_free (name);
      if (sess_pools_elt->sess_pool)
        rte_mempool_free (sess_pools_elt->sess_pool);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      if (sess_pools_elt->sess_priv_pool)
        rte_mempool_free (sess_pools_elt->sess_priv_pool);
#endif
      return error;
    }
  return 0;
}

int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                          u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
  struct rte_cryptodev_info dev_info;
#endif
  u32 numa_node = vm->numa_node;
  clib_error_t *error;
  int ret = 0;
  u8 found = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
        {
          error = allocate_session_pools (numa_node, sess_pools_elt,
                                          vec_len (numa_data->sess_pools) - 1);
          if (error)
            {
              ret = -1;
              goto clear_key;
            }
        }
      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
        {
          found = 1;
          break;
        }
    }

  if (found == 0)
    {
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
                                      vec_len (numa_data->sess_pools) - 1);
      if (error)
        {
          ret = -1;
          goto clear_key;
        }
    }

  sess_pool = sess_pools_elt->sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
#endif

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  if (ret)
    {
      ret = -1;
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  u32 dev_id = dev_inst->dev_id;
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
      !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto clear_key;
    }

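  /* stash the AAD length in the session so the data path can detect when a
   * session has to be re-created with a different AAD length */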
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
#else
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;

      /* if the session is already configured for this driver type, skip it:
         initializing it again would only bump the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
          sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
        continue;

      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
        sess_priv_pool);
      if (ret < 0)
        goto clear_key;
      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
        sess_priv_pool);
      if (ret < 0)
        goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
#endif

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return ret;
}

typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 *  Assign a cryptodev resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: ignored when op is
 *         CRYPTODEV_RESOURCE_ASSIGN_AUTO.
 *  @param op: the assignment method.
 *  @return: 0 on success, a negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t *cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* re-assigning a resource is only allowed when the worker has no inflight
   * ops in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning an already-used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
        {
          cinst = cmt->cryptodev_inst + idx;
          if (cinst->dev_id == cet->cryptodev_id &&
              cinst->q_id == cet->cryptodev_q)
            break;
        }
      /* invalid existing worker resource assignment */
      if (idx >= vec_len (cmt->cryptodev_inst))
        return -EINVAL;
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}

static u8 *
format_cryptodev_inst (u8 *s, va_list *args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
        continue;

      if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
        {
          s = format (s, "%u (%v)\n", thread_index,
                      vlib_worker_threads[thread_index].name);
          break;
        }
    }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
                              vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
                   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  if (cmt->is_raw_api)
    vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
  else
    vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
  .path = "show cryptodev assignment",
  .short_help = "show cryptodev assignment",
  .function = cryptodev_show_assignment_fn,
};

static clib_error_t *
cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
                               vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 thread_index = 0;
  u16 i;
  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      cryptodev_cache_ring_t *ring = &cet->cache_ring;
      u16 head = ring->head;
      u16 tail = ring->tail;
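      /* the ring size is a power of two, so distances between wrapped
       * indices can be computed modulo the ring size with the mask */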
      u16 n_cached = (CRYPTODEV_CACHE_QUEUE_SIZE - tail + head) &
                     CRYPTODEV_CACHE_QUEUE_MASK;

      u16 enq_head = ring->enq_head;
      u16 deq_tail = ring->deq_tail;
      u16 n_frames_inflight =
        (enq_head == deq_tail) ?
          0 :
          ((CRYPTODEV_CACHE_QUEUE_SIZE + enq_head - deq_tail) &
           CRYPTODEV_CACHE_QUEUE_MASK);
      /* even if some elements of a dequeued frame are still pending dequeue,
       * we consider the frame processed */
      u16 n_frames_processed =
        ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ?
          0 :
          (((CRYPTODEV_CACHE_QUEUE_SIZE - tail + deq_tail) &
            CRYPTODEV_CACHE_QUEUE_MASK) +
           1);
      /* even if some elements of an enqueued frame are still pending enqueue,
       * we consider the frame enqueued */
      u16 n_frames_pending =
        (head == enq_head) ?
          0 :
          (((CRYPTODEV_CACHE_QUEUE_SIZE - enq_head + head) &
            CRYPTODEV_CACHE_QUEUE_MASK) -
           1);

      u16 elts_to_enq =
        (ring->frames[enq_head].n_elts - ring->frames[enq_head].enq_elts_head);
      u16 elts_to_deq =
        (ring->frames[deq_tail].n_elts - ring->frames[deq_tail].deq_elts_tail);

      u32 elts_total = 0;

      for (i = 0; i < CRYPTODEV_CACHE_QUEUE_SIZE; i++)
        elts_total += ring->frames[i].n_elts;

      if (vlib_num_workers () > 0 && thread_index == 0)
        continue;

      vlib_cli_output (vm, "\n\n");
      vlib_cli_output (vm, "Frames cached in the ring: %u", n_cached);
      vlib_cli_output (vm, "Frames cached but not processed: %u",
                       n_frames_pending);
      vlib_cli_output (vm, "Frames inflight: %u", n_frames_inflight);
      vlib_cli_output (vm, "Frames processed: %u", n_frames_processed);
      vlib_cli_output (vm, "Elements total: %u", elts_total);
      vlib_cli_output (vm, "Elements inflight: %u", cet->inflight);
      vlib_cli_output (vm, "Head index: %u", head);
      vlib_cli_output (vm, "Tail index: %u", tail);
      vlib_cli_output (vm, "Current frame index being enqueued: %u",
                       enq_head);
      vlib_cli_output (vm, "Current frame index being dequeued: %u", deq_tail);
      vlib_cli_output (vm,
                       "Elements in current frame to be enqueued: %u, waiting "
                       "to be enqueued: %u",
                       ring->frames[enq_head].n_elts, elts_to_enq);
      vlib_cli_output (vm,
                       "Elements in current frame to be dequeued: %u, waiting "
                       "to be dequeued: %u",
                       ring->frames[deq_tail].n_elts, elts_to_deq);
      vlib_cli_output (vm, "\n\n");
    }
  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
  .path = "show cryptodev cache status",
  .short_help = "show status of all cryptodev cache rings",
  .function = cryptodev_show_cache_rings_fn,
};

static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
                             vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
        thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
        inst_present = 1;
      else
        {
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, line_input);
          return error;
        }
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error = clib_error_return (
        0, "cannot assign a crypto resource to the master thread");
      return error;
    }

  if (thread_index >= vec_len (cmt->per_thread_data) ||
      inst_index >= vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
                                   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error =
        clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
  .path = "set cryptodev assignment",
  .short_help = "set cryptodev assignment thread <thread_index> "
                "resource <inst_index>",
  .function = cryptodev_set_assignment_fn,
};

static u32
cryptodev_count_queue (u32 numa)
{
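  /* note: the numa argument is currently unused; queue pairs on every node
   * are counted */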
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 i, q_count = 0;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto
     devices anymore. Only devices with the same driver type as the first
     initialized device can be initialized.
   */
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
    return -1;
#endif

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;
  /* keep all feature flags enabled; left unset this field would carry stack
     garbage into the PMD */
  cfg.ff_disable = 0;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      qp_cfg.mp_session_private = 0;
#endif
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
                                            info.device->numa_node);
      if (ret)
        {
          clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
                        cryptodev_id, i, ret);
          break;
        }
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  if (cmt->drivers_cnt == 0)
    {
      cmt->drivers_cnt = 1;
      cmt->driver_id = info.driver_id;
      cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
    }
#endif

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
      const char *dev_name = rte_dev_name (info.device);
#else
      const char *dev_name = info.device->name;
#endif
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u", dev_name,
                i);
    }

  return 0;
}

static int
cryptodev_cmp (void *v1, void *v2)
{
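  /* sort cryptodev instances by queue id, so that auto-assignment fills
   * queue 0 of every device before moving on to higher queue ids */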
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;
  return 0;
}

static int
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;
  vec_foreach (value, params)
    {
      if (*value == param_value)
        return 1;
    }
  return 0;
}

int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                             u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;

  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
        continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
          cap->auth.algo == idx->algo.auth &&
          cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
          cap->cipher.algo == idx->algo.cipher &&
          cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
          cap->aead.algo == idx->algo.aead &&
          cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
          cryptodev_supports_param_value (cap->aead.digest_sizes,
                                          digest_size) &&
          cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
        return 1;
    }
  return 0;
}

static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* the device cap does not support this size, delete it from the
         * temp_cap params */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}

static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}

static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
        switch (cap->sym.xform_type)
          {
          case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (cap->sym.cipher.algo == temp_cap->cipher.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
                  cap->sym.cipher.key_size.max,
                  cap->sym.cipher.key_size.increment);
                if (vec_len (temp_cap->cipher.key_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (cap->sym.auth.algo == temp_cap->auth.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
                  cap->sym.auth.digest_size.max,
                  cap->sym.auth.digest_size.increment);
                if (vec_len (temp_cap->auth.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AEAD:
            if (cap->sym.aead.algo == temp_cap->aead.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
                  cap->sym.aead.key_size.max,
                  cap->sym.aead.key_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
                  cap->sym.aead.aad_size.max,
                  cap->sym.aead.aad_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
                  cap->sym.aead.digest_size.max,
                  cap->sym.aead.digest_size.increment);
                if (vec_len (temp_cap->aead.key_sizes) > 0 &&
                    vec_len (temp_cap->aead.aad_sizes) > 0 &&
                    vec_len (temp_cap->aead.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          default:
            break;
          }
      if (cap_found)
        break;
      cap++;
    }

  return cap_found;
}

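/* Build the set of capabilities common to all probed devices: seed the
 * vector with the first device's symmetric capabilities, then intersect the
 * supported parameter sizes with every other device, dropping any capability
 * left without a usable size. */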
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /* init the capabilities vector from the first device */
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
        {
          cap++;
          continue;
        }

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
        {
        case RTE_CRYPTO_SYM_XFORM_CIPHER:
          tmp_cap.cipher.key_sizes = 0;
          tmp_cap.cipher.algo = cap->sym.cipher.algo;
          for (param = cap->sym.cipher.key_size.min;
               param <= cap->sym.cipher.key_size.max;
               param += cap->sym.cipher.key_size.increment)
            {
              vec_add1 (tmp_cap.cipher.key_sizes, param);
              if (cap->sym.cipher.key_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AUTH:
          tmp_cap.auth.algo = cap->sym.auth.algo;
          tmp_cap.auth.digest_sizes = 0;
          for (param = cap->sym.auth.digest_size.min;
               param <= cap->sym.auth.digest_size.max;
               param += cap->sym.auth.digest_size.increment)
            {
              vec_add1 (tmp_cap.auth.digest_sizes, param);
              if (cap->sym.auth.digest_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AEAD:
          tmp_cap.aead.key_sizes = 0;
          tmp_cap.aead.aad_sizes = 0;
          tmp_cap.aead.digest_sizes = 0;
          tmp_cap.aead.algo = cap->sym.aead.algo;
          for (param = cap->sym.aead.key_size.min;
               param <= cap->sym.aead.key_size.max;
               param += cap->sym.aead.key_size.increment)
            {
              vec_add1 (tmp_cap.aead.key_sizes, param);
              if (cap->sym.aead.key_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.aad_size.min;
               param <= cap->sym.aead.aad_size.max;
               param += cap->sym.aead.aad_size.increment)
            {
              vec_add1 (tmp_cap.aead.aad_sizes, param);
              if (cap->sym.aead.aad_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.digest_size.min;
               param <= cap->sym.aead.digest_size.max;
               param += cap->sym.aead.digest_size.increment)
            {
              vec_add1 (tmp_cap.aead.digest_sizes, param);
              if (cap->sym.aead.digest_size.increment == 0)
                break;
            }
          break;
        default:
          break;
        }

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
        {
          dev_id = dev_inst->dev_id;
          if (previous_dev_id != dev_id)
            {
              previous_dev_id = dev_id;
              rte_cryptodev_info_get (dev_id, &dev_info);
              dev_caps = &dev_info.capabilities[0];
              cap_is_supported = cryptodev_remove_unsupported_param_sizes (
                &cmt->supported_caps[cap_id], dev_caps);
              if (!cap_is_supported)
                {
                  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
                  /* no need to check the other devices, this one does not
                   * support the cap */
                  break;
                }
            }
        }
      if (cap_is_supported)
        cap_id++;
    }
}

static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  if (vec_len (cmt->cryptodev_inst) == 0)
    return -1;
  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough queue pairs for all workers, stop cryptodev */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
static void
is_drv_unique (u32 driver_id, u32 **unique_drivers)
{
  u32 *unique_elt;
  u8 found = 0;

  vec_foreach (unique_elt, *unique_drivers)
    {
      if (*unique_elt == driver_id)
        {
          found = 1;
          break;
        }
    }

  if (!found)
    vec_add1 (*unique_drivers, driver_id);
}
#endif

clib_error_t *
dpdk_cryptodev_init (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  u32 node;
  u8 nodes = 0;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  clib_error_t *error;

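  /* remember EAL's IOVA mode; the data path uses it to decide whether the
   * buffer addresses handed to the PMD are virtual or physical */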
  cmt->iova_mode = rte_eal_iova_mode ();

  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
        nodes = node;
    }

  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info; quietly leave the
   * engine disabled when no usable device is found */
  if (cryptodev_probe (vm, n_workers) < 0)
    return 0;

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_cryptodev_info dev_info;
  cryptodev_inst_t *dev_inst;
  u32 *unique_drivers = 0;
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
        rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);
#endif

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
          0)
        {
          error = clib_error_return (0, "Failed to configure cryptodev");
          goto err_handling;
        }
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
                                      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are present in
   * startup.conf; assume the user wants it used and turn on async mode here
   */
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  return error;
}