dpdk: code preparation for bumping to DPDK 22.11
[vpp.git] / src / plugins / dpdk / cryptodev / cryptodev.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/ipsec/ipsec.h>
22 #include <vpp/app/version.h>
23
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
27 #undef always_inline
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_config.h>
33
34 #include "cryptodev.h"
35
36 #if CLIB_DEBUG > 0
37 #define always_inline static inline
38 #else
39 #define always_inline static inline __attribute__ ((__always_inline__))
40 #endif
41
42 cryptodev_main_t cryptodev_main;
43
44 static_always_inline int
45 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
46                     cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
47                     u32 aad_len)
48 {
49   struct rte_crypto_aead_xform *aead_xform = &xform->aead;
50   memset (xform, 0, sizeof (*xform));
51   xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
52   xform->next = 0;
53
54   if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
55       key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
56       key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
57     {
58       aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
59     }
60   else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
61     {
62       aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
63     }
64   else
65     return -1;
66
67   aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
68     RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
69   aead_xform->aad_length = aad_len;
70   aead_xform->digest_length = 16;
71   aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
72   aead_xform->iv.length = 12;
73   aead_xform->key.data = key->data;
74   aead_xform->key.length = vec_len (key->data);
75
76   return 0;
77 }
78
/**
 * Fill a two-element DPDK xform chain for a linked (cipher + HMAC-auth) key.
 *
 * @param xforms   array of at least 2 xforms; [0] is chained to [1]
 * @param op_type  encrypt (cipher-then-auth) or decrypt (auth-then-cipher)
 * @param key      linked vnet key holding crypto and integ key indices
 * @return 0 on success, -1 on unknown key or unsupported algorithm pair
 */
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
                      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  /* a linked key references two underlying keys: cipher and integrity */
  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  /* xform order encodes the processing direction:
     encrypt = cipher first then auth, decrypt = verify first then decipher */
  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  /* expand the supported cipher/HMAC combinations into switch cases */
  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                          \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  /* 16-byte IV at the fixed per-op offset in the op's private area */
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
142
/**
 * Release a cryptodev session on whichever device owns it.
 *
 * DPDK >= 22.11: sessions are per-device; probe each device id until the
 * owning one frees it successfully (free returns 0).
 * Older DPDK: sessions carry per-driver private data; clear it on every
 * device, then free the shared session object once.
 */
static_always_inline void
cryptodev_session_del (cryptodev_session_t *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
    if (rte_cryptodev_sym_session_free (i, sess) == 0)
      break;
#else
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
#endif
}
163
164 static int
165 check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
166 {
167   cryptodev_main_t *cmt = &cryptodev_main;
168   cryptodev_capability_t *vcap;
169   u32 *s;
170
171   vec_foreach (vcap, cmt->supported_caps)
172     {
173       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
174         continue;
175       if (vcap->cipher.algo != algo)
176         continue;
177       vec_foreach (s, vcap->cipher.key_sizes)
178         if (*s == key_size)
179           return 1;
180     }
181
182   return 0;
183 }
184
185 static int
186 check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
187 {
188   cryptodev_main_t *cmt = &cryptodev_main;
189   cryptodev_capability_t *vcap;
190   u32 *s;
191
192   vec_foreach (vcap, cmt->supported_caps)
193     {
194       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
195         continue;
196       if (vcap->auth.algo != algo)
197         continue;
198       vec_foreach (s, vcap->auth.digest_sizes)
199         if (*s == digest_size)
200           return 1;
201     }
202
203   return 0;
204 }
205
/**
 * Return 1 if the collected capabilities support the given AEAD algorithm
 * with the given key, digest and AAD sizes, 0 otherwise.
 *
 * NOTE(review): the three match flags accumulate across *all* capability
 * entries for this algorithm, so key/digest/aad may each be satisfied by a
 * different entry. Presumably acceptable since entries are per-algorithm —
 * confirm this is intended rather than requiring one entry to match all
 * three sizes.
 */
static_always_inline int
check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
                    u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;
  u32 key_match = 0, digest_match = 0, aad_match = 0;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
        continue;
      if (vcap->aead.algo != algo)
        continue;
      vec_foreach (s, vcap->aead.digest_sizes)
        if (*s == digest_size)
          {
            digest_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.key_sizes)
        if (*s == key_size)
          {
            key_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.aad_sizes)
        if (*s == aad_size)
          {
            aad_match = 1;
            break;
          }
    }

  /* all three parameters must be supported */
  if (key_match == 1 && digest_match == 1 && aad_match == 1)
    return 1;

  return 0;
}
246
/**
 * Return 1 if the given vnet key's algorithm (linked cipher+auth, or AEAD)
 * is supported by the collected cryptodev capabilities, 0 otherwise.
 *
 * For AEAD keys each vnet algorithm maps to two cryptodev variants
 * (aad_len 8 and 12 via the conversion list), hence the `matched < 2`
 * requirement: both variants must be supported.
 */
static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      /* linked key: both the cipher and the HMAC must be supported */
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
            default : return 0;
        }
      return 0;
    }

  /* AEAD key: count how many (algo, key/digest/aad size) variants match */
#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

    if (matched < 2) return 0;

  return 1;
}
281
/**
 * vnet key-op callback: maintain the per-key cryptodev session table.
 *
 * ADD: validate the key slot and pre-allocate the per-numa / per-direction
 * session pointer arrays (sessions themselves are created lazily).
 * DEL/MODIFY: free any existing encrypt/decrypt sessions for every numa node
 * and null the slots.
 *
 * @param aad_len  AAD length used when the session is eventually created
 */
void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      /* NOTE(review): vec_validate above already grew cmt->keys to include
         idx, so this bounds check can never fire — likely intended to run
         before the vec_validate. */
      if (idx >= vec_len (cmt->keys))
        return;

      vec_foreach_index (i, cmt->per_numa_data)
        {
          if (!ckey->keys)
            continue;
          if (!ckey->keys[i])
            continue;
          if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
            {
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

              /* ensure readers never observe a freed session pointer */
              CLIB_MEMORY_STORE_BARRIER ();
              ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
              ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
            }
        }
      return;
    }

  /* create key */

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  /* allocate one session slot per numa node, per direction */
  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}
328
/* vnet crypto engine key handler: delegates to the session handler with the
   default ESP AAD length of 8 bytes (no extended sequence number). */
/*static*/ void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
335
/**
 * Allocate the session mempool(s) for one numa node.
 *
 * DPDK >= 22.11 needs a single session pool whose element size covers the
 * driver's private session data (cmt->sess_sz). Older DPDK needs a session
 * header pool plus a separate private-data pool sized for all drivers.
 *
 * @param numa_node       numa node to allocate the pools on
 * @param sess_pools_elt  pool slot to fill in
 * @param len             index used only to build a unique mempool name
 * @return NULL on success, clib error on allocation failure (any pool that
 *         was created is freed again)
 */
clib_error_t *
allocate_session_pools (u32 numa_node,
                        cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
#else
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
#endif

  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  /* pre-22.11 only: separate pool for per-driver private session data */
  name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);

  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);
#endif

/* success also falls through here; cleanup only runs when error is set */
clear_mempools:
  if (error)
    {
      vec_free (name);
      if (sess_pools_elt->sess_pool)
        rte_mempool_free (sess_pools_elt->sess_pool);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
      if (sess_pools_elt->sess_priv_pool)
        rte_mempool_free (sess_pools_elt->sess_priv_pool);
#endif
      return error;
    }
  return 0;
}
388
/**
 * Create the encrypt and decrypt cryptodev sessions for one vnet key on the
 * calling thread's numa node, and publish them in cmt->keys[idx].
 *
 * Pool selection: scan this numa node's session pools for one with at least
 * two free elements, allocating pools on demand; grow the pool vector if
 * none fits.  Serialized by cmt->tlock.
 *
 * DPDK >= 22.11: sessions are created in one shot against the first
 * configured device (all devices share one driver type — see
 * cryptodev_configure), with aad_len stashed in the session opaque data.
 * Older DPDK: session headers are created first, then initialized once per
 * driver type.
 *
 * @return 0 on success, -1 on any failure (partially created sessions are
 *         freed)
 */
int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                          u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
  struct rte_cryptodev_info dev_info;
#endif
  u32 numa_node = vm->numa_node;
  clib_error_t *error;
  int ret = 0;
  u8 found = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);
  /* find (or lazily allocate) a pool with room for both sessions */
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
        {
          error = allocate_session_pools (numa_node, sess_pools_elt,
                                          vec_len (numa_data->sess_pools) - 1);
          if (error)
            {
              ret = -1;
              goto clear_key;
            }
        }
      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
        {
          found = 1;
          break;
        }
    }

  if (found == 0)
    {
      /* all existing pools exhausted: grow the vector and allocate anew */
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
                                      vec_len (numa_data->sess_pools) - 1);
      if (error)
        {
          ret = -1;
          goto clear_key;
        }
    }

  sess_pool = sess_pools_elt->sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  /* pre-22.11: allocate session headers now, initialize per driver below */
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
#endif

  /* build the encrypt-direction xform chain */
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  if (ret)
    {
      ret = -1;
      goto clear_key;
    }

  /* decrypt direction cannot fail if encrypt direction succeeded */
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  /* one driver type for all devices, so any device id works; use the first */
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  u32 dev_id = dev_inst->dev_id;
  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
      !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  /* stash aad_len so the data path can detect an aad_len change */
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
  rte_cryptodev_sym_session_opaque_data_set (
    sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
#else
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;

      /* if the session is already configured for the driver type, avoid
         configuring it again to increase the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
          sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
        continue;

      /* NOTE(review): the first session_init's return value is overwritten
         by the second; a failure on the encrypt init alone goes unnoticed. */
      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
        sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
        sess_priv_pool);
      if (ret < 0)
        goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
#endif

  /* publish: make session contents visible before the pointers */
  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return ret;
}
533
/* How a cryptodev (device, queue) resource gets bound to a worker thread. */
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0, /* pick the first free resource */
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,   /* move worker to a specific resource */
} cryptodev_resource_assign_op_t;
539
/**
 *  assign a cryptodev resource (device + queue pair) to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: target resource index; ignored when op is
 *         ASSIGN_AUTO.
 *  @param op: the assignment method (AUTO picks the first free resource,
 *         UPDATE moves the worker to cryptodev_inst_index).
 *  @return: 0 if successful; -EBUSY if the worker has inflight ops or the
 *         target resource is taken, -EINVAL on bad arguments, -1 if no
 *         free resource exists.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assign resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      /* all resources already taken? */
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      /* claim the first free slot in the active-instance bitmap */
      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;

      /* locate the worker's current resource so it can be released */
      vec_foreach_index (idx, cmt->cryptodev_inst)
      {
        cinst = cmt->cryptodev_inst + idx;
        if (cinst->dev_id == cet->cryptodev_id &&
            cinst->q_id == cet->cryptodev_q)
          break;
      }
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
        return -EINVAL;
      /* atomically release the old slot and claim the new one */
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
605
/**
 * vppinfra format function: one line per cryptodev instance showing device
 * name, queue id and the worker thread it is assigned to ("free" if none).
 * va_arg: u32 instance index into cmt->cryptodev_inst.
 */
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  /* find which worker owns this (dev, queue) pair */
  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    /* thread 0 is the main thread when workers exist — skip it */
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
        s = format (s, "%u (%v)\n", thread_index,
                    vlib_worker_threads[thread_index].name);
        break;
      }
  }

  /* loop ran to completion without a match: resource is unassigned */
  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}
637
638 static clib_error_t *
639 cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
640                               vlib_cli_command_t * cmd)
641 {
642   cryptodev_main_t *cmt = &cryptodev_main;
643   u32 inst;
644
645   vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
646                    "Assigned-to");
647   if (vec_len (cmt->cryptodev_inst) == 0)
648     {
649       vlib_cli_output (vm, "(nil)\n");
650       return 0;
651     }
652
653   vec_foreach_index (inst, cmt->cryptodev_inst)
654     vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
655
656   if (cmt->is_raw_api)
657     vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
658   else
659     vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
660   return 0;
661 }
662
/* CLI registration for "show cryptodev assignment". */
VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
668
669 static clib_error_t *
670 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
671                              vlib_cli_command_t * cmd)
672 {
673   cryptodev_main_t *cmt = &cryptodev_main;
674   cryptodev_engine_thread_t *cet;
675   unformat_input_t _line_input, *line_input = &_line_input;
676   u32 thread_index, inst_index;
677   u32 thread_present = 0, inst_present = 0;
678   clib_error_t *error = 0;
679   int ret;
680
681   /* Get a line of input. */
682   if (!unformat_user (input, unformat_line_input, line_input))
683     return 0;
684
685   while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
686     {
687       if (unformat (line_input, "thread %u", &thread_index))
688         thread_present = 1;
689       else if (unformat (line_input, "resource %u", &inst_index))
690         inst_present = 1;
691       else
692         {
693           error = clib_error_return (0, "unknown input `%U'",
694                                      format_unformat_error, line_input);
695           return error;
696         }
697     }
698
699   if (!thread_present || !inst_present)
700     {
701       error = clib_error_return (0, "mandatory argument(s) missing");
702       return error;
703     }
704
705   if (thread_index == 0 && vlib_num_workers () > 0)
706     {
707       error =
708         clib_error_return (0, "assign crypto resource for master thread");
709       return error;
710     }
711
712   if (thread_index > vec_len (cmt->per_thread_data) ||
713       inst_index > vec_len (cmt->cryptodev_inst))
714     {
715       error = clib_error_return (0, "wrong thread id or resource id");
716       return error;
717     }
718
719   cet = cmt->per_thread_data + thread_index;
720   ret = cryptodev_assign_resource (cet, inst_index,
721                                    CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
722   if (ret)
723     {
724       error =
725         clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
726       return error;
727     }
728
729   return 0;
730 }
731
/* CLI registration for "set cryptodev assignment". */
VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
        "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
738
/**
 * Return the total number of queue pairs across all cryptodevs.
 *
 * NOTE(review): the `numa` parameter is currently unused — the count spans
 * every device regardless of numa node. Confirm whether per-numa filtering
 * was intended.
 */
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 i, q_count = 0;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
754
755 static int
756 cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
757 {
758   struct rte_cryptodev_config cfg;
759   struct rte_cryptodev_info info;
760   cryptodev_main_t *cmt = &cryptodev_main;
761   u32 i;
762   int ret;
763
764   rte_cryptodev_info_get (cryptodev_id, &info);
765
766   /* Starting from DPDK 22.11, VPP does not allow heterogeneous crypto devices
767      anymore. Only devices that have the same driver type as the first
768      initialized device can be initialized.
769    */
770 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
771   if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
772     return -1;
773 #endif
774
775   if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
776     return -1;
777
778   cfg.socket_id = info.device->numa_node;
779   cfg.nb_queue_pairs = info.max_nb_queue_pairs;
780
781   rte_cryptodev_configure (cryptodev_id, &cfg);
782
783   for (i = 0; i < info.max_nb_queue_pairs; i++)
784     {
785       struct rte_cryptodev_qp_conf qp_cfg;
786
787       qp_cfg.mp_session = 0;
788 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
789       qp_cfg.mp_session_private = 0;
790 #endif
791       qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
792
793       ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
794                                             info.device->numa_node);
795       if (ret)
796         {
797           clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
798                         cryptodev_id, i, ret);
799           break;
800         }
801     }
802
803   if (i != info.max_nb_queue_pairs)
804     return -1;
805
806   /* start the device */
807   rte_cryptodev_start (cryptodev_id);
808
809 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
810   if (cmt->drivers_cnt == 0)
811     {
812       cmt->drivers_cnt = 1;
813       cmt->driver_id = info.driver_id;
814       cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
815     }
816 #endif
817
818   for (i = 0; i < info.max_nb_queue_pairs; i++)
819     {
820       cryptodev_inst_t *cdev_inst;
821 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
822       const char *dev_name = rte_dev_name (info.device);
823 #else
824       const char *dev_name = info.device->name;
825 #endif
826       vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
827       cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
828       cdev_inst->dev_id = cryptodev_id;
829       cdev_inst->q_id = i;
830
831       snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u",
832                 info.device->name, i);
833     }
834
835   return 0;
836 }
837
838 static int
839 cryptodev_cmp (void *v1, void *v2)
840 {
841   cryptodev_inst_t *a1 = v1;
842   cryptodev_inst_t *a2 = v2;
843
844   if (a1->q_id > a2->q_id)
845     return 1;
846   if (a1->q_id < a2->q_id)
847     return -1;
848   return 0;
849 }
850
851 static int
852 cryptodev_supports_param_value (u32 *params, u32 param_value)
853 {
854   u32 *value;
855   vec_foreach (value, params)
856     {
857       if (*value == param_value)
858         return 1;
859     }
860   return 0;
861 }
862
863 int
864 cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
865                              u32 key_size, u32 digest_size, u32 aad_size)
866 {
867   cryptodev_main_t *cmt = &cryptodev_main;
868   cryptodev_capability_t *cap;
869   vec_foreach (cap, cmt->supported_caps)
870     {
871
872       if (cap->xform_type != idx->type)
873         continue;
874
875       if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
876           cap->auth.algo == idx->algo.auth &&
877           cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
878         return 1;
879
880       if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
881           cap->cipher.algo == idx->algo.cipher &&
882           cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
883         return 1;
884
885       if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
886           cap->aead.algo == idx->algo.aead &&
887           cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
888           cryptodev_supports_param_value (cap->aead.digest_sizes,
889                                           digest_size) &&
890           cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
891         return 1;
892     }
893   return 0;
894 }
895
/**
 * Intersect a vector of candidate parameter sizes with a device capability
 * range [param_size_min, param_size_max] stepped by `increment`; sizes not
 * expressible by the range are deleted from *param_sizes in place.
 *
 * increment == 0 means the range is the single value param_size_min
 * (the inner break prevents an infinite loop in that case).
 */
static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  /* vec_len is re-read each iteration because vec_delete shrinks the vector;
     i only advances when the current element is kept */
  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* no such param_size in cap so delete  this size in temp_cap params */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}
924
925 static void
926 cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
927 {
928   cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];
929
930   switch (temp_cap.xform_type)
931     {
932     case RTE_CRYPTO_SYM_XFORM_AUTH:
933       vec_free (temp_cap.auth.digest_sizes);
934       break;
935     case RTE_CRYPTO_SYM_XFORM_CIPHER:
936       vec_free (temp_cap.cipher.key_sizes);
937       break;
938     case RTE_CRYPTO_SYM_XFORM_AEAD:
939       vec_free (temp_cap.aead.key_sizes);
940       vec_free (temp_cap.aead.aad_sizes);
941       vec_free (temp_cap.aead.digest_sizes);
942       break;
943     default:
944       break;
945     }
946   vec_delete (*temp_caps, 1, temp_cap_id);
947 }
948
949 static u32
950 cryptodev_remove_unsupported_param_sizes (
951   cryptodev_capability_t *temp_cap,
952   const struct rte_cryptodev_capabilities *dev_caps)
953 {
954   u32 cap_found = 0;
955   const struct rte_cryptodev_capabilities *cap = &dev_caps[0];
956
957   while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
958     {
959       if (cap->sym.xform_type == temp_cap->xform_type)
960         switch (cap->sym.xform_type)
961           {
962           case RTE_CRYPTO_SYM_XFORM_CIPHER:
963             if (cap->sym.cipher.algo == temp_cap->cipher.algo)
964               {
965                 remove_unsupported_param_size (
966                   &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
967                   cap->sym.cipher.key_size.max,
968                   cap->sym.cipher.key_size.increment);
969                 if (vec_len (temp_cap->cipher.key_sizes) > 0)
970                   cap_found = 1;
971               }
972             break;
973           case RTE_CRYPTO_SYM_XFORM_AUTH:
974             if (cap->sym.auth.algo == temp_cap->auth.algo)
975               {
976                 remove_unsupported_param_size (
977                   &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
978                   cap->sym.auth.digest_size.max,
979                   cap->sym.auth.digest_size.increment);
980                 if (vec_len (temp_cap->auth.digest_sizes) > 0)
981                   cap_found = 1;
982               }
983             break;
984           case RTE_CRYPTO_SYM_XFORM_AEAD:
985             if (cap->sym.aead.algo == temp_cap->aead.algo)
986               {
987                 remove_unsupported_param_size (
988                   &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
989                   cap->sym.aead.key_size.max,
990                   cap->sym.aead.key_size.increment);
991                 remove_unsupported_param_size (
992                   &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
993                   cap->sym.aead.aad_size.max,
994                   cap->sym.aead.aad_size.increment);
995                 remove_unsupported_param_size (
996                   &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
997                   cap->sym.aead.digest_size.max,
998                   cap->sym.aead.digest_size.increment);
999                 if (vec_len (temp_cap->aead.key_sizes) > 0 &&
1000                     vec_len (temp_cap->aead.aad_sizes) > 0 &&
1001                     vec_len (temp_cap->aead.digest_sizes) > 0)
1002                   cap_found = 1;
1003               }
1004             break;
1005           default:
1006             break;
1007           }
1008       if (cap_found)
1009         break;
1010       cap++;
1011     }
1012
1013   return cap_found;
1014 }
1015
1016 static void
1017 cryptodev_get_common_capabilities ()
1018 {
1019   cryptodev_main_t *cmt = &cryptodev_main;
1020   cryptodev_inst_t *dev_inst;
1021   struct rte_cryptodev_info dev_info;
1022   u32 previous_dev_id, dev_id;
1023   u32 cap_id = 0;
1024   u32 param;
1025   cryptodev_capability_t tmp_cap;
1026   const struct rte_cryptodev_capabilities *cap;
1027   const struct rte_cryptodev_capabilities *dev_caps;
1028
1029   clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
1030   if (vec_len (cmt->cryptodev_inst) == 0)
1031     return;
1032   dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
1033   rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
1034   cap = &dev_info.capabilities[0];
1035
1036   /*init capabilities vector*/
1037   while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
1038     {
1039       if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1040         {
1041           cap++;
1042           continue;
1043         }
1044
1045       tmp_cap.xform_type = cap->sym.xform_type;
1046       switch (cap->sym.xform_type)
1047         {
1048         case RTE_CRYPTO_SYM_XFORM_CIPHER:
1049           tmp_cap.cipher.key_sizes = 0;
1050           tmp_cap.cipher.algo = cap->sym.cipher.algo;
1051           for (param = cap->sym.cipher.key_size.min;
1052                param <= cap->sym.cipher.key_size.max;
1053                param += cap->sym.cipher.key_size.increment)
1054             {
1055               vec_add1 (tmp_cap.cipher.key_sizes, param);
1056               if (cap->sym.cipher.key_size.increment == 0)
1057                 break;
1058             }
1059           break;
1060         case RTE_CRYPTO_SYM_XFORM_AUTH:
1061           tmp_cap.auth.algo = cap->sym.auth.algo;
1062           tmp_cap.auth.digest_sizes = 0;
1063           for (param = cap->sym.auth.digest_size.min;
1064                param <= cap->sym.auth.digest_size.max;
1065                param += cap->sym.auth.digest_size.increment)
1066             {
1067               vec_add1 (tmp_cap.auth.digest_sizes, param);
1068               if (cap->sym.auth.digest_size.increment == 0)
1069                 break;
1070             }
1071           break;
1072         case RTE_CRYPTO_SYM_XFORM_AEAD:
1073           tmp_cap.aead.key_sizes = 0;
1074           tmp_cap.aead.aad_sizes = 0;
1075           tmp_cap.aead.digest_sizes = 0;
1076           tmp_cap.aead.algo = cap->sym.aead.algo;
1077           for (param = cap->sym.aead.key_size.min;
1078                param <= cap->sym.aead.key_size.max;
1079                param += cap->sym.aead.key_size.increment)
1080             {
1081               vec_add1 (tmp_cap.aead.key_sizes, param);
1082               if (cap->sym.aead.key_size.increment == 0)
1083                 break;
1084             }
1085           for (param = cap->sym.aead.aad_size.min;
1086                param <= cap->sym.aead.aad_size.max;
1087                param += cap->sym.aead.aad_size.increment)
1088             {
1089               vec_add1 (tmp_cap.aead.aad_sizes, param);
1090               if (cap->sym.aead.aad_size.increment == 0)
1091                 break;
1092             }
1093           for (param = cap->sym.aead.digest_size.min;
1094                param <= cap->sym.aead.digest_size.max;
1095                param += cap->sym.aead.digest_size.increment)
1096             {
1097               vec_add1 (tmp_cap.aead.digest_sizes, param);
1098               if (cap->sym.aead.digest_size.increment == 0)
1099                 break;
1100             }
1101           break;
1102         default:
1103           break;
1104         }
1105
1106       vec_add1 (cmt->supported_caps, tmp_cap);
1107       cap++;
1108     }
1109
1110   while (cap_id < vec_len (cmt->supported_caps))
1111     {
1112       u32 cap_is_supported = 1;
1113       previous_dev_id = cmt->cryptodev_inst->dev_id;
1114
1115       vec_foreach (dev_inst, cmt->cryptodev_inst)
1116         {
1117           dev_id = dev_inst->dev_id;
1118           if (previous_dev_id != dev_id)
1119             {
1120               previous_dev_id = dev_id;
1121               rte_cryptodev_info_get (dev_id, &dev_info);
1122               dev_caps = &dev_info.capabilities[0];
1123               cap_is_supported = cryptodev_remove_unsupported_param_sizes (
1124                 &cmt->supported_caps[cap_id], dev_caps);
1125               if (!cap_is_supported)
1126                 {
1127                   cryptodev_delete_cap (&cmt->supported_caps, cap_id);
1128                   /*no need to check other devices as this one doesn't support
1129                    * this temp_cap*/
1130                   break;
1131                 }
1132             }
1133         }
1134       if (cap_is_supported)
1135         cap_id++;
1136     }
1137 }
1138
1139 static int
1140 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
1141 {
1142   cryptodev_main_t *cmt = &cryptodev_main;
1143   u32 n_queues = cryptodev_count_queue (vm->numa_node);
1144   u32 i;
1145
1146   if (n_queues < n_workers)
1147     return -1;
1148
1149   for (i = 0; i < rte_cryptodev_count (); i++)
1150     cryptodev_configure (vm, i);
1151
1152   if (vec_len (cmt->cryptodev_inst) == 0)
1153     return -1;
1154   cryptodev_get_common_capabilities ();
1155   vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);
1156
1157   /* if there is not enough device stop cryptodev */
1158   if (vec_len (cmt->cryptodev_inst) < n_workers)
1159     return -1;
1160
1161   return 0;
1162 }
1163
1164 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
1165 static void
1166 is_drv_unique (u32 driver_id, u32 **unique_drivers)
1167 {
1168   u32 *unique_elt;
1169   u8 found = 0;
1170
1171   vec_foreach (unique_elt, *unique_drivers)
1172     {
1173       if (*unique_elt == driver_id)
1174         {
1175           found = 1;
1176           break;
1177         }
1178     }
1179
1180   if (!found)
1181     vec_add1 (*unique_drivers, driver_id);
1182 }
1183 #endif
1184
/* Initialize the cryptodev crypto engine: probe/configure devices,
 * allocate per-numa and per-thread state, assign a device resource to
 * every worker, and register the engine's key and operation handlers
 * with vnet.  Returns 0 on success (including the "no usable cryptodev,
 * engine disabled" case) or a clib error on handler setup failure. */
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  u32 node;
  u8 nodes = 0;
  /* when workers exist, thread 0 (main) does not take a crypto resource */
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  /* find the highest numa node id present in the cpu socket bitmap */
  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
	nodes = node;
    }

  /* one per_numa_data slot per numa node id (0..nodes inclusive) */
  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    /* no (or not enough) devices: leave the engine unregistered, not an
     * error */
    return 0;

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  /* pre-22.11 DPDK: session size is per-device, so track the largest
   * private session size and the number of distinct drivers */
  struct rte_cryptodev_info dev_info;
  cryptodev_inst_t *dev_inst;
  u32 *unique_drivers = 0;
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
	rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);
#endif

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1,
                       CLIB_CACHE_LINE_BYTES);
  /* give every worker thread its own cryptodev queue-pair resource */
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
          0)
        {
          error = clib_error_return (0, "Failed to configure cryptodev");
          goto err_handling;
        }
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  /* prefer the raw data-path API when the devices support it; fall back
   * to the crypto-op path otherwise */
  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are presented in
   * startup.conf. Assume it is wanted to be used, turn on async mode here.
   */
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  return error;
}