dpdk: bump to DPDK v21.11
vpp.git: src/plugins/dpdk/cryptodev/cryptodev.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

cryptodev_main_t cryptodev_main;

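/*
 * Build a DPDK AEAD transform for AES-GCM from a vnet crypto key. Only
 * the AES-GCM variants are accepted; the tag (digest) is fixed at 16
 * bytes and the IV at 12 bytes, per GCM as used by IPsec. The IV is
 * read from the crypto op at CRYPTODEV_IV_OFFSET.
 */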
static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
                    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
                    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

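/*
 * Build a two-element cipher+auth transform chain for a linked key.
 * DPDK walks the chain in order, so the array layout encodes the
 * processing order:
 *   encrypt: xforms[0] = cipher, xforms[1] = auth   (encrypt-then-MAC)
 *   decrypt: xforms[0] = auth,   xforms[1] = cipher (verify-then-decrypt)
 */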
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
                      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}

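/*
 * Free a DPDK symmetric session: clear the per-driver private data on
 * every cryptodev first, then release the session handle itself.
 */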
static_always_inline void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}

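/*
 * The three check_*_support helpers below test a single algorithm and
 * parameter size against cmt->supported_caps, i.e. the capability set
 * common to all probed cryptodevs (computed in
 * cryptodev_get_common_capabilities).
 */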
static int
check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
        continue;
      if (vcap->cipher.algo != algo)
        continue;
      vec_foreach (s, vcap->cipher.key_sizes)
        if (*s == key_size)
          return 1;
    }

  return 0;
}

static int
check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
        continue;
      if (vcap->auth.algo != algo)
        continue;
      vec_foreach (s, vcap->auth.digest_sizes)
        if (*s == digest_size)
          return 1;
    }

  return 0;
}

static_always_inline int
check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
                    u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;
  u32 key_match = 0, digest_match = 0, aad_match = 0;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
        continue;
      if (vcap->aead.algo != algo)
        continue;
      vec_foreach (s, vcap->aead.digest_sizes)
        if (*s == digest_size)
          {
            digest_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.key_sizes)
        if (*s == key_size)
          {
            key_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.aad_sizes)
        if (*s == aad_size)
          {
            aad_match = 1;
            break;
          }
    }

  if (key_match == 1 && digest_match == 1 && aad_match == 1)
    return 1;

  return 0;
}

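/*
 * Return 1 if a vnet crypto key is usable on every probed device.
 * Linked (cipher + HMAC) keys are matched via the link-alg table; AEAD
 * keys must match all AAD-size variants listed for the algorithm in
 * foreach_vnet_aead_crypto_conversion, hence the matched < 2 test.
 */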
static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
            default : return 0;
        }
      return 0;
    }

#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

    if (matched < 2) return 0;

  return 1;
}

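/*
 * Key add/del/modify hook. On add it only reserves the per-numa session
 * slots; the DPDK sessions themselves are created lazily by
 * cryptodev_session_create when the key is first used. On delete or
 * modify it tears down any sessions already created for the key.
 */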
void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
        return;

      vec_foreach_index (i, cmt->per_numa_data)
        {
          if (!ckey->keys)
            continue;
          if (!ckey->keys[i])
            continue;
          if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
            {
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

              CLIB_MEMORY_STORE_BARRIER ();
              ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
              ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
            }
        }
      return;
    }

  /* create key */

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

/*static*/ void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}

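/*
 * Allocate the two mempools a numa node needs for sessions: one for
 * session handles and one for per-driver private session data, sized by
 * the largest private size among the probed drivers (cmt->sess_sz).
 * The pool names are built with format () and NUL-terminated (trailing
 * %c, 0) since DPDK expects C strings.
 */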
clib_error_t *
allocate_session_pools (u32 numa_node,
                        cryptodev_session_pool_t *sess_pools_elt, u32 len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u8 *name;
  clib_error_t *error = NULL;

  name = format (0, "vcryptodev_sess_pool_%u_%u%c", numa_node, len, 0);
  sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
    (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);

  if (!sess_pools_elt->sess_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

  name = format (0, "cryptodev_sess_pool_%u_%u%c", numa_node, len, 0);
  sess_pools_elt->sess_priv_pool = rte_mempool_create (
    (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
    0, NULL, NULL, NULL, NULL, numa_node, 0);

  if (!sess_pools_elt->sess_priv_pool)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto clear_mempools;
    }
  vec_free (name);

clear_mempools:
  if (error)
    {
      vec_free (name);
      if (sess_pools_elt->sess_pool)
        rte_mempool_free (sess_pools_elt->sess_pool);
      if (sess_pools_elt->sess_priv_pool)
        rte_mempool_free (sess_pools_elt->sess_priv_pool);
      return error;
    }
  return 0;
}

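/*
 * Create the DPDK sessions for a key on the calling thread's numa node:
 * pick a session pool with space (growing the pool vector if needed),
 * allocate one session per op type, build the encrypt/decrypt
 * transforms, then initialize the sessions on every cryptodev instance
 * whose driver has not populated its per-driver session data yet. The
 * aad_len is stashed in opaque_data so callers can later detect an
 * AAD-length change.
 */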
int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                          u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_session_pool_t *sess_pools_elt;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
  struct rte_cryptodev_info dev_info;
  u32 numa_node = vm->numa_node;
  clib_error_t *error;
  int ret = 0;
  u8 found = 0;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);

  clib_spinlock_lock (&cmt->tlock);
  vec_foreach (sess_pools_elt, numa_data->sess_pools)
    {
      if (sess_pools_elt->sess_pool == NULL)
        {
          error = allocate_session_pools (numa_node, sess_pools_elt,
                                          vec_len (numa_data->sess_pools) - 1);
          if (error)
            {
              ret = -1;
              goto clear_key;
            }
        }
      if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
        {
          found = 1;
          break;
        }
    }

  if (found == 0)
    {
      vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
      error = allocate_session_pools (numa_node, sess_pools_elt,
                                      vec_len (numa_data->sess_pools) - 1);
      if (error)
        {
          ret = -1;
          goto clear_key;
        }
    }

  sess_pool = sess_pools_elt->sess_pool;
  sess_priv_pool = sess_pools_elt->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  if (ret)
    {
      ret = -1;
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;

      /* if the session is already initialized for this driver type, skip
         it so the session data's refcnt is not increased again */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
          sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
        continue;

      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
        sess_priv_pool);
      if (ret < 0)
        goto clear_key;
      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
        sess_priv_pool);
      if (ret < 0)
        goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  clib_spinlock_unlock (&cmt->tlock);
  return ret;
}

typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 *  assign a cryptodev resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 *  @param op: the assignment method.
 *  @return: 0 on success, a negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no inflight ops are in the
     queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
      {
        cinst = cmt->cryptodev_inst + idx;
        if (cinst->dev_id == cet->cryptodev_id &&
            cinst->q_id == cet->cryptodev_q)
          break;
      }
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
        return -EINVAL;
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}

static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
        s = format (s, "%u (%v)\n", thread_index,
                    vlib_worker_threads[thread_index].name);
        break;
      }
  }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                              vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
                   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  if (cmt->is_raw_api)
    vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
  else
    vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};

static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                             vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
        thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
        inst_present = 1;
      else
        {
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, line_input);
          return error;
        }
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error = clib_error_return (
        0, "cannot assign crypto resource to the master thread");
      return error;
    }

  if (thread_index >= vec_len (cmt->per_thread_data) ||
      inst_index >= vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
                                   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error =
        clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
        "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};

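/*
 * Count all queue pairs across the probed cryptodevs. Note that the
 * numa argument is currently unused: queues on remote nodes are counted
 * too.
 */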
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 i, q_count = 0;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

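/*
 * Configure and start one cryptodev: require symmetric crypto support,
 * set up every queue pair with CRYPTODEV_NB_CRYPTO_OPS descriptors on
 * the device's numa node, then register one cryptodev_inst_t per queue
 * so each (device, queue) pair can be assigned to a worker.
 */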
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
      qp_cfg.mp_session_private = 0;
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
                                            info.device->numa_node);
      if (ret)
        {
          clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
                        cryptodev_id, i, ret);
          break;
        }
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
                "%s_q%u", info.device->name, i);
    }

  return 0;
}

static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;
  return 0;
}

static int
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;
  vec_foreach (value, params)
    {
      if (*value == param_value)
        return 1;
    }
  return 0;
}

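/*
 * Public (non-static) capability query: check whether an algorithm with
 * the given key/digest/AAD sizes is in the capability set common to all
 * probed devices. Exported for use outside this file.
 */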
int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                             u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;
  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
        continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
          cap->auth.algo == idx->algo.auth &&
          cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
          cap->cipher.algo == idx->algo.cipher &&
          cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
          cap->aead.algo == idx->algo.aead &&
          cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
          cryptodev_supports_param_value (cap->aead.digest_sizes,
                                          digest_size) &&
          cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
        return 1;
    }
  return 0;
}

static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* the device does not support this size; drop it from the
           candidate params */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}

static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}

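/*
 * Intersect one candidate capability with a device's capability list:
 * keep only the parameter sizes the device also advertises. Returns 1
 * if the capability survives with at least one size in every parameter
 * vector, 0 if the device cannot support it at all.
 */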
static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
        switch (cap->sym.xform_type)
          {
          case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (cap->sym.cipher.algo == temp_cap->cipher.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
                  cap->sym.cipher.key_size.max,
                  cap->sym.cipher.key_size.increment);
                if (vec_len (temp_cap->cipher.key_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (cap->sym.auth.algo == temp_cap->auth.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
                  cap->sym.auth.digest_size.max,
                  cap->sym.auth.digest_size.increment);
                if (vec_len (temp_cap->auth.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AEAD:
            if (cap->sym.aead.algo == temp_cap->aead.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
                  cap->sym.aead.key_size.max,
                  cap->sym.aead.key_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
                  cap->sym.aead.aad_size.max,
                  cap->sym.aead.aad_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
                  cap->sym.aead.digest_size.max,
                  cap->sym.aead.digest_size.increment);
                if (vec_len (temp_cap->aead.key_sizes) > 0 &&
                    vec_len (temp_cap->aead.aad_sizes) > 0 &&
                    vec_len (temp_cap->aead.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          default:
            break;
          }
      if (cap_found)
        break;
      cap++;
    }

  return cap_found;
}

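/*
 * Compute the capability set common to all probed devices: seed
 * cmt->supported_caps from the first device's capability list by
 * expanding each (min, max, increment) range into an explicit size
 * vector, then intersect it against every other device, dropping any
 * capability a device cannot support.
 */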
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /* init capabilities vector */
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
        {
          cap++;
          continue;
        }

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
        {
        case RTE_CRYPTO_SYM_XFORM_CIPHER:
          tmp_cap.cipher.key_sizes = 0;
          tmp_cap.cipher.algo = cap->sym.cipher.algo;
          for (param = cap->sym.cipher.key_size.min;
               param <= cap->sym.cipher.key_size.max;
               param += cap->sym.cipher.key_size.increment)
            {
              vec_add1 (tmp_cap.cipher.key_sizes, param);
              if (cap->sym.cipher.key_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AUTH:
          tmp_cap.auth.algo = cap->sym.auth.algo;
          tmp_cap.auth.digest_sizes = 0;
          for (param = cap->sym.auth.digest_size.min;
               param <= cap->sym.auth.digest_size.max;
               param += cap->sym.auth.digest_size.increment)
            {
              vec_add1 (tmp_cap.auth.digest_sizes, param);
              if (cap->sym.auth.digest_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AEAD:
          tmp_cap.aead.key_sizes = 0;
          tmp_cap.aead.aad_sizes = 0;
          tmp_cap.aead.digest_sizes = 0;
          tmp_cap.aead.algo = cap->sym.aead.algo;
          for (param = cap->sym.aead.key_size.min;
               param <= cap->sym.aead.key_size.max;
               param += cap->sym.aead.key_size.increment)
            {
              vec_add1 (tmp_cap.aead.key_sizes, param);
              if (cap->sym.aead.key_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.aad_size.min;
               param <= cap->sym.aead.aad_size.max;
               param += cap->sym.aead.aad_size.increment)
            {
              vec_add1 (tmp_cap.aead.aad_sizes, param);
              if (cap->sym.aead.aad_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.digest_size.min;
               param <= cap->sym.aead.digest_size.max;
               param += cap->sym.aead.digest_size.increment)
            {
              vec_add1 (tmp_cap.aead.digest_sizes, param);
              if (cap->sym.aead.digest_size.increment == 0)
                break;
            }
          break;
        default:
          break;
        }

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
        {
          dev_id = dev_inst->dev_id;
          if (previous_dev_id != dev_id)
            {
              previous_dev_id = dev_id;
              rte_cryptodev_info_get (dev_id, &dev_info);
              dev_caps = &dev_info.capabilities[0];
              cap_is_supported = cryptodev_remove_unsupported_param_sizes (
                &cmt->supported_caps[cap_id], dev_caps);
              if (!cap_is_supported)
                {
                  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
                  /* no need to check other devices as this one doesn't
                   * support this temp_cap */
                  break;
                }
            }
        }
      if (cap_is_supported)
        cap_id++;
    }
}

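/*
 * Probe every cryptodev and make sure there are enough (device, queue)
 * instances for all workers; on success compute the common capability
 * set and sort the instances by queue id so queue 0 of each device is
 * handed out first.
 */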
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  if (vec_len (cmt->cryptodev_inst) == 0)
    return -1;
  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough (device, queue) instances, disable cryptodev */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}

static void
is_drv_unique (u32 driver_id, u32 **unique_drivers)
{
  u32 *unique_elt;
  u8 found = 0;

  vec_foreach (unique_elt, *unique_drivers)
    {
      if (*unique_elt == driver_id)
        {
          found = 1;
          break;
        }
    }

  if (!found)
    vec_add1 (*unique_drivers, driver_id);
}

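/*
 * Plugin entry point: probe the cryptodevs, size the per-numa session
 * pool bookkeeping, collect the number of distinct drivers and the
 * largest private session size, bind one (device, queue) instance to
 * each worker, then register the engine with either the raw data-path
 * handlers (when available) or the crypto-op handlers.
 */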
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 node;
  u8 nodes = 0;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  u32 *unique_drivers = 0;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
        nodes = node;
    }

  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Not enough cryptodev resources");
      goto err_handling;
    }

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
        rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
          0)
        {
          error = clib_error_return (0, "Failed to configure cryptodev");
          goto err_handling;
        }
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
                                      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are present in
   * startup.conf; assume the user wants to use them and turn on async
   * mode here.
   */
  vnet_crypto_request_async_mode (1);
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  return error;
}