dpdk-cryptodev: add support chacha20-poly1305
[vpp.git] / src / plugins / dpdk / cryptodev / cryptodev.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/ipsec/ipsec.h>
22 #include <vpp/app/version.h>
23
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
27 #undef always_inline
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_config.h>
33
34 #include "cryptodev.h"
35
36 #if CLIB_DEBUG > 0
37 #define always_inline static inline
38 #else
39 #define always_inline static inline __attribute__ ((__always_inline__))
40 #endif
41
42 cryptodev_main_t cryptodev_main;
43
44 static_always_inline int
45 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
46                     cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
47                     u32 aad_len)
48 {
49   struct rte_crypto_aead_xform *aead_xform = &xform->aead;
50   memset (xform, 0, sizeof (*xform));
51   xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
52   xform->next = 0;
53
54   if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
55       key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
56       key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
57     {
58       aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
59     }
60   else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
61     {
62       aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
63     }
64   else
65     return -1;
66
67   aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
68     RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
69   aead_xform->aad_length = aad_len;
70   aead_xform->digest_length = 16;
71   aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
72   aead_xform->iv.length = 12;
73   aead_xform->key.data = key->data;
74   aead_xform->key.length = vec_len (key->data);
75
76   return 0;
77 }
78
79 static_always_inline int
80 prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
81                       cryptodev_op_type_t op_type,
82                       const vnet_crypto_key_t *key)
83 {
84   struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
85   vnet_crypto_key_t *key_cipher, *key_auth;
86   enum rte_crypto_cipher_algorithm cipher_algo = ~0;
87   enum rte_crypto_auth_algorithm auth_algo = ~0;
88   u32 digest_len = ~0;
89
90   key_cipher = vnet_crypto_get_key (key->index_crypto);
91   key_auth = vnet_crypto_get_key (key->index_integ);
92   if (!key_cipher || !key_auth)
93     return -1;
94
95   if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
96     {
97       xform_cipher = xforms;
98       xform_auth = xforms + 1;
99       xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
100       xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
101     }
102   else
103     {
104       xform_cipher = xforms + 1;
105       xform_auth = xforms;
106       xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
107       xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
108     }
109
110   xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
111   xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
112   xforms->next = xforms + 1;
113
114   switch (key->async_alg)
115     {
116 #define _(a, b, c, d, e)                                                      \
117   case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
118     cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
119     auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
120     digest_len = e;                                                           \
121     break;
122
123       foreach_cryptodev_link_async_alg
124 #undef _
125     default:
126       return -1;
127     }
128
129   xform_cipher->cipher.algo = cipher_algo;
130   xform_cipher->cipher.key.data = key_cipher->data;
131   xform_cipher->cipher.key.length = vec_len (key_cipher->data);
132   xform_cipher->cipher.iv.length = 16;
133   xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;
134
135   xform_auth->auth.algo = auth_algo;
136   xform_auth->auth.digest_length = digest_len;
137   xform_auth->auth.key.data = key_auth->data;
138   xform_auth->auth.key.length = vec_len (key_auth->data);
139
140   return 0;
141 }
142
143 static_always_inline void
144 cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
145 {
146   u32 n_devs, i;
147
148   if (sess == NULL)
149     return;
150
151   n_devs = rte_cryptodev_count ();
152
153   for (i = 0; i < n_devs; i++)
154     rte_cryptodev_sym_session_clear (i, sess);
155
156   rte_cryptodev_sym_session_free (sess);
157 }
158
159 static int
160 check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
161 {
162   cryptodev_main_t *cmt = &cryptodev_main;
163   cryptodev_capability_t *vcap;
164   u32 *s;
165
166   vec_foreach (vcap, cmt->supported_caps)
167     {
168       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
169         continue;
170       if (vcap->cipher.algo != algo)
171         continue;
172       vec_foreach (s, vcap->cipher.key_sizes)
173         if (*s == key_size)
174           return 1;
175     }
176
177   return 0;
178 }
179
180 static int
181 check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
182 {
183   cryptodev_main_t *cmt = &cryptodev_main;
184   cryptodev_capability_t *vcap;
185   u32 *s;
186
187   vec_foreach (vcap, cmt->supported_caps)
188     {
189       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
190         continue;
191       if (vcap->auth.algo != algo)
192         continue;
193       vec_foreach (s, vcap->auth.digest_sizes)
194         if (*s == digest_size)
195           return 1;
196     }
197
198   return 0;
199 }
200
201 static_always_inline int
202 check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
203                     u32 digest_size, u32 aad_size)
204 {
205   cryptodev_main_t *cmt = &cryptodev_main;
206   cryptodev_capability_t *vcap;
207   u32 *s;
208   u32 key_match = 0, digest_match = 0, aad_match = 0;
209
210   vec_foreach (vcap, cmt->supported_caps)
211     {
212       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
213         continue;
214       if (vcap->aead.algo != algo)
215         continue;
216       vec_foreach (s, vcap->aead.digest_sizes)
217         if (*s == digest_size)
218           {
219             digest_match = 1;
220             break;
221           }
222       vec_foreach (s, vcap->aead.key_sizes)
223         if (*s == key_size)
224           {
225             key_match = 1;
226             break;
227           }
228       vec_foreach (s, vcap->aead.aad_sizes)
229         if (*s == aad_size)
230           {
231             aad_match = 1;
232             break;
233           }
234     }
235
236   if (key_match == 1 && digest_match == 1 && aad_match == 1)
237     return 1;
238
239   return 0;
240 }
241
/* Check whether the common capability set of all configured cryptodevs
   can handle the algorithm(s) referenced by a vnet crypto key.  For
   linked keys both the cipher and the HMAC must be supported; for AEAD
   keys every conversion-table entry for the algorithm must be supported.
   Returns 1 when supported, 0 otherwise. */
static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      /* linked (cipher + HMAC) key: expand the supported link-algo table
         and probe both halves of the matching entry */
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
            default : return 0;
        }
      return 0;
    }

  /* AEAD key: count how many conversion-table entries for this alg are
     supported (g = key size, e = digest size, f = aad size) */
#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

    /* NOTE(review): "< 2" presumably because each AEAD alg appears twice
       in the conversion table (aad lengths 8 and 12) and both variants
       must be supported — verify against
       foreach_vnet_aead_crypto_conversion */
    if (matched < 2) return 0;

  return 1;
}
276
277 void
278 cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
279                         vnet_crypto_key_index_t idx, u32 aad_len)
280 {
281   cryptodev_main_t *cmt = &cryptodev_main;
282   vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
283   cryptodev_key_t *ckey = 0;
284   u32 i;
285
286   vec_validate (cmt->keys, idx);
287   ckey = vec_elt_at_index (cmt->keys, idx);
288
289   if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
290     {
291       if (idx >= vec_len (cmt->keys))
292         return;
293
294       vec_foreach_index (i, cmt->per_numa_data)
295         {
296           if (!ckey->keys)
297             continue;
298           if (!ckey->keys[i])
299             continue;
300           if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
301             {
302               cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
303               cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);
304
305               CLIB_MEMORY_STORE_BARRIER ();
306               ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
307               ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
308             }
309         }
310       return;
311     }
312
313   /* create key */
314
315   /* do not create session for unsupported alg */
316   if (cryptodev_check_supported_vnet_alg (key) == 0)
317     return;
318
319   vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
320   vec_foreach_index (i, ckey->keys)
321     vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
322 }
323
/* vnet crypto key-op callback registered with the crypto framework.
   Delegates to cryptodev_sess_handler with an AAD length of 8.
   NOTE(review): 8 presumably matches ESP without extended sequence
   numbers; sessions for other AAD lengths appear to be re-created on
   demand elsewhere — confirm against cryptodev_session_create callers. */
/*static*/ void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
330
331 clib_error_t *
332 allocate_session_pools (u32 numa_node,
333                         cryptodev_session_pool_t *sess_pools_elt, u32 len)
334 {
335   cryptodev_main_t *cmt = &cryptodev_main;
336   u8 *name;
337   clib_error_t *error = NULL;
338
339   name = format (0, "vcryptodev_sess_pool_%u_%c", numa_node, len);
340   sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
341     (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
342
343   if (!sess_pools_elt->sess_pool)
344     {
345       error = clib_error_return (0, "Not enough memory for mp %s", name);
346       goto clear_mempools;
347     }
348   vec_free (name);
349
350   name = format (0, "cryptodev_sess_pool_%u_%c", numa_node, len);
351   sess_pools_elt->sess_priv_pool = rte_mempool_create (
352     (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
353     0, NULL, NULL, NULL, NULL, numa_node, 0);
354
355   if (!sess_pools_elt->sess_priv_pool)
356     {
357       error = clib_error_return (0, "Not enough memory for mp %s", name);
358       goto clear_mempools;
359     }
360   vec_free (name);
361
362 clear_mempools:
363   if (error)
364     {
365       vec_free (name);
366       if (sess_pools_elt->sess_pool)
367         rte_mempool_free (sess_pools_elt->sess_pool);
368       if (sess_pools_elt->sess_priv_pool)
369         rte_mempool_free (sess_pools_elt->sess_priv_pool);
370       return error;
371     }
372   return 0;
373 }
374
375 int
376 cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
377                           u32 aad_len)
378 {
379   cryptodev_main_t *cmt = &cryptodev_main;
380   cryptodev_numa_data_t *numa_data;
381   cryptodev_inst_t *dev_inst;
382   vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
383   struct rte_mempool *sess_pool, *sess_priv_pool;
384   cryptodev_session_pool_t *sess_pools_elt;
385   cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
386   struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
387   struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
388   struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
389   struct rte_cryptodev_info dev_info;
390   u32 numa_node = vm->numa_node;
391   clib_error_t *error;
392   int ret = 0;
393   u8 found = 0;
394
395   numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
396
397   clib_spinlock_lock (&cmt->tlock);
398   vec_foreach (sess_pools_elt, numa_data->sess_pools)
399     {
400       if (sess_pools_elt->sess_pool == NULL)
401         {
402           error = allocate_session_pools (numa_node, sess_pools_elt,
403                                           vec_len (numa_data->sess_pools) - 1);
404           if (error)
405             {
406               ret = -1;
407               goto clear_key;
408             }
409         }
410       if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
411         {
412           found = 1;
413           break;
414         }
415     }
416
417   if (found == 0)
418     {
419       vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
420       error = allocate_session_pools (numa_node, sess_pools_elt,
421                                       vec_len (numa_data->sess_pools) - 1);
422       if (error)
423         {
424           ret = -1;
425           goto clear_key;
426         }
427     }
428
429   sess_pool = sess_pools_elt->sess_pool;
430   sess_priv_pool = sess_pools_elt->sess_priv_pool;
431
432   sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
433     rte_cryptodev_sym_session_create (sess_pool);
434
435   sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
436     rte_cryptodev_sym_session_create (sess_pool);
437
438   if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
439     ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
440   else
441     ret =
442       prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
443   if (ret)
444     {
445       ret = -1;
446       goto clear_key;
447     }
448
449   if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
450     prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
451   else
452     prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
453
454   vec_foreach (dev_inst, cmt->cryptodev_inst)
455     {
456       u32 dev_id = dev_inst->dev_id;
457       rte_cryptodev_info_get (dev_id, &dev_info);
458       u32 driver_id = dev_info.driver_id;
459
460       /* if the session is already configured for the driver type, avoid
461          configuring it again to increase the session data's refcnt */
462       if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
463           sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
464         continue;
465
466       ret = rte_cryptodev_sym_session_init (
467         dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
468         sess_priv_pool);
469       ret = rte_cryptodev_sym_session_init (
470         dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
471         sess_priv_pool);
472       if (ret < 0)
473         goto clear_key;
474     }
475
476   sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
477   sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
478
479   CLIB_MEMORY_STORE_BARRIER ();
480   ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
481     sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
482   ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
483     sessions[CRYPTODEV_OP_TYPE_DECRYPT];
484
485 clear_key:
486   if (ret != 0)
487     {
488       cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
489       cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
490     }
491   clib_spinlock_unlock (&cmt->tlock);
492   return ret;
493 }
494
/* How a cryptodev instance (device queue pair) is bound to a worker. */
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0, /* pick the first unassigned instance */
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,   /* move worker to a given instance */
} cryptodev_resource_assign_op_t;
500
/**
 *  assign a cryptodev resource (device queue pair) to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: the target instance index; ignored when
 *    op is "ASSIGN_AUTO".
 *  @param op: the assignment method.
 *  @return: 0 if successfully, negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assign resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      /* NOTE(review): this fullness check runs before the lock is taken;
         two threads racing here could both pass it — confirm callers are
         serialized (e.g. invoked from the main thread only) */
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      /* claim the first instance not yet assigned to any worker */
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;

      /* find the worker's current instance so it can be released */
      vec_foreach_index (idx, cmt->cryptodev_inst)
      {
        cinst = cmt->cryptodev_inst + idx;
        if (cinst->dev_id == cet->cryptodev_id &&
            cinst->q_id == cet->cryptodev_q)
          break;
      }
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
        return -EINVAL;
      clib_spinlock_lock (&cmt->tlock);
      /* release the old instance, claim the requested one */
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
566
/* vppinfra format helper: print one cryptodev instance as
   "<device name> <queue id> <assigned worker | free>".
   va_arg: u32 index into cmt->cryptodev_inst. */
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  /* look for the worker this instance is assigned to */
  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    /* when workers exist, thread 0 is the main thread and never owns a
       cryptodev queue */
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
        s = format (s, "%u (%v)\n", thread_index,
                    vlib_worker_threads[thread_index].name);
        break;
      }
  }

  /* loop ran to completion without a match: instance is unassigned */
  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}
598
599 static clib_error_t *
600 cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
601                               vlib_cli_command_t * cmd)
602 {
603   cryptodev_main_t *cmt = &cryptodev_main;
604   u32 inst;
605
606   vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
607                    "Assigned-to");
608   if (vec_len (cmt->cryptodev_inst) == 0)
609     {
610       vlib_cli_output (vm, "(nil)\n");
611       return 0;
612     }
613
614   vec_foreach_index (inst, cmt->cryptodev_inst)
615     vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
616
617   if (cmt->is_raw_api)
618     vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
619   else
620     vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
621   return 0;
622 }
623
/* CLI registration: "show cryptodev assignment". */
VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
629
630 static clib_error_t *
631 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
632                              vlib_cli_command_t * cmd)
633 {
634   cryptodev_main_t *cmt = &cryptodev_main;
635   cryptodev_engine_thread_t *cet;
636   unformat_input_t _line_input, *line_input = &_line_input;
637   u32 thread_index, inst_index;
638   u32 thread_present = 0, inst_present = 0;
639   clib_error_t *error = 0;
640   int ret;
641
642   /* Get a line of input. */
643   if (!unformat_user (input, unformat_line_input, line_input))
644     return 0;
645
646   while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
647     {
648       if (unformat (line_input, "thread %u", &thread_index))
649         thread_present = 1;
650       else if (unformat (line_input, "resource %u", &inst_index))
651         inst_present = 1;
652       else
653         {
654           error = clib_error_return (0, "unknown input `%U'",
655                                      format_unformat_error, line_input);
656           return error;
657         }
658     }
659
660   if (!thread_present || !inst_present)
661     {
662       error = clib_error_return (0, "mandatory argument(s) missing");
663       return error;
664     }
665
666   if (thread_index == 0 && vlib_num_workers () > 0)
667     {
668       error =
669         clib_error_return (0, "assign crypto resource for master thread");
670       return error;
671     }
672
673   if (thread_index > vec_len (cmt->per_thread_data) ||
674       inst_index > vec_len (cmt->cryptodev_inst))
675     {
676       error = clib_error_return (0, "wrong thread id or resource id");
677       return error;
678     }
679
680   cet = cmt->per_thread_data + thread_index;
681   ret = cryptodev_assign_resource (cet, inst_index,
682                                    CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
683   if (ret)
684     {
685       error =
686         clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
687       return error;
688     }
689
690   return 0;
691 }
692
/* CLI registration: "set cryptodev assignment ...". */
VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
        "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
699
700 static u32
701 cryptodev_count_queue (u32 numa)
702 {
703   struct rte_cryptodev_info info;
704   u32 n_cryptodev = rte_cryptodev_count ();
705   u32 i, q_count = 0;
706
707   for (i = 0; i < n_cryptodev; i++)
708     {
709       rte_cryptodev_info_get (i, &info);
710       q_count += info.max_nb_queue_pairs;
711     }
712
713   return q_count;
714 }
715
716 static int
717 cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
718 {
719   struct rte_cryptodev_config cfg;
720   struct rte_cryptodev_info info;
721   cryptodev_main_t *cmt = &cryptodev_main;
722   u32 i;
723   int ret;
724
725   rte_cryptodev_info_get (cryptodev_id, &info);
726
727   if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
728     return -1;
729
730   cfg.socket_id = info.device->numa_node;
731   cfg.nb_queue_pairs = info.max_nb_queue_pairs;
732
733   rte_cryptodev_configure (cryptodev_id, &cfg);
734
735   for (i = 0; i < info.max_nb_queue_pairs; i++)
736     {
737       struct rte_cryptodev_qp_conf qp_cfg;
738
739       qp_cfg.mp_session = 0;
740       qp_cfg.mp_session_private = 0;
741       qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
742
743       ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
744                                             info.device->numa_node);
745       if (ret)
746         {
747           clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
748                         cryptodev_id, i, ret);
749           break;
750         }
751     }
752
753   if (i != info.max_nb_queue_pairs)
754     return -1;
755
756   /* start the device */
757   rte_cryptodev_start (cryptodev_id);
758
759   for (i = 0; i < info.max_nb_queue_pairs; i++)
760     {
761       cryptodev_inst_t *cdev_inst;
762       vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
763       cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
764       cdev_inst->dev_id = cryptodev_id;
765       cdev_inst->q_id = i;
766
767       snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
768                 "%s_q%u", info.device->name, i);
769     }
770
771   return 0;
772 }
773
774 static int
775 cryptodev_cmp (void *v1, void *v2)
776 {
777   cryptodev_inst_t *a1 = v1;
778   cryptodev_inst_t *a2 = v2;
779
780   if (a1->q_id > a2->q_id)
781     return 1;
782   if (a1->q_id < a2->q_id)
783     return -1;
784   return 0;
785 }
786
787 static int
788 cryptodev_supports_param_value (u32 *params, u32 param_value)
789 {
790   u32 *value;
791   vec_foreach (value, params)
792     {
793       if (*value == param_value)
794         return 1;
795     }
796   return 0;
797 }
798
/* Check whether an (xform type, algorithm) pair from 'idx' is present in
   the common capability set with the given parameter sizes.  Only the
   size argument(s) relevant to idx->type are consulted: digest_size for
   auth, key_size for cipher, all three for aead.  Returns 1 when
   supported, 0 otherwise. */
int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                             u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;
  vec_foreach (cap, cmt->supported_caps)
    {

      if (cap->xform_type != idx->type)
        continue;

      /* auth: only the digest size matters */
      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
          cap->auth.algo == idx->algo.auth &&
          cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
        return 1;

      /* cipher: only the key size matters */
      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
          cap->cipher.algo == idx->algo.cipher &&
          cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
        return 1;

      /* aead: key, digest and aad sizes must all be supported */
      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
          cap->aead.algo == idx->algo.aead &&
          cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
          cryptodev_supports_param_value (cap->aead.digest_sizes,
                                          digest_size) &&
          cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
        return 1;
    }
  return 0;
}
831
/* Remove from *param_sizes every size that falls outside the device
   capability range [param_size_min, param_size_max] stepped by
   'increment'.  increment == 0 denotes a single-value range containing
   only param_size_min (DPDK capability convention). */
static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  /* while-loop instead of vec_foreach because vec_delete shifts the
     remaining elements down: i only advances when the element is kept */
  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          /* zero increment: the range holds only param_size_min */
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* no such param_size in cap so delete  this size in temp_cap params */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}
860
861 static void
862 cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
863 {
864   cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];
865
866   switch (temp_cap.xform_type)
867     {
868     case RTE_CRYPTO_SYM_XFORM_AUTH:
869       vec_free (temp_cap.auth.digest_sizes);
870       break;
871     case RTE_CRYPTO_SYM_XFORM_CIPHER:
872       vec_free (temp_cap.cipher.key_sizes);
873       break;
874     case RTE_CRYPTO_SYM_XFORM_AEAD:
875       vec_free (temp_cap.aead.key_sizes);
876       vec_free (temp_cap.aead.aad_sizes);
877       vec_free (temp_cap.aead.digest_sizes);
878       break;
879     default:
880       break;
881     }
882   vec_delete (*temp_caps, 1, temp_cap_id);
883 }
884
/* Intersect one collected capability (temp_cap) with another device's
   capability list (dev_caps): prune every parameter size the device does
   not support.  Returns 1 when the device advertises the same algorithm
   and at least one size survives in every pruned vector, 0 otherwise
   (the caller then discards temp_cap entirely). */
static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  /* dev_caps is terminated by an entry with op == UNDEFINED */
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
        switch (cap->sym.xform_type)
          {
          case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (cap->sym.cipher.algo == temp_cap->cipher.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
                  cap->sym.cipher.key_size.max,
                  cap->sym.cipher.key_size.increment);
                if (vec_len (temp_cap->cipher.key_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (cap->sym.auth.algo == temp_cap->auth.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
                  cap->sym.auth.digest_size.max,
                  cap->sym.auth.digest_size.increment);
                if (vec_len (temp_cap->auth.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AEAD:
            /* aead survives only when key, aad AND digest sizes all keep
               at least one supported value */
            if (cap->sym.aead.algo == temp_cap->aead.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
                  cap->sym.aead.key_size.max,
                  cap->sym.aead.key_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
                  cap->sym.aead.aad_size.max,
                  cap->sym.aead.aad_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
                  cap->sym.aead.digest_size.max,
                  cap->sym.aead.digest_size.increment);
                if (vec_len (temp_cap->aead.key_sizes) > 0 &&
                    vec_len (temp_cap->aead.aad_sizes) > 0 &&
                    vec_len (temp_cap->aead.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          default:
            break;
          }
      /* the matching entry was found and processed: stop scanning */
      if (cap_found)
        break;
      cap++;
    }

  return cap_found;
}
951
/**
 * Compute the set of crypto capabilities supported by ALL probed cryptodev
 * instances and store it in cmt->supported_caps.
 *
 * The vector is first seeded from the capabilities of the first device in
 * cmt->cryptodev_inst, then intersected against every other device: any
 * capability (or individual key/aad/digest size) not supported by some
 * device is removed from the common set.
 */
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  clib_memset (&tmp_cap, 0, sizeof (cryptodev_capability_t));
  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  /* seed the common-capability vector from the first configured device */
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /*init capabilities vector*/
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
	{
	  cap++;
	  continue;
	}

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
	{
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
	  /* expand the DPDK (min, max, increment) range descriptor into an
	   * explicit vector of supported sizes; increment == 0 means the
	   * device supports a single fixed size, hence the early break */
	  tmp_cap.cipher.key_sizes = 0;
	  tmp_cap.cipher.algo = cap->sym.cipher.algo;
	  for (param = cap->sym.cipher.key_size.min;
	       param <= cap->sym.cipher.key_size.max;
	       param += cap->sym.cipher.key_size.increment)
	    {
	      vec_add1 (tmp_cap.cipher.key_sizes, param);
	      if (cap->sym.cipher.key_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AUTH:
	  tmp_cap.auth.algo = cap->sym.auth.algo;
	  tmp_cap.auth.digest_sizes = 0;
	  for (param = cap->sym.auth.digest_size.min;
	       param <= cap->sym.auth.digest_size.max;
	       param += cap->sym.auth.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.auth.digest_sizes, param);
	      if (cap->sym.auth.digest_size.increment == 0)
		break;
	    }
	  break;
	case RTE_CRYPTO_SYM_XFORM_AEAD:
	  /* AEAD capabilities carry three independent size ranges */
	  tmp_cap.aead.key_sizes = 0;
	  tmp_cap.aead.aad_sizes = 0;
	  tmp_cap.aead.digest_sizes = 0;
	  tmp_cap.aead.algo = cap->sym.aead.algo;
	  for (param = cap->sym.aead.key_size.min;
	       param <= cap->sym.aead.key_size.max;
	       param += cap->sym.aead.key_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.key_sizes, param);
	      if (cap->sym.aead.key_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.aad_size.min;
	       param <= cap->sym.aead.aad_size.max;
	       param += cap->sym.aead.aad_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.aad_sizes, param);
	      if (cap->sym.aead.aad_size.increment == 0)
		break;
	    }
	  for (param = cap->sym.aead.digest_size.min;
	       param <= cap->sym.aead.digest_size.max;
	       param += cap->sym.aead.digest_size.increment)
	    {
	      vec_add1 (tmp_cap.aead.digest_sizes, param);
	      if (cap->sym.aead.digest_size.increment == 0)
		break;
	    }
	  break;
	default:
	  break;
	}

      /* tmp_cap's size vectors are handed over to supported_caps here and
       * re-initialized to 0 on the next loop iteration, so no double free */
      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  /* intersect the seeded capability list with every other device */
  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      /* start with the first device's id: it provided the seed, so it (and
       * any further queue entries for the same dev_id) is skipped below */
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
	{
	  dev_id = dev_inst->dev_id;
	  if (previous_dev_id != dev_id)
	    {
	      previous_dev_id = dev_id;
	      rte_cryptodev_info_get (dev_id, &dev_info);
	      dev_caps = &dev_info.capabilities[0];
	      cap_is_supported = cryptodev_remove_unsupported_param_sizes (
		&cmt->supported_caps[cap_id], dev_caps);
	      if (!cap_is_supported)
		{
		  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
		  /*no need to check other devices as this one doesn't support
		   * this temp_cap*/
		  break;
		}
	    }
	}
      /* only advance when the capability survived; after a deletion the
       * same index now refers to the next capability */
      if (cap_is_supported)
	cap_id++;
    }
}
1074
1075 static int
1076 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
1077 {
1078   cryptodev_main_t *cmt = &cryptodev_main;
1079   u32 n_queues = cryptodev_count_queue (vm->numa_node);
1080   u32 i;
1081
1082   if (n_queues < n_workers)
1083     return -1;
1084
1085   for (i = 0; i < rte_cryptodev_count (); i++)
1086     cryptodev_configure (vm, i);
1087
1088   if (vec_len (cmt->cryptodev_inst) == 0)
1089     return -1;
1090   cryptodev_get_common_capabilities ();
1091   vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);
1092
1093   /* if there is not enough device stop cryptodev */
1094   if (vec_len (cmt->cryptodev_inst) < n_workers)
1095     return -1;
1096
1097   return 0;
1098 }
1099
1100 static void
1101 is_drv_unique (u32 driver_id, u32 **unique_drivers)
1102 {
1103   u32 *unique_elt;
1104   u8 found = 0;
1105
1106   vec_foreach (unique_elt, *unique_drivers)
1107     {
1108       if (*unique_elt == driver_id)
1109         {
1110           found = 1;
1111           break;
1112         }
1113     }
1114
1115   if (!found)
1116     vec_add1 (*unique_drivers, driver_id);
1117 }
1118
/**
 * Plugin entry point: probe, configure and register the DPDK cryptodev
 * crypto engine.
 *
 * Probes all cryptodevs, computes their common capabilities, assigns one
 * queue-pair resource to each worker thread, registers the engine and its
 * key/datapath handlers, and finally switches VPP crypto to async mode.
 *
 * @param vm  vlib main
 * @return 0 on success, a clib_error_t describing the failure otherwise.
 */
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 node;
  u8 nodes = 0;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 eidx;
  u32 i;
  u32 *unique_drivers = 0;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  /* find the highest NUMA node index so per-numa data can be sized */
  clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
    {
      if (node >= nodes)
	nodes = node;
    }

  vec_validate (cmt->per_numa_data, nodes);
  vec_foreach (numa_data, cmt->per_numa_data)
    {
      vec_validate (numa_data->sess_pools, 0);
    }

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Not enough cryptodev resources");
      goto err_handling;
    }

  /* collect the distinct driver ids and the largest per-device private
   * session size, which sizes the shared session pools */
  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      rte_cryptodev_info_get (dev_id, &dev_info);
      u32 driver_id = dev_info.driver_id;
      is_drv_unique (driver_id, &unique_drivers);

      u32 sess_sz =
	rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
      cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
    }

  cmt->drivers_cnt = vec_len (unique_drivers);
  vec_free (unique_drivers);

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1,
                       CLIB_CACHE_LINE_BYTES);
  /* give every worker thread (skipping the main thread when workers exist)
   * its own cryptodev queue-pair resource */
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;

      if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
	  0)
	{
	  error = clib_error_return (0, "Failed to configure cryptodev");
	  goto err_handling;
	}
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  /* prefer the raw data-path API when the devices support it, otherwise
   * fall back to the crypto-op based data path */
  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are presented in
   * startup.conf. Assume it is wanted to be used, turn on async mode here.
   */
  vnet_crypto_request_async_mode (1);
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  return error;
}