/*
 * dpdk: fix cryptodev session handler
 * src/plugins/dpdk/cryptodev/cryptodev.c
 */
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/ipsec/ipsec.h>
22 #include <vpp/app/version.h>
23
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
27 #undef always_inline
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_cryptodev_pmd.h>
33 #include <rte_config.h>
34
35 #include "cryptodev.h"
36
/* Re-establish always_inline (undef'ed above so the DPDK headers compile
 * cleanly): in debug builds drop the forced-inline attribute so the
 * functions remain visible to the debugger. */
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

/* Global engine state: devices, per-numa mempools, per-thread queue
 * assignments and the per-key session table. */
cryptodev_main_t cryptodev_main;
44
45 static_always_inline int
46 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
47                     cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
48                     u32 aad_len)
49 {
50   struct rte_crypto_aead_xform *aead_xform = &xform->aead;
51   memset (xform, 0, sizeof (*xform));
52   xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
53   xform->next = 0;
54
55   if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
56       key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
57       key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
58     return -1;
59
60   aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
61   aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
62     RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
63   aead_xform->aad_length = aad_len;
64   aead_xform->digest_length = 16;
65   aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
66   aead_xform->iv.length = 12;
67   aead_xform->key.data = key->data;
68   aead_xform->key.length = vec_len (key->data);
69
70   return 0;
71 }
72
/**
 * Fill a pair of chained DPDK xforms (cipher + HMAC auth) for a linked
 * vnet crypto key (e.g. AES-CBC + SHA-HMAC).
 *
 * @param xforms   array of two xforms; chained so that encrypt runs
 *                 cipher-then-auth and decrypt runs auth-then-cipher
 * @param op_type  encrypt or decrypt
 * @param key      linked key holding indices of the cipher and integ keys
 * @return 0 on success, -1 if either sub-key is missing or the linked
 *         algorithm is not in foreach_cryptodev_link_async_alg
 */
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
                      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  /* resolve the two underlying keys referenced by the linked key */
  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  /* order the chain by direction: encrypt = cipher first, decrypt =
     verify first */
  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  /* map the vnet linked algorithm to DPDK cipher/auth algos and the
     truncated digest length (macro expands one case per supported combo) */
  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  /* 16-byte IV, placed at the fixed per-op offset */
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
136
137 static_always_inline void
138 cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
139 {
140   u32 n_devs, i;
141
142   if (sess == NULL)
143     return;
144
145   n_devs = rte_cryptodev_count ();
146
147   for (i = 0; i < n_devs; i++)
148     rte_cryptodev_sym_session_clear (i, sess);
149
150   rte_cryptodev_sym_session_free (sess);
151 }
152
153 static int
154 check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
155 {
156   cryptodev_main_t *cmt = &cryptodev_main;
157   cryptodev_capability_t *vcap;
158   u32 *s;
159
160   vec_foreach (vcap, cmt->supported_caps)
161     {
162       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
163         continue;
164       if (vcap->cipher.algo != algo)
165         continue;
166       vec_foreach (s, vcap->cipher.key_sizes)
167         if (*s == key_size)
168           return 1;
169     }
170
171   return 0;
172 }
173
174 static int
175 check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
176 {
177   cryptodev_main_t *cmt = &cryptodev_main;
178   cryptodev_capability_t *vcap;
179   u32 *s;
180
181   vec_foreach (vcap, cmt->supported_caps)
182     {
183       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
184         continue;
185       if (vcap->auth.algo != algo)
186         continue;
187       vec_foreach (s, vcap->auth.digest_sizes)
188         if (*s == digest_size)
189           return 1;
190     }
191
192   return 0;
193 }
194
195 static_always_inline int
196 check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
197                     u32 digest_size, u32 aad_size)
198 {
199   cryptodev_main_t *cmt = &cryptodev_main;
200   cryptodev_capability_t *vcap;
201   u32 *s;
202   u32 key_match = 0, digest_match = 0, aad_match = 0;
203
204   vec_foreach (vcap, cmt->supported_caps)
205     {
206       if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
207         continue;
208       if (vcap->aead.algo != algo)
209         continue;
210       vec_foreach (s, vcap->aead.digest_sizes)
211         if (*s == digest_size)
212           {
213             digest_match = 1;
214             break;
215           }
216       vec_foreach (s, vcap->aead.key_sizes)
217         if (*s == key_size)
218           {
219             key_match = 1;
220             break;
221           }
222       vec_foreach (s, vcap->aead.aad_sizes)
223         if (*s == aad_size)
224           {
225             aad_match = 1;
226             break;
227           }
228     }
229
230   if (key_match == 1 && digest_match == 1 && aad_match == 1)
231     return 1;
232
233   return 0;
234 }
235
/**
 * Decide whether a vnet crypto key can be backed by the probed cryptodevs.
 *
 * Linked keys: both the cipher (with its key size) and the HMAC auth
 * (with its truncated digest size) must be supported.
 * AEAD keys: the algorithm must be supported for both AAD sizes the
 * engine uses (the conversion macro expands one check per AAD variant,
 * hence the "matched < 2" test).
 *
 * @return 1 if supported, 0 otherwise.
 */
static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
            default : return 0;
        }
      return 0;
    }

  /* AEAD key: count how many (aad size) variants of the algorithm are
     supported; both must be for the key to be usable */
#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

    if (matched < 2) return 0;

  return 1;
}
270
271 void
272 cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
273                         vnet_crypto_key_index_t idx, u32 aad_len)
274 {
275   cryptodev_main_t *cmt = &cryptodev_main;
276   vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
277   cryptodev_key_t *ckey = 0;
278   u32 i;
279
280   vec_validate (cmt->keys, idx);
281   ckey = vec_elt_at_index (cmt->keys, idx);
282
283   if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
284     {
285       if (idx >= vec_len (cmt->keys))
286         return;
287
288       vec_foreach_index (i, cmt->per_numa_data)
289         {
290           if (!ckey->keys)
291             continue;
292           if (!ckey->keys[i])
293             continue;
294           if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
295             {
296               cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
297               cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);
298
299               CLIB_MEMORY_STORE_BARRIER ();
300               ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
301               ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
302             }
303         }
304       return;
305     }
306
307   /* create key */
308
309   /* do not create session for unsupported alg */
310   if (cryptodev_check_supported_vnet_alg (key) == 0)
311     return;
312
313   vec_validate (ckey->keys, idx);
314   vec_foreach_index (i, ckey->keys)
315     vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
316 }
317
/* Generic key handler registered with the vnet crypto layer; forwards to
 * cryptodev_sess_handler with aad_len = 8 (the AAD size used when no
 * better hint is available — the session is re-created later if a frame
 * arrives with a different AAD length). */
/*static*/ void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
324
325 int
326 cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
327                           u32 aad_len)
328 {
329   cryptodev_main_t *cmt = &cryptodev_main;
330   cryptodev_numa_data_t *numa_data;
331   cryptodev_inst_t *dev_inst;
332   vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
333   struct rte_mempool *sess_pool, *sess_priv_pool;
334   cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
335   struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
336   struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
337   struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
338   u32 numa_node = vm->numa_node;
339   int ret;
340
341   numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
342   sess_pool = numa_data->sess_pool;
343   sess_priv_pool = numa_data->sess_priv_pool;
344
345   sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
346     rte_cryptodev_sym_session_create (sess_pool);
347   if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
348     {
349       ret = -1;
350       goto clear_key;
351     }
352
353   sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
354     rte_cryptodev_sym_session_create (sess_pool);
355   if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
356     {
357       ret = -1;
358       goto clear_key;
359     }
360
361   if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
362     ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
363   else
364     ret =
365       prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
366   if (ret)
367     return 0;
368
369   if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
370     prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
371   else
372     prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
373
374   vec_foreach (dev_inst, cmt->cryptodev_inst)
375     {
376       u32 dev_id = dev_inst->dev_id;
377       struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);
378       u32 driver_id = cdev->driver_id;
379
380       /* if the session is already configured for the driver type, avoid
381          configuring it again to increase the session data's refcnt */
382       if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
383           sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
384         continue;
385
386       ret = rte_cryptodev_sym_session_init (
387         dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
388         sess_priv_pool);
389       ret = rte_cryptodev_sym_session_init (
390         dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
391         sess_priv_pool);
392       if (ret < 0)
393         return ret;
394     }
395
396   sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
397   sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
398
399   CLIB_MEMORY_STORE_BARRIER ();
400   ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
401     sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
402   ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
403     sessions[CRYPTODEV_OP_TYPE_DECRYPT];
404
405 clear_key:
406   if (ret != 0)
407     {
408       cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
409       cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
410     }
411   return ret;
412 }
413
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0, /**< take the next free instance */
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,   /**< move a worker to a chosen instance */
} cryptodev_resource_assign_op_t;

/**
 *  assign a cryptodev resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 *  @param op: the assignment method.
 *  @return: 0 if successfully, negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assign resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      /* every dev/queue instance is already taken */
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      /* claim the first unused instance under the lock so two workers
         cannot grab the same dev/queue pair */
      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;

      /* locate the instance the worker currently owns so it can be
         released; after a full scan idx == vec_len means "not found" */
      vec_foreach_index (idx, cmt->cryptodev_inst)
      {
        cinst = cmt->cryptodev_inst + idx;
        if (cinst->dev_id == cet->cryptodev_id &&
            cinst->q_id == cet->cryptodev_q)
          break;
      }
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
        return -EINVAL;
      /* swap ownership bits and repoint the worker at the new instance */
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
485
486 static u8 *
487 format_cryptodev_inst (u8 * s, va_list * args)
488 {
489   cryptodev_main_t *cmt = &cryptodev_main;
490   u32 inst = va_arg (*args, u32);
491   cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
492   u32 thread_index = 0;
493   struct rte_cryptodev_info info;
494
495   rte_cryptodev_info_get (cit->dev_id, &info);
496   s = format (s, "%-25s%-10u", info.device->name, cit->q_id);
497
498   vec_foreach_index (thread_index, cmt->per_thread_data)
499   {
500     cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
501     if (vlib_num_workers () > 0 && thread_index == 0)
502       continue;
503
504     if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
505       {
506         s = format (s, "%u (%v)\n", thread_index,
507                     vlib_worker_threads[thread_index].name);
508         break;
509       }
510   }
511
512   if (thread_index == vec_len (cmt->per_thread_data))
513     s = format (s, "%s\n", "free");
514
515   return s;
516 }
517
518 static clib_error_t *
519 cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
520                               vlib_cli_command_t * cmd)
521 {
522   cryptodev_main_t *cmt = &cryptodev_main;
523   u32 inst;
524
525   vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
526                    "Assigned-to");
527   if (vec_len (cmt->cryptodev_inst) == 0)
528     {
529       vlib_cli_output (vm, "(nil)\n");
530       return 0;
531     }
532
533   vec_foreach_index (inst, cmt->cryptodev_inst)
534     vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
535
536   if (cmt->is_raw_api)
537     vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
538   else
539     vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
540   return 0;
541 }
542
/* CLI registration: "show cryptodev assignment" */
VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
548
549 static clib_error_t *
550 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
551                              vlib_cli_command_t * cmd)
552 {
553   cryptodev_main_t *cmt = &cryptodev_main;
554   cryptodev_engine_thread_t *cet;
555   unformat_input_t _line_input, *line_input = &_line_input;
556   u32 thread_index, inst_index;
557   u32 thread_present = 0, inst_present = 0;
558   clib_error_t *error = 0;
559   int ret;
560
561   /* Get a line of input. */
562   if (!unformat_user (input, unformat_line_input, line_input))
563     return 0;
564
565   while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
566     {
567       if (unformat (line_input, "thread %u", &thread_index))
568         thread_present = 1;
569       else if (unformat (line_input, "resource %u", &inst_index))
570         inst_present = 1;
571       else
572         {
573           error = clib_error_return (0, "unknown input `%U'",
574                                      format_unformat_error, line_input);
575           return error;
576         }
577     }
578
579   if (!thread_present || !inst_present)
580     {
581       error = clib_error_return (0, "mandatory argument(s) missing");
582       return error;
583     }
584
585   if (thread_index == 0 && vlib_num_workers () > 0)
586     {
587       error =
588         clib_error_return (0, "assign crypto resource for master thread");
589       return error;
590     }
591
592   if (thread_index > vec_len (cmt->per_thread_data) ||
593       inst_index > vec_len (cmt->cryptodev_inst))
594     {
595       error = clib_error_return (0, "wrong thread id or resource id");
596       return error;
597     }
598
599   cet = cmt->per_thread_data + thread_index;
600   ret = cryptodev_assign_resource (cet, inst_index,
601                                    CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
602   if (ret)
603     {
604       error =
605         clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
606       return error;
607     }
608
609   return 0;
610 }
611
/* CLI registration: "set cryptodev assignment thread <t> resource <r>" */
VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
        "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
618
619 static u32
620 cryptodev_count_queue (u32 numa)
621 {
622   struct rte_cryptodev_info info;
623   u32 n_cryptodev = rte_cryptodev_count ();
624   u32 i, q_count = 0;
625
626   for (i = 0; i < n_cryptodev; i++)
627     {
628       rte_cryptodev_info_get (i, &info);
629       q_count += info.max_nb_queue_pairs;
630     }
631
632   return q_count;
633 }
634
635 static int
636 cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
637 {
638   struct rte_cryptodev_config cfg;
639   struct rte_cryptodev_info info;
640   cryptodev_main_t *cmt = &cryptodev_main;
641   u32 i;
642   int ret;
643
644   rte_cryptodev_info_get (cryptodev_id, &info);
645
646   if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
647     return -1;
648
649   cfg.socket_id = info.device->numa_node;
650   cfg.nb_queue_pairs = info.max_nb_queue_pairs;
651
652   rte_cryptodev_configure (cryptodev_id, &cfg);
653
654   for (i = 0; i < info.max_nb_queue_pairs; i++)
655     {
656       struct rte_cryptodev_qp_conf qp_cfg;
657
658       qp_cfg.mp_session = 0;
659       qp_cfg.mp_session_private = 0;
660       qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
661
662       ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
663                                             info.device->numa_node);
664       if (ret)
665         {
666           clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
667                         cryptodev_id, i, ret);
668           break;
669         }
670     }
671
672   if (i != info.max_nb_queue_pairs)
673     return -1;
674
675   /* start the device */
676   rte_cryptodev_start (cryptodev_id);
677
678   for (i = 0; i < info.max_nb_queue_pairs; i++)
679     {
680       cryptodev_inst_t *cdev_inst;
681       vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
682       cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
683       cdev_inst->dev_id = cryptodev_id;
684       cdev_inst->q_id = i;
685
686       snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
687                 "%s_q%u", info.device->name, i);
688     }
689
690   return 0;
691 }
692
693 static int
694 cryptodev_cmp (void *v1, void *v2)
695 {
696   cryptodev_inst_t *a1 = v1;
697   cryptodev_inst_t *a2 = v2;
698
699   if (a1->q_id > a2->q_id)
700     return 1;
701   if (a1->q_id < a2->q_id)
702     return -1;
703   return 0;
704 }
705
706 static int
707 cryptodev_supports_param_value (u32 *params, u32 param_value)
708 {
709   u32 *value;
710   vec_foreach (value, params)
711     {
712       if (*value == param_value)
713         return 1;
714     }
715   return 0;
716 }
717
718 int
719 cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
720                              u32 key_size, u32 digest_size, u32 aad_size)
721 {
722   cryptodev_main_t *cmt = &cryptodev_main;
723   cryptodev_capability_t *cap;
724   vec_foreach (cap, cmt->supported_caps)
725     {
726
727       if (cap->xform_type != idx->type)
728         continue;
729
730       if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
731           cap->auth.algo == idx->algo.auth &&
732           cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
733         return 1;
734
735       if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
736           cap->cipher.algo == idx->algo.cipher &&
737           cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
738         return 1;
739
740       if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
741           cap->aead.algo == idx->algo.aead &&
742           cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
743           cryptodev_supports_param_value (cap->aead.digest_sizes,
744                                           digest_size) &&
745           cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
746         return 1;
747     }
748   return 0;
749 }
750
/**
 * Intersect a vector of candidate parameter sizes with a device's
 * advertised [min, max, increment] range: every size not expressible by
 * the range is deleted from *param_sizes in place.
 *
 * An increment of 0 means the device supports only the single value
 * param_size_min.
 */
static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      /* walk the device's range looking for the candidate size */
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          /* increment 0 -> only the min value exists; avoid looping forever */
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* no such param_size in cap so delete  this size in temp_cap params */
        /* note: i is intentionally NOT advanced here — the next element
           shifts into slot i after the delete */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}
779
780 static void
781 cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
782 {
783   cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];
784
785   switch (temp_cap.xform_type)
786     {
787     case RTE_CRYPTO_SYM_XFORM_AUTH:
788       vec_free (temp_cap.auth.digest_sizes);
789       break;
790     case RTE_CRYPTO_SYM_XFORM_CIPHER:
791       vec_free (temp_cap.cipher.key_sizes);
792       break;
793     case RTE_CRYPTO_SYM_XFORM_AEAD:
794       vec_free (temp_cap.aead.key_sizes);
795       vec_free (temp_cap.aead.aad_sizes);
796       vec_free (temp_cap.aead.digest_sizes);
797       break;
798     default:
799       break;
800     }
801   vec_delete (*temp_caps, 1, temp_cap_id);
802 }
803
/**
 * Narrow one candidate capability to what a given device supports: find
 * the device capability matching temp_cap's xform type and algorithm and
 * intersect every parameter-size vector with the device's ranges.
 *
 * @return 1 if the device supports the capability with at least one size
 *         left in every parameter vector, 0 otherwise.
 */
static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  /* dev_caps is terminated by an UNDEFINED entry */
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
        switch (cap->sym.xform_type)
          {
          case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (cap->sym.cipher.algo == temp_cap->cipher.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
                  cap->sym.cipher.key_size.max,
                  cap->sym.cipher.key_size.increment);
                if (vec_len (temp_cap->cipher.key_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (cap->sym.auth.algo == temp_cap->auth.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
                  cap->sym.auth.digest_size.max,
                  cap->sym.auth.digest_size.increment);
                if (vec_len (temp_cap->auth.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AEAD:
            /* AEAD needs all three parameter vectors non-empty */
            if (cap->sym.aead.algo == temp_cap->aead.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
                  cap->sym.aead.key_size.max,
                  cap->sym.aead.key_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
                  cap->sym.aead.aad_size.max,
                  cap->sym.aead.aad_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
                  cap->sym.aead.digest_size.max,
                  cap->sym.aead.digest_size.increment);
                if (vec_len (temp_cap->aead.key_sizes) > 0 &&
                    vec_len (temp_cap->aead.aad_sizes) > 0 &&
                    vec_len (temp_cap->aead.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          default:
            break;
          }
      /* stop at the first device capability that matches */
      if (cap_found)
        break;
      cap++;
    }

  return cap_found;
}
870
/**
 * Build cmt->supported_caps: the set of symmetric capabilities common to
 * every probed cryptodev.
 *
 * Pass 1 seeds the vector from the first device, expanding each DPDK
 * [min, max, increment] range into an explicit vector of sizes.
 * Pass 2 walks the remaining devices and intersects each seeded
 * capability with their advertised ranges, dropping any capability some
 * device cannot satisfy.
 */
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /*init capabilities vector*/
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
        {
          cap++;
          continue;
        }

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
        {
        case RTE_CRYPTO_SYM_XFORM_CIPHER:
          tmp_cap.cipher.key_sizes = 0;
          tmp_cap.cipher.algo = cap->sym.cipher.algo;
          /* expand the key-size range; increment 0 means one value only */
          for (param = cap->sym.cipher.key_size.min;
               param <= cap->sym.cipher.key_size.max;
               param += cap->sym.cipher.key_size.increment)
            {
              vec_add1 (tmp_cap.cipher.key_sizes, param);
              if (cap->sym.cipher.key_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AUTH:
          tmp_cap.auth.algo = cap->sym.auth.algo;
          tmp_cap.auth.digest_sizes = 0;
          for (param = cap->sym.auth.digest_size.min;
               param <= cap->sym.auth.digest_size.max;
               param += cap->sym.auth.digest_size.increment)
            {
              vec_add1 (tmp_cap.auth.digest_sizes, param);
              if (cap->sym.auth.digest_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AEAD:
          tmp_cap.aead.key_sizes = 0;
          tmp_cap.aead.aad_sizes = 0;
          tmp_cap.aead.digest_sizes = 0;
          tmp_cap.aead.algo = cap->sym.aead.algo;
          for (param = cap->sym.aead.key_size.min;
               param <= cap->sym.aead.key_size.max;
               param += cap->sym.aead.key_size.increment)
            {
              vec_add1 (tmp_cap.aead.key_sizes, param);
              if (cap->sym.aead.key_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.aad_size.min;
               param <= cap->sym.aead.aad_size.max;
               param += cap->sym.aead.aad_size.increment)
            {
              vec_add1 (tmp_cap.aead.aad_sizes, param);
              if (cap->sym.aead.aad_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.digest_size.min;
               param <= cap->sym.aead.digest_size.max;
               param += cap->sym.aead.digest_size.increment)
            {
              vec_add1 (tmp_cap.aead.digest_sizes, param);
              if (cap->sym.aead.digest_size.increment == 0)
                break;
            }
          break;
        default:
          /* NOTE(review): an unexpected xform type pushes tmp_cap with
             whatever size vectors remain from the previous iteration —
             presumably unreachable for symmetric caps; confirm */
          break;
        }

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  /* pass 2: intersect each seeded capability with every other device */
  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
        {
          dev_id = dev_inst->dev_id;
          /* instances share dev ids (one per queue); only re-check when
             the device actually changes */
          if (previous_dev_id != dev_id)
            {
              previous_dev_id = dev_id;
              rte_cryptodev_info_get (dev_id, &dev_info);
              dev_caps = &dev_info.capabilities[0];
              cap_is_supported = cryptodev_remove_unsupported_param_sizes (
                &cmt->supported_caps[cap_id], dev_caps);
              if (!cap_is_supported)
                {
                  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
                  /*no need to check other devices as this one doesn't support
                   * this temp_cap*/
                  break;
                }
            }
        }
      /* only advance past entries that survived; a deleted entry shifts
         the next capability into cap_id */
      if (cap_is_supported)
        cap_id++;
    }
}
992
993 static int
994 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
995 {
996   cryptodev_main_t *cmt = &cryptodev_main;
997   u32 n_queues = cryptodev_count_queue (vm->numa_node);
998   u32 i;
999
1000   if (n_queues < n_workers)
1001     return -1;
1002
1003   for (i = 0; i < rte_cryptodev_count (); i++)
1004     cryptodev_configure (vm, i);
1005
1006   if (vec_len (cmt->cryptodev_inst) == 0)
1007     return -1;
1008   cryptodev_get_common_capabilities ();
1009   vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);
1010
1011   /* if there is not enough device stop cryptodev */
1012   if (vec_len (cmt->cryptodev_inst) < n_workers)
1013     return -1;
1014
1015   return 0;
1016 }
1017
1018 static void
1019 cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz)
1020 {
1021   cryptodev_main_t *cmt = &cryptodev_main;
1022   cryptodev_inst_t *cinst;
1023   u32 max_sess = 0, max_dp = 0;
1024
1025   vec_foreach (cinst, cmt->cryptodev_inst)
1026     {
1027       u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id);
1028       u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);
1029
1030       max_sess = clib_max (sess_sz, max_sess);
1031       max_dp = clib_max (dp_sz, max_dp);
1032     }
1033
1034   *max_sess_sz = max_sess;
1035   *max_dp_sz = max_dp;
1036 }
1037
1038 static void
1039 dpdk_disable_cryptodev_engine (vlib_main_t *vm)
1040 {
1041   vlib_thread_main_t *tm = vlib_get_thread_main ();
1042   cryptodev_main_t *cmt = &cryptodev_main;
1043   u32 i;
1044
1045   for (i = (vlib_num_workers () > 0); i < tm->n_vlib_mains; i++)
1046     {
1047       u32 numa = vlib_get_main_by_index (i)->numa_node;
1048       cryptodev_numa_data_t *numa_data;
1049
1050       vec_validate (cmt->per_numa_data, numa);
1051       numa_data = cmt->per_numa_data + numa;
1052       if (numa_data->sess_pool)
1053         rte_mempool_free (numa_data->sess_pool);
1054       if (numa_data->sess_priv_pool)
1055         rte_mempool_free (numa_data->sess_priv_pool);
1056     }
1057 }
1058
1059 clib_error_t *
1060 dpdk_cryptodev_init (vlib_main_t * vm)
1061 {
1062   cryptodev_main_t *cmt = &cryptodev_main;
1063   vlib_thread_main_t *tm = vlib_get_thread_main ();
1064   cryptodev_engine_thread_t *cet;
1065   cryptodev_numa_data_t *numa_data;
1066   struct rte_mempool *mp;
1067   u32 skip_master = vlib_num_workers () > 0;
1068   u32 n_workers = tm->n_vlib_mains - skip_master;
1069   u32 numa = vm->numa_node;
1070   u32 sess_sz, dp_sz;
1071   u32 eidx;
1072   u32 i;
1073   u8 *name = 0;
1074   clib_error_t *error;
1075
1076   cmt->iova_mode = rte_eal_iova_mode ();
1077
1078   vec_validate (cmt->per_numa_data, vm->numa_node);
1079
1080   /* probe all cryptodev devices and get queue info */
1081   if (cryptodev_probe (vm, n_workers) < 0)
1082     {
1083       error = clib_error_return (0, "Failed to configure cryptodev");
1084       goto err_handling;
1085     }
1086
1087   cryptodev_get_max_sz (&sess_sz, &dp_sz);
1088
1089   clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
1090   clib_spinlock_init (&cmt->tlock);
1091
1092   vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1,
1093                        CLIB_CACHE_LINE_BYTES);
1094   for (i = skip_master; i < tm->n_vlib_mains; i++)
1095     {
1096       cet = cmt->per_thread_data + i;
1097       numa = vlib_get_main_by_index (i)->numa_node;
1098
1099       vec_validate (cmt->per_numa_data, numa);
1100       numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
1101
1102       if (!numa_data->sess_pool)
1103         {
1104           /* create session pool for the numa node */
1105           name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
1106           mp = rte_cryptodev_sym_session_pool_create (
1107             (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
1108           if (!mp)
1109             {
1110               error =
1111                 clib_error_return (0, "Not enough memory for mp %s", name);
1112               goto err_handling;
1113             }
1114           vec_free (name);
1115
1116           numa_data->sess_pool = mp;
1117
1118           /* create session private pool for the numa node */
1119           name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
1120           mp =
1121             rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
1122                                 0, 0, NULL, NULL, NULL, NULL, numa, 0);
1123           if (!mp)
1124             {
1125               error =
1126                 clib_error_return (0, "Not enough memory for mp %s", name);
1127               vec_free (name);
1128               goto err_handling;
1129             }
1130
1131           vec_free (name);
1132
1133           numa_data->sess_priv_pool = mp;
1134         }
1135
1136       cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
1137     }
1138
1139   /* register handler */
1140   eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
1141                                       "DPDK Cryptodev Engine");
1142
1143   vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
1144
1145   if (cryptodev_register_raw_hdl)
1146     error = cryptodev_register_raw_hdl (vm, eidx);
1147   else
1148     error = cryptodev_register_cop_hdl (vm, eidx);
1149
1150   if (error)
1151     goto err_handling;
1152
1153   /* this engine is only enabled when cryptodev device(s) are presented in
1154    * startup.conf. Assume it is wanted to be used, turn on async mode here.
1155    */
1156   vnet_crypto_request_async_mode (1);
1157   ipsec_set_async_mode (1);
1158
1159   return 0;
1160
1161 err_handling:
1162   dpdk_disable_cryptodev_engine (vm);
1163
1164   return error;
1165 }