/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

cryptodev_main_t cryptodev_main;

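/* Fill a DPDK symmetric crypto xform for an AEAD (AES-GCM) session. Only
 * AES-128/192/256-GCM vnet keys are accepted; the IV is the 12-byte GCM
 * nonce read from the fixed CRYPTODEV_IV_OFFSET in the op, and the digest
 * (tag) length is fixed at 16 bytes. */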
static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
                    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
                    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

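/* Fill a two-element cipher+auth xform chain for a linked (e.g.
 * AES-CBC + HMAC) key. For encryption the cipher xform comes first and the
 * auth xform generates the digest; for decryption the order is reversed
 * and the auth xform verifies the digest. The algorithm pair and digest
 * length are derived from the vnet async alg enum via the
 * foreach_cryptodev_link_async_alg table in cryptodev.h. */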
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
                      cryptodev_op_type_t op_type,
                      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;                                      \
    auth_algo = RTE_CRYPTO_AUTH_##d##_HMAC;                                   \
    digest_len = e;                                                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}

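/* Free a DPDK symmetric session: clear the per-driver private data on
 * every cryptodev first (rte_cryptodev_sym_session_clear), then release
 * the session handle itself. Safe to call with a NULL session. */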
static_always_inline void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}

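/* The helpers below test an algorithm/parameter combination against
 * cmt->supported_caps, i.e. the capability set common to all probed
 * cryptodevs, so a match means every configured device can handle it. */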
static int
check_cipher_support (enum rte_crypto_cipher_algorithm algo, u32 key_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
        continue;
      if (vcap->cipher.algo != algo)
        continue;
      vec_foreach (s, vcap->cipher.key_sizes)
        if (*s == key_size)
          return 1;
    }

  return 0;
}

static int
check_auth_support (enum rte_crypto_auth_algorithm algo, u32 digest_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
        continue;
      if (vcap->auth.algo != algo)
        continue;
      vec_foreach (s, vcap->auth.digest_sizes)
        if (*s == digest_size)
          return 1;
    }

  return 0;
}

static_always_inline int
check_aead_support (enum rte_crypto_aead_algorithm algo, u32 key_size,
                    u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *vcap;
  u32 *s;
  u32 key_match = 0, digest_match = 0, aad_match = 0;

  vec_foreach (vcap, cmt->supported_caps)
    {
      if (vcap->xform_type != RTE_CRYPTO_SYM_XFORM_AEAD)
        continue;
      if (vcap->aead.algo != algo)
        continue;
      vec_foreach (s, vcap->aead.digest_sizes)
        if (*s == digest_size)
          {
            digest_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.key_sizes)
        if (*s == key_size)
          {
            key_match = 1;
            break;
          }
      vec_foreach (s, vcap->aead.aad_sizes)
        if (*s == aad_size)
          {
            aad_match = 1;
            break;
          }
    }

  if (key_match == 1 && digest_match == 1 && aad_match == 1)
    return 1;

  return 0;
}

static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  u32 matched = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      switch (key->async_alg)
        {
#define _(a, b, c, d, e)                                                      \
  case VNET_CRYPTO_ALG_##a##_##d##_TAG##e:                                    \
    if (check_cipher_support (RTE_CRYPTO_CIPHER_##b, c) &&                    \
        check_auth_support (RTE_CRYPTO_AUTH_##d##_HMAC, e))                   \
      return 1;
          foreach_cryptodev_link_async_alg
#undef _
            default : return 0;
        }
      return 0;
    }

#define _(a, b, c, d, e, f, g)                                                \
  if (key->alg == VNET_CRYPTO_ALG_##a)                                        \
    {                                                                         \
      if (check_aead_support (RTE_CRYPTO_AEAD_##c, g, e, f))                  \
        matched++;                                                            \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

    if (matched < 2) return 0;

  return 1;
}

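/* Key add/del/modify hook shared by both data-path backends. DEL and
 * MODIFY destroy any sessions previously built for the key on every numa
 * node; ADD only pre-allocates the per-numa session pointer array. Actual
 * sessions are created later via cryptodev_session_create. */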
void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                        vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
        return;

      vec_foreach_index (i, cmt->per_numa_data)
        {
          /* keys of unsupported algs never get a session array allocated */
          if (!ckey->keys || !ckey->keys[i])
            continue;
          if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
            {
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
              cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

              CLIB_MEMORY_STORE_BARRIER ();
              ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
              ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
            }
        }
      return;
    }

  /* create key */

  /* do not create sessions for unsupported algs */
  if (cryptodev_check_supported_vnet_alg (key) == 0)
    return;

  /* the session table is indexed by numa node, not by key index */
  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

/*static*/ void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}

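/* Build the encrypt and decrypt sessions for a key on the calling
 * thread's numa node and initialize them on every probed cryptodev (once
 * per driver type). The aad length is stashed in the sessions'
 * opaque_data for later comparison by the data path. */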
int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                          u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
  u32 numa_node = vm->numa_node;
  int ret;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
  sess_pool = numa_data->sess_pool;
  sess_priv_pool = numa_data->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  /* free the sessions and report the error instead of leaking them */
  if (ret)
    goto clear_key;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);
      u32 driver_id = cdev->driver_id;

      /* if the session is already configured for the driver type, skip it:
         re-initializing would only bump the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[driver_id].data &&
          sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[driver_id].data)
        continue;

      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
        sess_priv_pool);
      if (ret < 0)
        return ret;
      ret = rte_cryptodev_sym_session_init (
        dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
        sess_priv_pool);
      if (ret < 0)
        return ret;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
      cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
    }
  return ret;
}

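/* How a cryptodev queue-pair resource gets bound to a worker thread:
 * AUTO picks the first free instance at init time, UPDATE re-binds a
 * worker to an explicitly chosen instance (used by the CLI below). */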
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 *  Assign a cryptodev resource to a worker.
 *  @param cet: the worker thread data
 *  @param cryptodev_inst_index: ignored when op is "ASSIGN_AUTO".
 *  @param op: the assignment method.
 *  @return: 0 on success, a negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
                           u32 cryptodev_inst_index,
                           cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* resource assignment is only allowed when no op is in flight */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
          vec_len (cmt->cryptodev_inst))
        return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning an already-used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
          == 1)
        return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
      {
        cinst = cmt->cryptodev_inst + idx;
        if (cinst->dev_id == cet->cryptodev_id &&
            cinst->q_id == cet->cryptodev_q)
          break;
      }
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
        return -EINVAL;
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
                                cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}

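/* Format one cryptodev instance line for the CLI: device name, queue id
 * and the worker thread (if any) the queue is currently assigned to. */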
static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
        s = format (s, "%u (%v)\n", thread_index,
                    vlib_worker_threads[thread_index].name);
        break;
      }
  }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                              vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
                   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  if (cmt->is_raw_api)
    vlib_cli_output (vm, "Cryptodev Data Path API used: RAW Data Path API");
  else
    vlib_cli_output (vm, "Cryptodev Data Path API used: crypto operation API");
  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};

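/* CLI handler for re-binding a worker thread to a specific cryptodev
 * queue pair, e.g. (illustrative invocation):
 *
 *   set cryptodev assignment thread 1 resource 2
 *
 * Both "thread" and "resource" arguments are mandatory; the indices are
 * validated against the per-thread data and instance vectors below. */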
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                             vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
        thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
        inst_present = 1;
      else
        {
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, line_input);
          return error;
        }
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error = clib_error_return (
        0, "cannot assign crypto resource to master thread");
      return error;
    }

  if (thread_index >= vec_len (cmt->per_thread_data) ||
      inst_index >= vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
                                   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error =
        clib_error_return (0, "cryptodev_assign_resource returned %d", ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
        "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};

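/* Sum the maximum number of queue pairs over all cryptodevs. Note the
 * numa argument is currently unused: queues on all nodes are counted. */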
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 i, q_count = 0;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

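/* Configure and start one cryptodev: set up every queue pair with
 * CRYPTODEV_NB_CRYPTO_OPS descriptors on the device's numa node, then
 * register one cryptodev_inst_t per queue so workers can claim them.
 * Devices without symmetric crypto support are skipped. */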
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_config cfg;
  struct rte_cryptodev_info info;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return -1;

  cfg.socket_id = info.device->numa_node;
  cfg.nb_queue_pairs = info.max_nb_queue_pairs;

  rte_cryptodev_configure (cryptodev_id, &cfg);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      struct rte_cryptodev_qp_conf qp_cfg;

      qp_cfg.mp_session = 0;
      qp_cfg.mp_session_private = 0;
      qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

      ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
                                            info.device->numa_node);
      if (ret)
        {
          clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
                        cryptodev_id, i, ret);
          break;
        }
    }

  if (i != info.max_nb_queue_pairs)
    return -1;

  /* start the device */
  rte_cryptodev_start (cryptodev_id);

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
                "%s_q%u", info.device->name, i);
    }

  return 0;
}

static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;
  return 0;
}

static int
cryptodev_supports_param_value (u32 *params, u32 param_value)
{
  u32 *value;
  vec_foreach (value, params)
    {
      if (*value == param_value)
        return 1;
    }
  return 0;
}

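/* Check whether the common capability set (i.e. every probed cryptodev)
 * supports the given algorithm with the given key/digest/aad sizes.
 * A sketch of a typical call, checking AES-GCM with a 16-byte key,
 * 16-byte tag and 12-byte AAD (illustrative values only):
 *
 *   struct rte_cryptodev_sym_capability_idx idx = {
 *     .type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *     .algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *   };
 *   int ok = cryptodev_check_cap_support (&idx, 16, 16, 12);
 */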
int
cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                             u32 key_size, u32 digest_size, u32 aad_size)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_capability_t *cap;
  vec_foreach (cap, cmt->supported_caps)
    {
      if (cap->xform_type != idx->type)
        continue;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
          cap->auth.algo == idx->algo.auth &&
          cryptodev_supports_param_value (cap->auth.digest_sizes, digest_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
          cap->cipher.algo == idx->algo.cipher &&
          cryptodev_supports_param_value (cap->cipher.key_sizes, key_size))
        return 1;

      if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
          cap->aead.algo == idx->algo.aead &&
          cryptodev_supports_param_value (cap->aead.key_sizes, key_size) &&
          cryptodev_supports_param_value (cap->aead.digest_sizes,
                                          digest_size) &&
          cryptodev_supports_param_value (cap->aead.aad_sizes, aad_size))
        return 1;
    }
  return 0;
}

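/* Drop from *param_sizes every size outside the device's supported
 * [param_size_min, param_size_max] range with the given increment; an
 * increment of 0 means only param_size_min itself is supported. */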
static void
remove_unsupported_param_size (u32 **param_sizes, u32 param_size_min,
                               u32 param_size_max, u32 increment)
{
  u32 i = 0;
  u32 cap_param_size;

  while (i < vec_len (*param_sizes))
    {
      u32 found_param = 0;
      for (cap_param_size = param_size_min; cap_param_size <= param_size_max;
           cap_param_size += increment)
        {
          if ((*param_sizes)[i] == cap_param_size)
            {
              found_param = 1;
              break;
            }
          if (increment == 0)
            break;
        }
      if (!found_param)
        /* the device cap has no such param size, so drop it from the
         * temp cap's params */
        vec_delete (*param_sizes, 1, i);
      else
        i++;
    }
}

static void
cryptodev_delete_cap (cryptodev_capability_t **temp_caps, u32 temp_cap_id)
{
  cryptodev_capability_t temp_cap = (*temp_caps)[temp_cap_id];

  switch (temp_cap.xform_type)
    {
    case RTE_CRYPTO_SYM_XFORM_AUTH:
      vec_free (temp_cap.auth.digest_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_CIPHER:
      vec_free (temp_cap.cipher.key_sizes);
      break;
    case RTE_CRYPTO_SYM_XFORM_AEAD:
      vec_free (temp_cap.aead.key_sizes);
      vec_free (temp_cap.aead.aad_sizes);
      vec_free (temp_cap.aead.digest_sizes);
      break;
    default:
      break;
    }
  vec_delete (*temp_caps, 1, temp_cap_id);
}

static u32
cryptodev_remove_unsupported_param_sizes (
  cryptodev_capability_t *temp_cap,
  const struct rte_cryptodev_capabilities *dev_caps)
{
  u32 cap_found = 0;
  const struct rte_cryptodev_capabilities *cap = &dev_caps[0];

  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->sym.xform_type == temp_cap->xform_type)
        switch (cap->sym.xform_type)
          {
          case RTE_CRYPTO_SYM_XFORM_CIPHER:
            if (cap->sym.cipher.algo == temp_cap->cipher.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->cipher.key_sizes, cap->sym.cipher.key_size.min,
                  cap->sym.cipher.key_size.max,
                  cap->sym.cipher.key_size.increment);
                if (vec_len (temp_cap->cipher.key_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AUTH:
            if (cap->sym.auth.algo == temp_cap->auth.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->auth.digest_sizes, cap->sym.auth.digest_size.min,
                  cap->sym.auth.digest_size.max,
                  cap->sym.auth.digest_size.increment);
                if (vec_len (temp_cap->auth.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          case RTE_CRYPTO_SYM_XFORM_AEAD:
            if (cap->sym.aead.algo == temp_cap->aead.algo)
              {
                remove_unsupported_param_size (
                  &temp_cap->aead.key_sizes, cap->sym.aead.key_size.min,
                  cap->sym.aead.key_size.max,
                  cap->sym.aead.key_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.aad_sizes, cap->sym.aead.aad_size.min,
                  cap->sym.aead.aad_size.max,
                  cap->sym.aead.aad_size.increment);
                remove_unsupported_param_size (
                  &temp_cap->aead.digest_sizes, cap->sym.aead.digest_size.min,
                  cap->sym.aead.digest_size.max,
                  cap->sym.aead.digest_size.increment);
                if (vec_len (temp_cap->aead.key_sizes) > 0 &&
                    vec_len (temp_cap->aead.aad_sizes) > 0 &&
                    vec_len (temp_cap->aead.digest_sizes) > 0)
                  cap_found = 1;
              }
            break;
          default:
            break;
          }
      if (cap_found)
        break;
      cap++;
    }

  return cap_found;
}

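/* Build cmt->supported_caps as the intersection of the capabilities of
 * all probed cryptodevs: seed the vector from the first device, then for
 * each further device prune unsupported parameter sizes and drop any
 * capability that device does not support at all. */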
static void
cryptodev_get_common_capabilities ()
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev_info dev_info;
  u32 previous_dev_id, dev_id;
  u32 cap_id = 0;
  u32 param;
  cryptodev_capability_t tmp_cap;
  const struct rte_cryptodev_capabilities *cap;
  const struct rte_cryptodev_capabilities *dev_caps;

  if (vec_len (cmt->cryptodev_inst) == 0)
    return;
  dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
  rte_cryptodev_info_get (dev_inst->dev_id, &dev_info);
  cap = &dev_info.capabilities[0];

  /* init capabilities vector */
  while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
    {
      if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
        {
          cap++;
          continue;
        }

      tmp_cap.xform_type = cap->sym.xform_type;
      switch (cap->sym.xform_type)
        {
        case RTE_CRYPTO_SYM_XFORM_CIPHER:
          tmp_cap.cipher.key_sizes = 0;
          tmp_cap.cipher.algo = cap->sym.cipher.algo;
          for (param = cap->sym.cipher.key_size.min;
               param <= cap->sym.cipher.key_size.max;
               param += cap->sym.cipher.key_size.increment)
            {
              vec_add1 (tmp_cap.cipher.key_sizes, param);
              if (cap->sym.cipher.key_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AUTH:
          tmp_cap.auth.algo = cap->sym.auth.algo;
          tmp_cap.auth.digest_sizes = 0;
          for (param = cap->sym.auth.digest_size.min;
               param <= cap->sym.auth.digest_size.max;
               param += cap->sym.auth.digest_size.increment)
            {
              vec_add1 (tmp_cap.auth.digest_sizes, param);
              if (cap->sym.auth.digest_size.increment == 0)
                break;
            }
          break;
        case RTE_CRYPTO_SYM_XFORM_AEAD:
          tmp_cap.aead.key_sizes = 0;
          tmp_cap.aead.aad_sizes = 0;
          tmp_cap.aead.digest_sizes = 0;
          tmp_cap.aead.algo = cap->sym.aead.algo;
          for (param = cap->sym.aead.key_size.min;
               param <= cap->sym.aead.key_size.max;
               param += cap->sym.aead.key_size.increment)
            {
              vec_add1 (tmp_cap.aead.key_sizes, param);
              if (cap->sym.aead.key_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.aad_size.min;
               param <= cap->sym.aead.aad_size.max;
               param += cap->sym.aead.aad_size.increment)
            {
              vec_add1 (tmp_cap.aead.aad_sizes, param);
              if (cap->sym.aead.aad_size.increment == 0)
                break;
            }
          for (param = cap->sym.aead.digest_size.min;
               param <= cap->sym.aead.digest_size.max;
               param += cap->sym.aead.digest_size.increment)
            {
              vec_add1 (tmp_cap.aead.digest_sizes, param);
              if (cap->sym.aead.digest_size.increment == 0)
                break;
            }
          break;
        default:
          break;
        }

      vec_add1 (cmt->supported_caps, tmp_cap);
      cap++;
    }

  while (cap_id < vec_len (cmt->supported_caps))
    {
      u32 cap_is_supported = 1;
      previous_dev_id = cmt->cryptodev_inst->dev_id;

      vec_foreach (dev_inst, cmt->cryptodev_inst)
        {
          dev_id = dev_inst->dev_id;
          if (previous_dev_id != dev_id)
            {
              previous_dev_id = dev_id;
              rte_cryptodev_info_get (dev_id, &dev_info);
              dev_caps = &dev_info.capabilities[0];
              cap_is_supported = cryptodev_remove_unsupported_param_sizes (
                &cmt->supported_caps[cap_id], dev_caps);
              if (!cap_is_supported)
                {
                  cryptodev_delete_cap (&cmt->supported_caps, cap_id);
                  /* no need to check the other devices: this one does not
                   * support this temp cap */
                  break;
                }
            }
        }
      if (cap_is_supported)
        cap_id++;
    }
}

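/* Probe all DPDK cryptodevs and collect their queue pairs. Fails (and
 * thereby disables the engine) when fewer usable queue pairs exist than
 * worker threads, since every worker needs a dedicated queue pair. */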
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  if (vec_len (cmt->cryptodev_inst) == 0)
    return -1;
  cryptodev_get_common_capabilities ();
  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  /* if there are not enough queue-pair instances, stop cryptodev */
  if (vec_len (cmt->cryptodev_inst) < n_workers)
    return -1;

  return 0;
}

static void
cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst;
  u32 max_sess = 0, max_dp = 0;

  vec_foreach (cinst, cmt->cryptodev_inst)
    {
      u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id);
      u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);

      max_sess = clib_max (sess_sz, max_sess);
      max_dp = clib_max (dp_sz, max_dp);
    }

  *max_sess_sz = max_sess;
  *max_dp_sz = max_dp;
}

static void
dpdk_disable_cryptodev_engine (vlib_main_t *vm)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;

  for (i = (vlib_num_workers () > 0); i < tm->n_vlib_mains; i++)
    {
      u32 numa = vlib_get_main_by_index (i)->numa_node;
      cryptodev_numa_data_t *numa_data;

      vec_validate (cmt->per_numa_data, numa);
      numa_data = cmt->per_numa_data + numa;
      if (numa_data->sess_pool)
        rte_mempool_free (numa_data->sess_pool);
      if (numa_data->sess_priv_pool)
        rte_mempool_free (numa_data->sess_priv_pool);
    }
}

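/* Engine entry point, called from the dpdk plugin. Probes the devices,
 * creates per-numa session and session-private mempools, auto-assigns one
 * queue pair per worker, registers the engine and its key handler with
 * vnet, and registers the raw data-path handler when one is provided,
 * falling back to the crypto-op handler otherwise. */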
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *cet;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  u32 sess_sz, dp_sz;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;

  cmt->iova_mode = rte_eal_iova_mode ();

  vec_validate (cmt->per_numa_data, vm->numa_node);

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  cryptodev_get_max_sz (&sess_sz, &dp_sz);

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      cet = cmt->per_thread_data + i;
      numa = vlib_get_main_by_index (i)->numa_node;

      vec_validate (cmt->per_numa_data, numa);
      numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

      if (!numa_data->sess_pool)
        {
          /* create session pool for the numa node */
          name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
          mp = rte_cryptodev_sym_session_pool_create (
            (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
          if (!mp)
            {
              error =
                clib_error_return (0, "Not enough memory for mp %s", name);
              goto err_handling;
            }
          vec_free (name);

          numa_data->sess_pool = mp;

          /* create session private pool for the numa node */
          name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
          mp =
            rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
                                0, 0, NULL, NULL, NULL, NULL, numa, 0);
          if (!mp)
            {
              error =
                clib_error_return (0, "Not enough memory for mp %s", name);
              vec_free (name);
              goto err_handling;
            }

          vec_free (name);

          numa_data->sess_priv_pool = mp;
        }

      cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
                                      "DPDK Cryptodev Engine");

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  if (cryptodev_register_raw_hdl)
    error = cryptodev_register_raw_hdl (vm, eidx);
  else
    error = cryptodev_register_cop_hdl (vm, eidx);

  if (error)
    goto err_handling;

  /* this engine is only enabled when cryptodev device(s) are present in
   * startup.conf; assume the user wants to use it and turn on async mode
   * here.
   */
  vnet_crypto_request_async_mode (1);
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);

  return error;
}