New upstream version 18.08
[deb_dpdk.git] / lib / librte_cryptodev / rte_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "rte_cryptodev_pmd.h"
43
/* Number of registered crypto drivers.
 * NOTE(review): not written anywhere in this chunk — presumably updated
 * by the driver-registration code later in the file; confirm there. */
static uint8_t nb_drivers;

/* Statically sized table holding every crypto device instance.
 * Slots are recycled via the attached/detached flag rather than being
 * allocated and freed dynamically. */
struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

/* Exported pointer to the first entry of the device table. */
struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];

/* Library-wide bookkeeping: the device table, per-device data pointers
 * and the current/maximum device counts. */
static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= &rte_crypto_devices[0],
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_CRYPTO_MAX_DEVS
};

/* Exported pointer to the global bookkeeping structure above. */
struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
61
62
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 * List membership is protected by rte_cryptodev_cb_lock (see above).
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};
76
/**
 * The crypto cipher algorithm strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_cipher_algorithm via designated
 * initializers; any enum value without an entry is implicitly NULL.
 * The string-to-enum helpers below start scanning at index 1, so
 * slot 0 is never matched.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_auth_algorithm; missing entries are NULL.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
};

/**
 * The crypto AEAD operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation strings identifiers.
 * Indexed by enum rte_crypto_asym_xform_type.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
};

/**
 * Asymmetric crypto operation strings identifiers.
 * Indexed by enum rte_crypto_asym_op_type.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};
193
194 int
195 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
196                 const char *algo_string)
197 {
198         unsigned int i;
199
200         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
201                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
202                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
203                         return 0;
204                 }
205         }
206
207         /* Invalid string */
208         return -1;
209 }
210
211 int
212 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
213                 const char *algo_string)
214 {
215         unsigned int i;
216
217         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
218                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
219                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
220                         return 0;
221                 }
222         }
223
224         /* Invalid string */
225         return -1;
226 }
227
228 int
229 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
230                 const char *algo_string)
231 {
232         unsigned int i;
233
234         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
235                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
236                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
237                         return 0;
238                 }
239         }
240
241         /* Invalid string */
242         return -1;
243 }
244
245 int __rte_experimental
246 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
247                 const char *xform_string)
248 {
249         unsigned int i;
250
251         for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
252                 if (strcmp(xform_string,
253                         rte_crypto_asym_xform_strings[i]) == 0) {
254                         *xform_enum = (enum rte_crypto_asym_xform_type) i;
255                         return 0;
256                 }
257         }
258
259         /* Invalid string */
260         return -1;
261 }
262
/**
 * The crypto auth operation strings identifiers.
 * It could be used in application command line.
 * Indexed by enum rte_crypto_auth_operation.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};
272
273 const struct rte_cryptodev_symmetric_capability *
274 rte_cryptodev_sym_capability_get(uint8_t dev_id,
275                 const struct rte_cryptodev_sym_capability_idx *idx)
276 {
277         const struct rte_cryptodev_capabilities *capability;
278         struct rte_cryptodev_info dev_info;
279         int i = 0;
280
281         rte_cryptodev_info_get(dev_id, &dev_info);
282
283         while ((capability = &dev_info.capabilities[i++])->op !=
284                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
285                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
286                         continue;
287
288                 if (capability->sym.xform_type != idx->type)
289                         continue;
290
291                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
292                         capability->sym.auth.algo == idx->algo.auth)
293                         return &capability->sym;
294
295                 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
296                         capability->sym.cipher.algo == idx->algo.cipher)
297                         return &capability->sym;
298
299                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
300                                 capability->sym.aead.algo == idx->algo.aead)
301                         return &capability->sym;
302         }
303
304         return NULL;
305
306 }
307
308 static int
309 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
310 {
311         unsigned int next_size;
312
313         /* Check lower/upper bounds */
314         if (size < range->min)
315                 return -1;
316
317         if (size > range->max)
318                 return -1;
319
320         /* If range is actually only one value, size is correct */
321         if (range->increment == 0)
322                 return 0;
323
324         /* Check if value is one of the supported sizes */
325         for (next_size = range->min; next_size <= range->max;
326                         next_size += range->increment)
327                 if (size == next_size)
328                         return 0;
329
330         return -1;
331 }
332
333 const struct rte_cryptodev_asymmetric_xform_capability * __rte_experimental
334 rte_cryptodev_asym_capability_get(uint8_t dev_id,
335                 const struct rte_cryptodev_asym_capability_idx *idx)
336 {
337         const struct rte_cryptodev_capabilities *capability;
338         struct rte_cryptodev_info dev_info;
339         unsigned int i = 0;
340
341         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
342         rte_cryptodev_info_get(dev_id, &dev_info);
343
344         while ((capability = &dev_info.capabilities[i++])->op !=
345                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
346                 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
347                         continue;
348
349                 if (capability->asym.xform_capa.xform_type == idx->type)
350                         return &capability->asym.xform_capa;
351         }
352         return NULL;
353 };
354
355 int
356 rte_cryptodev_sym_capability_check_cipher(
357                 const struct rte_cryptodev_symmetric_capability *capability,
358                 uint16_t key_size, uint16_t iv_size)
359 {
360         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
361                 return -1;
362
363         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
364                 return -1;
365
366         return 0;
367 }
368
369 int
370 rte_cryptodev_sym_capability_check_auth(
371                 const struct rte_cryptodev_symmetric_capability *capability,
372                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
373 {
374         if (param_range_check(key_size, &capability->auth.key_size) != 0)
375                 return -1;
376
377         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
378                 return -1;
379
380         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
381                 return -1;
382
383         return 0;
384 }
385
386 int
387 rte_cryptodev_sym_capability_check_aead(
388                 const struct rte_cryptodev_symmetric_capability *capability,
389                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
390                 uint16_t iv_size)
391 {
392         if (param_range_check(key_size, &capability->aead.key_size) != 0)
393                 return -1;
394
395         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
396                 return -1;
397
398         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
399                 return -1;
400
401         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
402                 return -1;
403
404         return 0;
405 }
406 int __rte_experimental
407 rte_cryptodev_asym_xform_capability_check_optype(
408         const struct rte_cryptodev_asymmetric_xform_capability *capability,
409         enum rte_crypto_asym_op_type op_type)
410 {
411         if (capability->op_types & (1 << op_type))
412                 return 1;
413
414         return 0;
415 }
416
417 int __rte_experimental
418 rte_cryptodev_asym_xform_capability_check_modlen(
419         const struct rte_cryptodev_asymmetric_xform_capability *capability,
420         uint16_t modlen)
421 {
422         /* no need to check for limits, if min or max = 0 */
423         if (capability->modlen.min != 0) {
424                 if (modlen < capability->modlen.min)
425                         return -1;
426         }
427
428         if (capability->modlen.max != 0) {
429                 if (modlen > capability->modlen.max)
430                         return -1;
431         }
432
433         /* in any case, check if given modlen is module increment */
434         if (capability->modlen.increment != 0) {
435                 if (modlen % (capability->modlen.increment))
436                         return -1;
437         }
438
439         return 0;
440 }
441
442
443 const char *
444 rte_cryptodev_get_feature_name(uint64_t flag)
445 {
446         switch (flag) {
447         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
448                 return "SYMMETRIC_CRYPTO";
449         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
450                 return "ASYMMETRIC_CRYPTO";
451         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
452                 return "SYM_OPERATION_CHAINING";
453         case RTE_CRYPTODEV_FF_CPU_SSE:
454                 return "CPU_SSE";
455         case RTE_CRYPTODEV_FF_CPU_AVX:
456                 return "CPU_AVX";
457         case RTE_CRYPTODEV_FF_CPU_AVX2:
458                 return "CPU_AVX2";
459         case RTE_CRYPTODEV_FF_CPU_AVX512:
460                 return "CPU_AVX512";
461         case RTE_CRYPTODEV_FF_CPU_AESNI:
462                 return "CPU_AESNI";
463         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
464                 return "HW_ACCELERATED";
465         case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
466                 return "IN_PLACE_SGL";
467         case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
468                 return "OOP_SGL_IN_SGL_OUT";
469         case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
470                 return "OOP_SGL_IN_LB_OUT";
471         case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
472                 return "OOP_LB_IN_SGL_OUT";
473         case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
474                 return "OOP_LB_IN_LB_OUT";
475         case RTE_CRYPTODEV_FF_CPU_NEON:
476                 return "CPU_NEON";
477         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
478                 return "CPU_ARM_CE";
479         case RTE_CRYPTODEV_FF_SECURITY:
480                 return "SECURITY_PROTOCOL";
481         default:
482                 return NULL;
483         }
484 }
485
486 struct rte_cryptodev *
487 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
488 {
489         return &rte_cryptodev_globals->devs[dev_id];
490 }
491
492 struct rte_cryptodev *
493 rte_cryptodev_pmd_get_named_dev(const char *name)
494 {
495         struct rte_cryptodev *dev;
496         unsigned int i;
497
498         if (name == NULL)
499                 return NULL;
500
501         for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
502                 dev = &rte_cryptodev_globals->devs[i];
503
504                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
505                                 (strcmp(dev->data->name, name) == 0))
506                         return dev;
507         }
508
509         return NULL;
510 }
511
512 unsigned int
513 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
514 {
515         struct rte_cryptodev *dev = NULL;
516
517         if (dev_id >= rte_cryptodev_globals->nb_devs)
518                 return 0;
519
520         dev = rte_cryptodev_pmd_get_dev(dev_id);
521         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
522                 return 0;
523         else
524                 return 1;
525 }
526
527
528 int
529 rte_cryptodev_get_dev_id(const char *name)
530 {
531         unsigned i;
532
533         if (name == NULL)
534                 return -1;
535
536         for (i = 0; i < rte_cryptodev_globals->nb_devs; i++)
537                 if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
538                                 == 0) &&
539                                 (rte_cryptodev_globals->devs[i].attached ==
540                                                 RTE_CRYPTODEV_ATTACHED))
541                         return i;
542
543         return -1;
544 }
545
546 uint8_t
547 rte_cryptodev_count(void)
548 {
549         return rte_cryptodev_globals->nb_devs;
550 }
551
552 uint8_t
553 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
554 {
555         uint8_t i, dev_count = 0;
556
557         for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
558                 if (rte_cryptodev_globals->devs[i].driver_id == driver_id &&
559                         rte_cryptodev_globals->devs[i].attached ==
560                                         RTE_CRYPTODEV_ATTACHED)
561                         dev_count++;
562
563         return dev_count;
564 }
565
566 uint8_t
567 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
568         uint8_t nb_devices)
569 {
570         uint8_t i, count = 0;
571         struct rte_cryptodev *devs = rte_cryptodev_globals->devs;
572         uint8_t max_devs = rte_cryptodev_globals->max_devs;
573
574         for (i = 0; i < max_devs && count < nb_devices; i++) {
575
576                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
577                         int cmp;
578
579                         cmp = strncmp(devs[i].device->driver->name,
580                                         driver_name,
581                                         strlen(driver_name));
582
583                         if (cmp == 0)
584                                 devices[count++] = devs[i].data->dev_id;
585                 }
586         }
587
588         return count;
589 }
590
591 void *
592 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
593 {
594         if (rte_crypto_devices[dev_id].feature_flags &
595                         RTE_CRYPTODEV_FF_SECURITY)
596                 return rte_crypto_devices[dev_id].security_ctx;
597
598         return NULL;
599 }
600
601 int
602 rte_cryptodev_socket_id(uint8_t dev_id)
603 {
604         struct rte_cryptodev *dev;
605
606         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
607                 return -1;
608
609         dev = rte_cryptodev_pmd_get_dev(dev_id);
610
611         return dev->data->socket_id;
612 }
613
614 static inline int
615 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
616                 int socket_id)
617 {
618         char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
619         const struct rte_memzone *mz;
620         int n;
621
622         /* generate memzone name */
623         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
624         if (n >= (int)sizeof(mz_name))
625                 return -EINVAL;
626
627         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
628                 mz = rte_memzone_reserve(mz_name,
629                                 sizeof(struct rte_cryptodev_data),
630                                 socket_id, 0);
631         } else
632                 mz = rte_memzone_lookup(mz_name);
633
634         if (mz == NULL)
635                 return -ENOMEM;
636
637         *data = mz->addr;
638         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
639                 memset(*data, 0, sizeof(struct rte_cryptodev_data));
640
641         return 0;
642 }
643
644 static uint8_t
645 rte_cryptodev_find_free_device_index(void)
646 {
647         uint8_t dev_id;
648
649         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
650                 if (rte_crypto_devices[dev_id].attached ==
651                                 RTE_CRYPTODEV_DETACHED)
652                         return dev_id;
653         }
654         return RTE_CRYPTO_MAX_DEVS;
655 }
656
/*
 * Allocate a new crypto device slot for a PMD.
 *
 * Fails (returns NULL) if a device with this name already exists or
 * the device table is full.  On first use of the slot the shared
 * device data is reserved/looked up via rte_cryptodev_data_alloc and
 * initialised; if the slot already has data attached (e.g. a secondary
 * process re-attaching) the existing device is returned untouched.
 */
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	/* Device names must be unique across the table. */
	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	/* Initialise the slot only on first use: data != NULL means the
	 * device was already set up. */
	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data *cryptodev_data =
				cryptodev_globals.data[dev_id];

		/* Reserve (primary) or look up (secondary) the memzone
		 * backing this device's shared data. */
		int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
				socket_id);

		if (retval < 0 || cryptodev_data == NULL)
			return NULL;

		cryptodev->data = cryptodev_data;

		snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s", name);

		cryptodev->data->dev_id = dev_id;
		cryptodev->data->socket_id = socket_id;
		cryptodev->data->dev_started = 0;

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		/* Mark the slot used only after everything else succeeded. */
		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}
706
707 int
708 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
709 {
710         int ret;
711
712         if (cryptodev == NULL)
713                 return -EINVAL;
714
715         /* Close device only if device operations have been set */
716         if (cryptodev->dev_ops) {
717                 ret = rte_cryptodev_close(cryptodev->data->dev_id);
718                 if (ret < 0)
719                         return ret;
720         }
721
722         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
723         cryptodev_globals.nb_devs--;
724         return 0;
725 }
726
727 uint16_t
728 rte_cryptodev_queue_pair_count(uint8_t dev_id)
729 {
730         struct rte_cryptodev *dev;
731
732         dev = &rte_crypto_devices[dev_id];
733         return dev->data->nb_queue_pairs;
734 }
735
/*
 * (Re)size the device's queue-pair pointer array to nb_qpairs entries.
 *
 * First-time configuration zmallocs the array; reconfiguration first
 * asks the PMD to release any queue pairs beyond the new count, then
 * reallocs the array and zeroes any newly added tail entries.  The
 * statement order matters: releasing before realloc ensures the PMD
 * never sees dangling per-qp state.
 *
 * Returns 0 on success; -EINVAL for bad arguments or a count above the
 * PMD's maximum, -ENOMEM on allocation failure, -ENOTSUP when a needed
 * PMD op is missing, or the PMD's own release error code.
 */
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	/* Ask the PMD for its limits before resizing anything. */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		/* When shrinking, let the PMD tear down the excess qps
		 * before their pointer slots disappear. */
		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		/* When growing, zero the new tail so unconfigured qps
		 * read back as NULL. */
		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
813
814 int
815 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
816 {
817         struct rte_cryptodev *dev;
818         int diag;
819
820         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
821                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
822                 return -EINVAL;
823         }
824
825         dev = &rte_crypto_devices[dev_id];
826
827         if (dev->data->dev_started) {
828                 CDEV_LOG_ERR(
829                     "device %d must be stopped to allow configuration", dev_id);
830                 return -EBUSY;
831         }
832
833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
834
835         /* Setup new number of queue pairs and reconfigure device. */
836         diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
837                         config->socket_id);
838         if (diag != 0) {
839                 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
840                                 dev_id, diag);
841                 return diag;
842         }
843
844         return (*dev->dev_ops->dev_configure)(dev, config);
845 }
846
847
848 int
849 rte_cryptodev_start(uint8_t dev_id)
850 {
851         struct rte_cryptodev *dev;
852         int diag;
853
854         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
855
856         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
857                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
858                 return -EINVAL;
859         }
860
861         dev = &rte_crypto_devices[dev_id];
862
863         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
864
865         if (dev->data->dev_started != 0) {
866                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
867                         dev_id);
868                 return 0;
869         }
870
871         diag = (*dev->dev_ops->dev_start)(dev);
872         if (diag == 0)
873                 dev->data->dev_started = 1;
874         else
875                 return diag;
876
877         return 0;
878 }
879
880 void
881 rte_cryptodev_stop(uint8_t dev_id)
882 {
883         struct rte_cryptodev *dev;
884
885         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
886                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
887                 return;
888         }
889
890         dev = &rte_crypto_devices[dev_id];
891
892         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
893
894         if (dev->data->dev_started == 0) {
895                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
896                         dev_id);
897                 return;
898         }
899
900         (*dev->dev_ops->dev_stop)(dev);
901         dev->data->dev_started = 0;
902 }
903
904 int
905 rte_cryptodev_close(uint8_t dev_id)
906 {
907         struct rte_cryptodev *dev;
908         int retval;
909
910         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
911                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
912                 return -1;
913         }
914
915         dev = &rte_crypto_devices[dev_id];
916
917         /* Device must be stopped before it can be closed */
918         if (dev->data->dev_started == 1) {
919                 CDEV_LOG_ERR("Device %u must be stopped before closing",
920                                 dev_id);
921                 return -EBUSY;
922         }
923
924         /* We can't close the device if there are outstanding sessions in use */
925         if (dev->data->session_pool != NULL) {
926                 if (!rte_mempool_full(dev->data->session_pool)) {
927                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
928                                         "has sessions still in use, free "
929                                         "all sessions before calling close",
930                                         (unsigned)dev_id);
931                         return -EBUSY;
932                 }
933         }
934
935         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
936         retval = (*dev->dev_ops->dev_close)(dev);
937
938         if (retval < 0)
939                 return retval;
940
941         return 0;
942 }
943
944 int
945 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
946                 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
947                 struct rte_mempool *session_pool)
948
949 {
950         struct rte_cryptodev *dev;
951
952         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
953                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
954                 return -EINVAL;
955         }
956
957         dev = &rte_crypto_devices[dev_id];
958         if (queue_pair_id >= dev->data->nb_queue_pairs) {
959                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
960                 return -EINVAL;
961         }
962
963         if (dev->data->dev_started) {
964                 CDEV_LOG_ERR(
965                     "device %d must be stopped to allow configuration", dev_id);
966                 return -EBUSY;
967         }
968
969         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
970
971         return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
972                         socket_id, session_pool);
973 }
974
975
976 int
977 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
978 {
979         struct rte_cryptodev *dev;
980
981         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
982                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
983                 return -ENODEV;
984         }
985
986         if (stats == NULL) {
987                 CDEV_LOG_ERR("Invalid stats ptr");
988                 return -EINVAL;
989         }
990
991         dev = &rte_crypto_devices[dev_id];
992         memset(stats, 0, sizeof(*stats));
993
994         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
995         (*dev->dev_ops->stats_get)(dev, stats);
996         return 0;
997 }
998
999 void
1000 rte_cryptodev_stats_reset(uint8_t dev_id)
1001 {
1002         struct rte_cryptodev *dev;
1003
1004         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1005                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1006                 return;
1007         }
1008
1009         dev = &rte_crypto_devices[dev_id];
1010
1011         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1012         (*dev->dev_ops->stats_reset)(dev);
1013 }
1014
1015
1016 void
1017 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1018 {
1019         struct rte_cryptodev *dev;
1020
1021         if (dev_id >= cryptodev_globals.nb_devs) {
1022                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1023                 return;
1024         }
1025
1026         dev = &rte_crypto_devices[dev_id];
1027
1028         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1029
1030         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1031         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1032
1033         dev_info->driver_name = dev->device->driver->name;
1034         dev_info->device = dev->device;
1035 }
1036
1037
1038 int
1039 rte_cryptodev_callback_register(uint8_t dev_id,
1040                         enum rte_cryptodev_event_type event,
1041                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1042 {
1043         struct rte_cryptodev *dev;
1044         struct rte_cryptodev_callback *user_cb;
1045
1046         if (!cb_fn)
1047                 return -EINVAL;
1048
1049         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1050                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1051                 return -EINVAL;
1052         }
1053
1054         dev = &rte_crypto_devices[dev_id];
1055         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1056
1057         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1058                 if (user_cb->cb_fn == cb_fn &&
1059                         user_cb->cb_arg == cb_arg &&
1060                         user_cb->event == event) {
1061                         break;
1062                 }
1063         }
1064
1065         /* create a new callback. */
1066         if (user_cb == NULL) {
1067                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1068                                 sizeof(struct rte_cryptodev_callback), 0);
1069                 if (user_cb != NULL) {
1070                         user_cb->cb_fn = cb_fn;
1071                         user_cb->cb_arg = cb_arg;
1072                         user_cb->event = event;
1073                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1074                 }
1075         }
1076
1077         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1078         return (user_cb == NULL) ? -ENOMEM : 0;
1079 }
1080
/*
 * Unregister user callbacks matching (cb_fn, event) on device @dev_id.
 *
 * A callback registered with cb_arg == (void *)-1 is treated as a
 * wildcard and removed regardless of the cb_arg passed here; otherwise
 * cb_arg must match the registered value.
 *
 * Returns 0 on success, -EINVAL for a NULL callback or invalid device
 * id, -EAGAIN if at least one matching callback is currently executing
 * (see rte_cryptodev_pmd_callback_process) and therefore was not freed.
 */
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	/* Fetch 'next' before a possible TAILQ_REMOVE invalidates 'cb'. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
1126
/*
 * Invoke all user callbacks registered for @event on @dev.
 *
 * Each matching entry is copied to a local before the cb_lock is
 * released, so the callback runs without the lock held and may itself
 * register or unregister callbacks. The 'active' flag makes
 * rte_cryptodev_callback_unregister() return -EAGAIN instead of
 * freeing an entry whose callback is mid-execution.
 */
void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy the entry so it stays usable while the lock is dropped. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
1148
1149
1150 int
1151 rte_cryptodev_sym_session_init(uint8_t dev_id,
1152                 struct rte_cryptodev_sym_session *sess,
1153                 struct rte_crypto_sym_xform *xforms,
1154                 struct rte_mempool *mp)
1155 {
1156         struct rte_cryptodev *dev;
1157         uint8_t index;
1158         int ret;
1159
1160         dev = rte_cryptodev_pmd_get_dev(dev_id);
1161
1162         if (sess == NULL || xforms == NULL || dev == NULL)
1163                 return -EINVAL;
1164
1165         index = dev->driver_id;
1166
1167         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1168
1169         if (sess->sess_private_data[index] == NULL) {
1170                 ret = dev->dev_ops->sym_session_configure(dev, xforms,
1171                                                         sess, mp);
1172                 if (ret < 0) {
1173                         CDEV_LOG_ERR(
1174                                 "dev_id %d failed to configure session details",
1175                                 dev_id);
1176                         return ret;
1177                 }
1178         }
1179
1180         return 0;
1181 }
1182
1183 int __rte_experimental
1184 rte_cryptodev_asym_session_init(uint8_t dev_id,
1185                 struct rte_cryptodev_asym_session *sess,
1186                 struct rte_crypto_asym_xform *xforms,
1187                 struct rte_mempool *mp)
1188 {
1189         struct rte_cryptodev *dev;
1190         uint8_t index;
1191         int ret;
1192
1193         dev = rte_cryptodev_pmd_get_dev(dev_id);
1194
1195         if (sess == NULL || xforms == NULL || dev == NULL)
1196                 return -EINVAL;
1197
1198         index = dev->driver_id;
1199
1200         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1201                                 -ENOTSUP);
1202
1203         if (sess->sess_private_data[index] == NULL) {
1204                 ret = dev->dev_ops->asym_session_configure(dev,
1205                                                         xforms,
1206                                                         sess, mp);
1207                 if (ret < 0) {
1208                         CDEV_LOG_ERR(
1209                                 "dev_id %d failed to configure session details",
1210                                 dev_id);
1211                         return ret;
1212                 }
1213         }
1214
1215         return 0;
1216 }
1217
1218 struct rte_cryptodev_sym_session *
1219 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1220 {
1221         struct rte_cryptodev_sym_session *sess;
1222
1223         /* Allocate a session structure from the session pool */
1224         if (rte_mempool_get(mp, (void **)&sess)) {
1225                 CDEV_LOG_ERR("couldn't get object from session mempool");
1226                 return NULL;
1227         }
1228
1229         /* Clear device session pointer.
1230          * Include the flag indicating presence of user data
1231          */
1232         memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1233
1234         return sess;
1235 }
1236
1237 struct rte_cryptodev_asym_session * __rte_experimental
1238 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1239 {
1240         struct rte_cryptodev_asym_session *sess;
1241
1242         /* Allocate a session structure from the session pool */
1243         if (rte_mempool_get(mp, (void **)&sess)) {
1244                 CDEV_LOG_ERR("couldn't get object from session mempool");
1245                 return NULL;
1246         }
1247
1248         /* Clear device session pointer.
1249          * Include the flag indicating presence of private data
1250          */
1251         memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1252
1253         return sess;
1254 }
1255
1256 int
1257 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1258                 struct rte_cryptodev_sym_session *sess)
1259 {
1260         struct rte_cryptodev *dev;
1261
1262         dev = rte_cryptodev_pmd_get_dev(dev_id);
1263
1264         if (dev == NULL || sess == NULL)
1265                 return -EINVAL;
1266
1267         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1268
1269         dev->dev_ops->sym_session_clear(dev, sess);
1270
1271         return 0;
1272 }
1273
1274 int __rte_experimental
1275 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1276                 struct rte_cryptodev_asym_session *sess)
1277 {
1278         struct rte_cryptodev *dev;
1279
1280         dev = rte_cryptodev_pmd_get_dev(dev_id);
1281
1282         if (dev == NULL || sess == NULL)
1283                 return -EINVAL;
1284
1285         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1286
1287         dev->dev_ops->asym_session_clear(dev, sess);
1288
1289         return 0;
1290 }
1291
1292 int
1293 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1294 {
1295         uint8_t i;
1296         void *sess_priv;
1297         struct rte_mempool *sess_mp;
1298
1299         if (sess == NULL)
1300                 return -EINVAL;
1301
1302         /* Check that all device private data has been freed */
1303         for (i = 0; i < nb_drivers; i++) {
1304                 sess_priv = get_sym_session_private_data(sess, i);
1305                 if (sess_priv != NULL)
1306                         return -EBUSY;
1307         }
1308
1309         /* Return session to mempool */
1310         sess_mp = rte_mempool_from_obj(sess);
1311         rte_mempool_put(sess_mp, sess);
1312
1313         return 0;
1314 }
1315
1316 int __rte_experimental
1317 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1318 {
1319         uint8_t i;
1320         void *sess_priv;
1321         struct rte_mempool *sess_mp;
1322
1323         if (sess == NULL)
1324                 return -EINVAL;
1325
1326         /* Check that all device private data has been freed */
1327         for (i = 0; i < nb_drivers; i++) {
1328                 sess_priv = get_asym_session_private_data(sess, i);
1329                 if (sess_priv != NULL)
1330                         return -EBUSY;
1331         }
1332
1333         /* Return session to mempool */
1334         sess_mp = rte_mempool_from_obj(sess);
1335         rte_mempool_put(sess_mp, sess);
1336
1337         return 0;
1338 }
1339
1340
1341 unsigned int
1342 rte_cryptodev_sym_get_header_session_size(void)
1343 {
1344         /*
1345          * Header contains pointers to the private data
1346          * of all registered drivers, and a flag which
1347          * indicates presence of user data
1348          */
1349         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1350 }
1351
1352 unsigned int __rte_experimental
1353 rte_cryptodev_asym_get_header_session_size(void)
1354 {
1355         /*
1356          * Header contains pointers to the private data
1357          * of all registered drivers, and a flag which
1358          * indicates presence of private data
1359          */
1360         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1361 }
1362
1363 unsigned int
1364 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1365 {
1366         struct rte_cryptodev *dev;
1367         unsigned int header_size = sizeof(void *) * nb_drivers;
1368         unsigned int priv_sess_size;
1369
1370         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1371                 return 0;
1372
1373         dev = rte_cryptodev_pmd_get_dev(dev_id);
1374
1375         if (*dev->dev_ops->sym_session_get_size == NULL)
1376                 return 0;
1377
1378         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1379
1380         /*
1381          * If size is less than session header size,
1382          * return the latter, as this guarantees that
1383          * sessionless operations will work
1384          */
1385         if (priv_sess_size < header_size)
1386                 return header_size;
1387
1388         return priv_sess_size;
1389
1390 }
1391
1392 unsigned int __rte_experimental
1393 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1394 {
1395         struct rte_cryptodev *dev;
1396         unsigned int header_size = sizeof(void *) * nb_drivers;
1397         unsigned int priv_sess_size;
1398
1399         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1400                 return 0;
1401
1402         dev = rte_cryptodev_pmd_get_dev(dev_id);
1403
1404         if (*dev->dev_ops->asym_session_get_size == NULL)
1405                 return 0;
1406
1407         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1408         if (priv_sess_size < header_size)
1409                 return header_size;
1410
1411         return priv_sess_size;
1412
1413 }
1414
1415 int __rte_experimental
1416 rte_cryptodev_sym_session_set_user_data(
1417                                         struct rte_cryptodev_sym_session *sess,
1418                                         void *data,
1419                                         uint16_t size)
1420 {
1421         uint16_t off_set = sizeof(void *) * nb_drivers;
1422         uint8_t *user_data_present = (uint8_t *)sess + off_set;
1423
1424         if (sess == NULL)
1425                 return -EINVAL;
1426
1427         *user_data_present = 1;
1428         off_set += sizeof(uint8_t);
1429         rte_memcpy((uint8_t *)sess + off_set, data, size);
1430         return 0;
1431 }
1432
1433 void * __rte_experimental
1434 rte_cryptodev_sym_session_get_user_data(
1435                                         struct rte_cryptodev_sym_session *sess)
1436 {
1437         uint16_t off_set = sizeof(void *) * nb_drivers;
1438         uint8_t *user_data_present = (uint8_t *)sess + off_set;
1439
1440         if (sess == NULL || !*user_data_present)
1441                 return NULL;
1442
1443         off_set += sizeof(uint8_t);
1444         return (uint8_t *)sess + off_set;
1445 }
1446
1447 /** Initialise rte_crypto_op mempool element */
1448 static void
1449 rte_crypto_op_init(struct rte_mempool *mempool,
1450                 void *opaque_arg,
1451                 void *_op_data,
1452                 __rte_unused unsigned i)
1453 {
1454         struct rte_crypto_op *op = _op_data;
1455         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1456
1457         memset(_op_data, 0, mempool->elt_size);
1458
1459         __rte_crypto_op_reset(op, type);
1460
1461         op->phys_addr = rte_mem_virt2iova(_op_data);
1462         op->mempool = mempool;
1463 }
1464
1465
1466 struct rte_mempool *
1467 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1468                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1469                 int socket_id)
1470 {
1471         struct rte_crypto_op_pool_private *priv;
1472
1473         unsigned elt_size = sizeof(struct rte_crypto_op) +
1474                         priv_size;
1475
1476         if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1477                 elt_size += sizeof(struct rte_crypto_sym_op);
1478         } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1479                 elt_size += sizeof(struct rte_crypto_asym_op);
1480         } else {
1481                 CDEV_LOG_ERR("Invalid op_type\n");
1482                 return NULL;
1483         }
1484
1485         /* lookup mempool in case already allocated */
1486         struct rte_mempool *mp = rte_mempool_lookup(name);
1487
1488         if (mp != NULL) {
1489                 priv = (struct rte_crypto_op_pool_private *)
1490                                 rte_mempool_get_priv(mp);
1491
1492                 if (mp->elt_size != elt_size ||
1493                                 mp->cache_size < cache_size ||
1494                                 mp->size < nb_elts ||
1495                                 priv->priv_size <  priv_size) {
1496                         mp = NULL;
1497                         CDEV_LOG_ERR("Mempool %s already exists but with "
1498                                         "incompatible parameters", name);
1499                         return NULL;
1500                 }
1501                 return mp;
1502         }
1503
1504         mp = rte_mempool_create(
1505                         name,
1506                         nb_elts,
1507                         elt_size,
1508                         cache_size,
1509                         sizeof(struct rte_crypto_op_pool_private),
1510                         NULL,
1511                         NULL,
1512                         rte_crypto_op_init,
1513                         &type,
1514                         socket_id,
1515                         0);
1516
1517         if (mp == NULL) {
1518                 CDEV_LOG_ERR("Failed to create mempool %s", name);
1519                 return NULL;
1520         }
1521
1522         priv = (struct rte_crypto_op_pool_private *)
1523                         rte_mempool_get_priv(mp);
1524
1525         priv->priv_size = priv_size;
1526         priv->type = type;
1527
1528         return mp;
1529 }
1530
1531 int
1532 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1533 {
1534         struct rte_cryptodev *dev = NULL;
1535         uint32_t i = 0;
1536
1537         if (name == NULL)
1538                 return -EINVAL;
1539
1540         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1541                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1542                                 "%s_%u", dev_name_prefix, i);
1543
1544                 if (ret < 0)
1545                         return ret;
1546
1547                 dev = rte_cryptodev_pmd_get_named_dev(name);
1548                 if (!dev)
1549                         return 0;
1550         }
1551
1552         return -1;
1553 }
1554
/* List of all registered crypto drivers; entries are appended by
 * rte_cryptodev_allocate_driver() and searched by the driver id/name
 * lookup helpers below.
 */
TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
1559
1560 int
1561 rte_cryptodev_driver_id_get(const char *name)
1562 {
1563         struct cryptodev_driver *driver;
1564         const char *driver_name;
1565
1566         if (name == NULL) {
1567                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
1568                 return -1;
1569         }
1570
1571         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
1572                 driver_name = driver->driver->name;
1573                 if (strncmp(driver_name, name, strlen(driver_name)) == 0)
1574                         return driver->id;
1575         }
1576         return -1;
1577 }
1578
1579 const char *
1580 rte_cryptodev_name_get(uint8_t dev_id)
1581 {
1582         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);
1583
1584         if (dev == NULL)
1585                 return NULL;
1586
1587         return dev->data->name;
1588 }
1589
1590 const char *
1591 rte_cryptodev_driver_name_get(uint8_t driver_id)
1592 {
1593         struct cryptodev_driver *driver;
1594
1595         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
1596                 if (driver->id == driver_id)
1597                         return driver->driver->name;
1598         return NULL;
1599 }
1600
1601 uint8_t
1602 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
1603                 const struct rte_driver *drv)
1604 {
1605         crypto_drv->driver = drv;
1606         crypto_drv->id = nb_drivers;
1607
1608         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
1609
1610         return nb_drivers++;
1611 }