/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <inttypes.h>

#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_memzone.h>

#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"
#include "rte_compressdev_pmd.h"

#define RTE_COMPRESSDEV_DETACHED  (0)
#define RTE_COMPRESSDEV_ATTACHED  (1)

struct rte_compressdev rte_comp_devices[RTE_COMPRESS_MAX_DEVS];

struct rte_compressdev *rte_compressdevs = &rte_comp_devices[0];

static struct rte_compressdev_global compressdev_globals = {
                .devs                   = &rte_comp_devices[0],
                .data                   = { NULL },
                .nb_devs                = 0,
                .max_devs               = RTE_COMPRESS_MAX_DEVS
};

struct rte_compressdev_global *rte_compressdev_globals = &compressdev_globals;

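/*
 * Return the device's capability entry for one algorithm, or NULL when the
 * algorithm is not listed. A minimal lookup sketch (illustrative only;
 * RTE_COMP_ALGO_DEFLATE is assumed to be one of the algorithm identifiers
 * declared in rte_comp.h):
 *
 *	const struct rte_compressdev_capabilities *cap =
 *		rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);
 *	if (cap == NULL)
 *		return -ENOTSUP;
 */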
const struct rte_compressdev_capabilities * __rte_experimental
rte_compressdev_capability_get(uint8_t dev_id,
                        enum rte_comp_algorithm algo)
{
        const struct rte_compressdev_capabilities *capability;
        struct rte_compressdev_info dev_info;
        int i = 0;

        if (dev_id >= compressdev_globals.nb_devs) {
                COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
                return NULL;
        }
        rte_compressdev_info_get(dev_id, &dev_info);

        while ((capability = &dev_info.capabilities[i++])->algo !=
                        RTE_COMP_ALGO_UNSPECIFIED) {
                if (capability->algo == algo)
                        return capability;
        }

        return NULL;
}

const char * __rte_experimental
rte_compressdev_get_feature_name(uint64_t flag)
{
        switch (flag) {
        case RTE_COMPDEV_FF_HW_ACCELERATED:
                return "HW_ACCELERATED";
        case RTE_COMPDEV_FF_CPU_SSE:
                return "CPU_SSE";
        case RTE_COMPDEV_FF_CPU_AVX:
                return "CPU_AVX";
        case RTE_COMPDEV_FF_CPU_AVX2:
                return "CPU_AVX2";
        case RTE_COMPDEV_FF_CPU_AVX512:
                return "CPU_AVX512";
        case RTE_COMPDEV_FF_CPU_NEON:
                return "CPU_NEON";
        default:
                return NULL;
        }
}

static struct rte_compressdev *
rte_compressdev_get_dev(uint8_t dev_id)
{
        return &rte_compressdev_globals->devs[dev_id];
}

struct rte_compressdev * __rte_experimental
rte_compressdev_pmd_get_named_dev(const char *name)
{
        struct rte_compressdev *dev;
        unsigned int i;

        if (name == NULL)
                return NULL;

        for (i = 0; i < rte_compressdev_globals->max_devs; i++) {
                dev = &rte_compressdev_globals->devs[i];

                if ((dev->attached == RTE_COMPRESSDEV_ATTACHED) &&
                                (strcmp(dev->data->name, name) == 0))
                        return dev;
        }

        return NULL;
}

static unsigned int
rte_compressdev_is_valid_dev(uint8_t dev_id)
{
        struct rte_compressdev *dev = NULL;

        if (dev_id >= rte_compressdev_globals->nb_devs)
                return 0;

        dev = rte_compressdev_get_dev(dev_id);
        if (dev->attached != RTE_COMPRESSDEV_ATTACHED)
                return 0;
        else
                return 1;
}


int __rte_experimental
rte_compressdev_get_dev_id(const char *name)
{
        unsigned int i;

        if (name == NULL)
                return -1;

        for (i = 0; i < rte_compressdev_globals->nb_devs; i++)
                if ((strcmp(rte_compressdev_globals->devs[i].data->name, name)
                                == 0) &&
                                (rte_compressdev_globals->devs[i].attached ==
                                                RTE_COMPRESSDEV_ATTACHED))
                        return i;

        return -1;
}

uint8_t __rte_experimental
rte_compressdev_count(void)
{
        return rte_compressdev_globals->nb_devs;
}

uint8_t __rte_experimental
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
        uint8_t nb_devices)
{
        uint8_t i, count = 0;
        struct rte_compressdev *devs = rte_compressdev_globals->devs;
        uint8_t max_devs = rte_compressdev_globals->max_devs;

        for (i = 0; i < max_devs && count < nb_devices; i++) {

                if (devs[i].attached == RTE_COMPRESSDEV_ATTACHED) {
                        int cmp;

                        cmp = strncmp(devs[i].device->driver->name,
                                        driver_name,
                                        strlen(driver_name));

                        if (cmp == 0)
                                devices[count++] = devs[i].data->dev_id;
                }
        }

        return count;
}

int __rte_experimental
rte_compressdev_socket_id(uint8_t dev_id)
{
        struct rte_compressdev *dev;

        if (!rte_compressdev_is_valid_dev(dev_id))
                return -1;

        dev = rte_compressdev_get_dev(dev_id);

        return dev->data->socket_id;
}

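/*
 * Reserve (primary process) or look up (secondary process) the memzone that
 * backs the shared rte_compressdev_data for this device, so device state is
 * visible across EAL processes; only the primary process zeroes it.
 */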
static inline int
rte_compressdev_data_alloc(uint8_t dev_id, struct rte_compressdev_data **data,
                int socket_id)
{
        char mz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
        const struct rte_memzone *mz;
        int n;

        /* generate memzone name */
        n = snprintf(mz_name, sizeof(mz_name),
                        "rte_compressdev_data_%u", dev_id);
        if (n >= (int)sizeof(mz_name))
                return -EINVAL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(mz_name,
                                sizeof(struct rte_compressdev_data),
                                socket_id, 0);
        } else
                mz = rte_memzone_lookup(mz_name);

        if (mz == NULL)
                return -ENOMEM;

        *data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(*data, 0, sizeof(struct rte_compressdev_data));

        return 0;
}

static uint8_t
rte_compressdev_find_free_device_index(void)
{
        uint8_t dev_id;

        for (dev_id = 0; dev_id < RTE_COMPRESS_MAX_DEVS; dev_id++) {
                if (rte_comp_devices[dev_id].attached ==
                                RTE_COMPRESSDEV_DETACHED)
                        return dev_id;
        }
        return RTE_COMPRESS_MAX_DEVS;
}

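/*
 * PMD-facing allocator: find a free slot in rte_comp_devices[], attach the
 * shared device data for that slot and mark the device as attached. Returns
 * NULL if the name is already taken or no free slot remains.
 */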
struct rte_compressdev * __rte_experimental
rte_compressdev_pmd_allocate(const char *name, int socket_id)
{
        struct rte_compressdev *compressdev;
        uint8_t dev_id;

        if (rte_compressdev_pmd_get_named_dev(name) != NULL) {
                COMPRESSDEV_LOG(ERR,
                        "comp device with name %s already allocated!", name);
                return NULL;
        }

        dev_id = rte_compressdev_find_free_device_index();
        if (dev_id == RTE_COMPRESS_MAX_DEVS) {
                COMPRESSDEV_LOG(ERR, "Reached maximum number of comp devices");
                return NULL;
        }
        compressdev = rte_compressdev_get_dev(dev_id);

        if (compressdev->data == NULL) {
                struct rte_compressdev_data *compressdev_data =
                                compressdev_globals.data[dev_id];

                int retval = rte_compressdev_data_alloc(dev_id,
                                &compressdev_data, socket_id);

                if (retval < 0 || compressdev_data == NULL)
                        return NULL;

                compressdev->data = compressdev_data;

                snprintf(compressdev->data->name, RTE_COMPRESSDEV_NAME_MAX_LEN,
                                "%s", name);

                compressdev->data->dev_id = dev_id;
                compressdev->data->socket_id = socket_id;
                compressdev->data->dev_started = 0;

                compressdev->attached = RTE_COMPRESSDEV_ATTACHED;

                compressdev_globals.nb_devs++;
        }

        return compressdev;
}

int __rte_experimental
rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
{
        int ret;

        if (compressdev == NULL)
                return -EINVAL;

        /* Close device only if device operations have been set */
        if (compressdev->dev_ops) {
                ret = rte_compressdev_close(compressdev->data->dev_id);
                if (ret < 0)
                        return ret;
        }

        compressdev->attached = RTE_COMPRESSDEV_DETACHED;
        compressdev_globals.nb_devs--;
        return 0;
}

uint16_t __rte_experimental
rte_compressdev_queue_pair_count(uint8_t dev_id)
{
        struct rte_compressdev *dev;

        dev = &rte_comp_devices[dev_id];
        return dev->data->nb_queue_pairs;
}

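/*
 * Grow or shrink the array of queue-pair pointers for a device. On first
 * configuration the array is zmalloc'd on the requested socket; on
 * reconfiguration queue pairs above the new count are released first and the
 * array is realloc'd, with any newly added slots zeroed.
 */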
static int
rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
                uint16_t nb_qpairs, int socket_id)
{
        struct rte_compressdev_info dev_info;
        void **qp;
        unsigned int i;

        if ((dev == NULL) || (nb_qpairs < 1)) {
                COMPRESSDEV_LOG(ERR, "invalid param: dev %p, nb_queues %u",
                                                        dev, nb_qpairs);
                return -EINVAL;
        }

        COMPRESSDEV_LOG(DEBUG, "Setup %d queue pairs on device %u",
                        nb_qpairs, dev->data->dev_id);

        memset(&dev_info, 0, sizeof(struct rte_compressdev_info));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        if ((dev_info.max_nb_queue_pairs != 0) &&
                        (nb_qpairs > dev_info.max_nb_queue_pairs)) {
                COMPRESSDEV_LOG(ERR, "Invalid num queue_pairs (%u) for dev %u",
                                nb_qpairs, dev->data->dev_id);
                return -EINVAL;
        }

        if (dev->data->queue_pairs == NULL) { /* first time configuration */
                dev->data->queue_pairs = rte_zmalloc_socket(
                                "compressdev->queue_pairs",
                                sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
                                RTE_CACHE_LINE_SIZE, socket_id);

                if (dev->data->queue_pairs == NULL) {
                        dev->data->nb_queue_pairs = 0;
                        COMPRESSDEV_LOG(ERR,
                        "failed to get memory for qp meta data, nb_queues %u",
                                                        nb_qpairs);
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                int ret;
                uint16_t old_nb_queues = dev->data->nb_queue_pairs;

                qp = dev->data->queue_pairs;

                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
                                -ENOTSUP);

                for (i = nb_qpairs; i < old_nb_queues; i++) {
                        ret = (*dev->dev_ops->queue_pair_release)(dev, i);
                        if (ret < 0)
                                return ret;
                }

                qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
                                RTE_CACHE_LINE_SIZE);
                if (qp == NULL) {
                        COMPRESSDEV_LOG(ERR,
                        "failed to realloc qp meta data, nb_queues %u",
                                                nb_qpairs);
                        return -(ENOMEM);
                }

                if (nb_qpairs > old_nb_queues) {
                        uint16_t new_qs = nb_qpairs - old_nb_queues;

                        memset(qp + old_nb_queues, 0,
                                sizeof(qp[0]) * new_qs);
                }

                dev->data->queue_pairs = qp;

        }
        dev->data->nb_queue_pairs = nb_qpairs;
        return 0;
}

static int
rte_compressdev_queue_pairs_release(struct rte_compressdev *dev)
{
        uint16_t num_qps, i;
        int ret;

        if (dev == NULL) {
                COMPRESSDEV_LOG(ERR, "invalid param: dev %p", dev);
                return -EINVAL;
        }

        num_qps = dev->data->nb_queue_pairs;

        if (num_qps == 0)
                return 0;

        COMPRESSDEV_LOG(DEBUG, "Free %d queue pairs on device %u",
                        dev->data->nb_queue_pairs, dev->data->dev_id);

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
                                -ENOTSUP);

        for (i = 0; i < num_qps; i++) {
                ret = (*dev->dev_ops->queue_pair_release)(dev, i);
                if (ret < 0)
                        return ret;
        }

        if (dev->data->queue_pairs != NULL)
                rte_free(dev->data->queue_pairs);
        dev->data->queue_pairs = NULL;
        dev->data->nb_queue_pairs = 0;

        return 0;
}

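/*
 * Typical configuration sequence, as a rough sketch only. The field names
 * follow struct rte_compressdev_config in rte_compressdev.h and should be
 * checked against that header; max_inflight is an application-chosen value:
 *
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *		.max_nb_priv_xforms = 1,
 *		.max_nb_streams = 0,
 *	};
 *	if (rte_compressdev_configure(dev_id, &cfg) < 0)
 *		return -1;
 *	if (rte_compressdev_queue_pair_setup(dev_id, 0, max_inflight,
 *			rte_socket_id()) < 0)
 *		return -1;
 *	if (rte_compressdev_start(dev_id) < 0)
 *		return -1;
 */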
int __rte_experimental
rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
        struct rte_compressdev *dev;
        int diag;

        if (!rte_compressdev_is_valid_dev(dev_id)) {
                COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
                return -EINVAL;
        }

        dev = &rte_comp_devices[dev_id];

        if (dev->data->dev_started) {
                COMPRESSDEV_LOG(ERR,
                    "device %d must be stopped to allow configuration", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        /* Setup new number of queue pairs and reconfigure device. */
        diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
                        config->socket_id);
        if (diag != 0) {
                COMPRESSDEV_LOG(ERR,
                        "dev%d rte_comp_dev_queue_pairs_config = %d",
                                dev_id, diag);
                return diag;
        }

        return (*dev->dev_ops->dev_configure)(dev, config);
}

int __rte_experimental
rte_compressdev_start(uint8_t dev_id)
{
        struct rte_compressdev *dev;
        int diag;

        COMPRESSDEV_LOG(DEBUG, "Start dev_id=%" PRIu8, dev_id);

        if (!rte_compressdev_is_valid_dev(dev_id)) {
                COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
                return -EINVAL;
        }

        dev = &rte_comp_devices[dev_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                COMPRESSDEV_LOG(ERR,
                    "Device with dev_id=%" PRIu8 " already started", dev_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        return 0;
}

void __rte_experimental
rte_compressdev_stop(uint8_t dev_id)
{
        struct rte_compressdev *dev;

        if (!rte_compressdev_is_valid_dev(dev_id)) {
                COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
                return;
        }

        dev = &rte_comp_devices[dev_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                COMPRESSDEV_LOG(ERR,
                    "Device with dev_id=%" PRIu8 " already stopped", dev_id);
                return;
        }

        (*dev->dev_ops->dev_stop)(dev);
        dev->data->dev_started = 0;
}

int __rte_experimental
rte_compressdev_close(uint8_t dev_id)
{
        struct rte_compressdev *dev;
        int retval;

        if (!rte_compressdev_is_valid_dev(dev_id)) {
                COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
                return -1;
        }

        dev = &rte_comp_devices[dev_id];

        /* Device must be stopped before it can be closed */
        if (dev->data->dev_started == 1) {
                COMPRESSDEV_LOG(ERR, "Device %u must be stopped before closing",
                                dev_id);
                return -EBUSY;
        }

        /* Free queue pairs memory */
        retval = rte_compressdev_queue_pairs_release(dev);

        if (retval < 0)
                return retval;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
        retval = (*dev->dev_ops->dev_close)(dev);

        if (retval < 0)
                return retval;

        return 0;
}

int __rte_experimental
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
                uint32_t max_inflight_ops, int socket_id)
{
        struct rte_compressdev *dev;

        if (!rte_compressdev_is_valid_dev(dev_id)) {
                COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
                return -EINVAL;
        }

        dev = &rte_comp_devices[dev_id];
        if (queue_pair_id >= dev->data->nb_queue_pairs) {
                COMPRESSDEV_LOG(ERR, "Invalid queue_pair_id=%d", queue_pair_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                COMPRESSDEV_LOG(ERR,
                    "device %d must be stopped to allow configuration", dev_id);
                return -EBUSY;
        }

        if (max_inflight_ops == 0) {
                COMPRESSDEV_LOG(ERR,
                        "Invalid maximum number of inflight operations");
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

        return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id,
                        max_inflight_ops, socket_id);
}

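/*
 * The burst calls below are thin wrappers around the PMD's per-queue-pair
 * handlers; dev_id and qp_id are not validated here, so callers must only
 * use values accepted by earlier configure/queue_pair_setup calls. A rough
 * polling sketch (deq_ops is an application array of at least nb_ops
 * entries; each dequeued op's status field should then be checked):
 *
 *	uint16_t sent = rte_compressdev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	uint16_t drained = 0;
 *
 *	while (drained < sent)
 *		drained += rte_compressdev_dequeue_burst(dev_id, 0,
 *				&deq_ops[drained], sent - drained);
 */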
uint16_t __rte_experimental
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
                struct rte_comp_op **ops, uint16_t nb_ops)
{
        struct rte_compressdev *dev = &rte_compressdevs[dev_id];

        nb_ops = (*dev->dequeue_burst)
                        (dev->data->queue_pairs[qp_id], ops, nb_ops);

        return nb_ops;
}

uint16_t __rte_experimental
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
                struct rte_comp_op **ops, uint16_t nb_ops)
{
        struct rte_compressdev *dev = &rte_compressdevs[dev_id];

        return (*dev->enqueue_burst)(
                        dev->data->queue_pairs[qp_id], ops, nb_ops);
}

int __rte_experimental
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
{
        struct rte_compressdev *dev;

        if (!rte_compressdev_is_valid_dev(dev_id)) {
                COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
                return -ENODEV;
        }

        if (stats == NULL) {
                COMPRESSDEV_LOG(ERR, "Invalid stats ptr");
                return -EINVAL;
        }

        dev = &rte_comp_devices[dev_id];
        memset(stats, 0, sizeof(*stats));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        (*dev->dev_ops->stats_get)(dev, stats);
        return 0;
}

void __rte_experimental
rte_compressdev_stats_reset(uint8_t dev_id)
{
        struct rte_compressdev *dev;

        if (!rte_compressdev_is_valid_dev(dev_id)) {
                COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
                return;
        }

        dev = &rte_comp_devices[dev_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
}


void __rte_experimental
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
        struct rte_compressdev *dev;

        if (dev_id >= compressdev_globals.nb_devs) {
                COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
                return;
        }

        dev = &rte_comp_devices[dev_id];

        memset(dev_info, 0, sizeof(struct rte_compressdev_info));

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
        (*dev->dev_ops->dev_infos_get)(dev, dev_info);

        dev_info->driver_name = dev->device->driver->name;
}

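/*
 * Per the compressdev API, a private_xform holds the PMD's representation of
 * an rte_comp_xform for stateless operations, while a stream (further below)
 * is the stateful counterpart; both are created on, and must be freed on, a
 * specific device.
 */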
int __rte_experimental
rte_compressdev_private_xform_create(uint8_t dev_id,
                const struct rte_comp_xform *xform,
                void **priv_xform)
{
        struct rte_compressdev *dev;
        int ret;

        dev = rte_compressdev_get_dev(dev_id);

        if (xform == NULL || priv_xform == NULL || dev == NULL)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_create, -ENOTSUP);
        ret = (*dev->dev_ops->private_xform_create)(dev, xform, priv_xform);
        if (ret < 0) {
                COMPRESSDEV_LOG(ERR,
                        "dev_id %d failed to create private_xform: err=%d",
                        dev_id, ret);
                return ret;
        }

        return 0;
}

int __rte_experimental
rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
{
        struct rte_compressdev *dev;
        int ret;

        dev = rte_compressdev_get_dev(dev_id);

        if (dev == NULL || priv_xform == NULL)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_free, -ENOTSUP);
        ret = dev->dev_ops->private_xform_free(dev, priv_xform);
        if (ret < 0) {
                COMPRESSDEV_LOG(ERR,
                        "dev_id %d failed to free private xform: err=%d",
                        dev_id, ret);
                return ret;
        }

        return 0;
}

int __rte_experimental
rte_compressdev_stream_create(uint8_t dev_id,
                const struct rte_comp_xform *xform,
                void **stream)
{
        struct rte_compressdev *dev;
        int ret;

        dev = rte_compressdev_get_dev(dev_id);

        if (xform == NULL || dev == NULL || stream == NULL)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_create, -ENOTSUP);
        ret = (*dev->dev_ops->stream_create)(dev, xform, stream);
        if (ret < 0) {
                COMPRESSDEV_LOG(ERR,
                        "dev_id %d failed to create stream: err=%d",
                        dev_id, ret);
                return ret;
        }

        return 0;
}


int __rte_experimental
rte_compressdev_stream_free(uint8_t dev_id, void *stream)
{
        struct rte_compressdev *dev;
        int ret;

        dev = rte_compressdev_get_dev(dev_id);

        if (dev == NULL || stream == NULL)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_free, -ENOTSUP);
        ret = dev->dev_ops->stream_free(dev, stream);
        if (ret < 0) {
                COMPRESSDEV_LOG(ERR,
                        "dev_id %d failed to free stream: err=%d",
                        dev_id, ret);
                return ret;
        }

        return 0;
}

const char * __rte_experimental
rte_compressdev_name_get(uint8_t dev_id)
{
        struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);

        if (dev == NULL)
                return NULL;

        return dev->data->name;
}

RTE_INIT(rte_compressdev_log)
{
        compressdev_logtype = rte_log_register("lib.compressdev");
        if (compressdev_logtype >= 0)
                rte_log_set_level(compressdev_logtype, RTE_LOG_NOTICE);
}