1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
11 #include <rte_mbuf.h>
12 #include <rte_compressdev.h>
13 #include <rte_string_fns.h>
14
15 #include "test_compressdev_test_buffer.h"
16 #include "test.h"
17
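/* Integer ceiling division, e.g. DIV_CEIL(13, 4) == 4 */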
18 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
19
20 #define DEFAULT_WINDOW_SIZE 15
21 #define DEFAULT_MEM_LEVEL 8
22 #define MAX_DEQD_RETRIES 10
23 #define DEQUEUE_WAIT_TIME 10000
24
25 /*
26  * Allow 30% extra space for the compressed data compared to the
27  * original data, in case the data cannot be reduced and actually
28  * grows due to the DEFLATE block headers
29  */
30 #define COMPRESS_BUF_SIZE_RATIO 1.3
31 #define NUM_LARGE_MBUFS 16
32 #define SMALL_SEG_SIZE 256
33 #define MAX_SEGS 16
34 #define NUM_OPS 16
35 #define NUM_MAX_XFORMS 16
36 #define NUM_MAX_INFLIGHT_OPS 128
37 #define CACHE_SIZE 0
38
39 const char *
40 huffman_type_strings[] = {
41         [RTE_COMP_HUFFMAN_DEFAULT]      = "PMD default",
42         [RTE_COMP_HUFFMAN_FIXED]        = "Fixed",
43         [RTE_COMP_HUFFMAN_DYNAMIC]      = "Dynamic"
44 };
45
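/*
 * Selects which side of a compress/decompress round trip is performed
 * with zlib directly instead of the compressdev PMD under test
 */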
46 enum zlib_direction {
47         ZLIB_NONE,
48         ZLIB_COMPRESS,
49         ZLIB_DECOMPRESS,
50         ZLIB_ALL
51 };
52
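/* Per-operation private data: index of the test buffer the op originated from */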
53 struct priv_op_data {
54         uint16_t orig_idx;
55 };
56
57 struct comp_testsuite_params {
58         struct rte_mempool *large_mbuf_pool;
59         struct rte_mempool *small_mbuf_pool;
60         struct rte_mempool *op_pool;
61         struct rte_comp_xform *def_comp_xform;
62         struct rte_comp_xform *def_decomp_xform;
63 };
64
65 static struct comp_testsuite_params testsuite_params = { 0 };
66
67 static void
68 testsuite_teardown(void)
69 {
70         struct comp_testsuite_params *ts_params = &testsuite_params;
71
72         rte_mempool_free(ts_params->large_mbuf_pool);
73         rte_mempool_free(ts_params->small_mbuf_pool);
74         rte_mempool_free(ts_params->op_pool);
75         rte_free(ts_params->def_comp_xform);
76         rte_free(ts_params->def_decomp_xform);
77 }
78
79 static int
80 testsuite_setup(void)
81 {
82         struct comp_testsuite_params *ts_params = &testsuite_params;
83         uint32_t max_buf_size = 0;
84         unsigned int i;
85
86         if (rte_compressdev_count() == 0) {
87                 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
88                 return TEST_FAILED;
89         }
90
91         RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
92                                 rte_compressdev_name_get(0));
93
94         for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
95                 max_buf_size = RTE_MAX(max_buf_size,
96                                 strlen(compress_test_bufs[i]) + 1);
97
98         /*
99          * Buffers are used both for the original and the compressed data.
100          * Since the compressed data might be larger than the original
101          * (due to block headers), make the buffers big enough
102          * for both cases.
103          */
104         max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
105         ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
106                         NUM_LARGE_MBUFS,
107                         CACHE_SIZE, 0,
108                         max_buf_size + RTE_PKTMBUF_HEADROOM,
109                         rte_socket_id());
110         if (ts_params->large_mbuf_pool == NULL) {
111                 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
112                 return TEST_FAILED;
113         }
114
115         /* Create mempool with smaller buffers for SGL testing */
116         ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
117                         NUM_LARGE_MBUFS * MAX_SEGS,
118                         CACHE_SIZE, 0,
119                         SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
120                         rte_socket_id());
121         if (ts_params->small_mbuf_pool == NULL) {
122                 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
123                 goto exit;
124         }
125
126         ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
127                                 0, sizeof(struct priv_op_data),
128                                 rte_socket_id());
129         if (ts_params->op_pool == NULL) {
130                 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
131                 goto exit;
132         }
133
134         ts_params->def_comp_xform =
135                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
136         if (ts_params->def_comp_xform == NULL) {
137                 RTE_LOG(ERR, USER1,
138                         "Default compress xform could not be created\n");
139                 goto exit;
140         }
141         ts_params->def_decomp_xform =
142                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
143         if (ts_params->def_decomp_xform == NULL) {
144                 RTE_LOG(ERR, USER1,
145                         "Default decompress xform could not be created\n");
146                 goto exit;
147         }
148
149         /* Initializes default values for compress/decompress xforms */
150         ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
151         ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
152         ts_params->def_comp_xform->compress.deflate.huffman =
153                                                 RTE_COMP_HUFFMAN_DEFAULT;
154         ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
155         ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
156         ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
157
158         ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
159         ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
160         ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
161         ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
162
163         return TEST_SUCCESS;
164
165 exit:
166         testsuite_teardown();
167
168         return TEST_FAILED;
169 }
170
171 static int
172 generic_ut_setup(void)
173 {
174         /* Configure compressdev (one device, one queue pair) */
175         struct rte_compressdev_config config = {
176                 .socket_id = rte_socket_id(),
177                 .nb_queue_pairs = 1,
178                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
179                 .max_nb_streams = 0
180         };
181
182         if (rte_compressdev_configure(0, &config) < 0) {
183                 RTE_LOG(ERR, USER1, "Device configuration failed\n");
184                 return -1;
185         }
186
187         if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
188                         rte_socket_id()) < 0) {
189                 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
190                 return -1;
191         }
192
193         if (rte_compressdev_start(0) < 0) {
194                 RTE_LOG(ERR, USER1, "Device could not be started\n");
195                 return -1;
196         }
197
198         return 0;
199 }
200
201 static void
202 generic_ut_teardown(void)
203 {
204         rte_compressdev_stop(0);
205         if (rte_compressdev_close(0) < 0)
206                 RTE_LOG(ERR, USER1, "Device could not be closed\n");
207 }
208
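/*
 * Negative test: device configuration and queue pair setup must fail
 * when given invalid parameters (zero queue pairs, or more queue pairs
 * than the device supports)
 */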
209 static int
210 test_compressdev_invalid_configuration(void)
211 {
212         struct rte_compressdev_config invalid_config;
213         struct rte_compressdev_config valid_config = {
214                 .socket_id = rte_socket_id(),
215                 .nb_queue_pairs = 1,
216                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
217                 .max_nb_streams = 0
218         };
219         struct rte_compressdev_info dev_info;
220
221         /* Invalid configuration with 0 queue pairs */
222         memcpy(&invalid_config, &valid_config,
223                         sizeof(struct rte_compressdev_config));
224         invalid_config.nb_queue_pairs = 0;
225
226         TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
227                         "Device configuration was successful "
228                         "with no queue pairs (invalid)\n");
229
230         /*
231          * Invalid configuration with too many queue pairs
232          * (if there is an actual maximum number of queue pairs)
233          */
234         rte_compressdev_info_get(0, &dev_info);
235         if (dev_info.max_nb_queue_pairs != 0) {
236                 memcpy(&invalid_config, &valid_config,
237                         sizeof(struct rte_compressdev_config));
238                 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
239
240                 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
241                                 "Device configuration was successful "
242                                 "with too many queue pairs (invalid)\n");
243         }
244
245         /* Invalid queue pair setup, since no queue pairs have been configured */
246         TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
247                                 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
248                         "Queue pair setup was successful "
249                         "with no queue pairs set (invalid)\n");
250
251         return TEST_SUCCESS;
252 }
253
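/* Returns 0 if both buffers have the same length and contents, -1 otherwise */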
254 static int
255 compare_buffers(const char *buffer1, uint32_t buffer1_len,
256                 const char *buffer2, uint32_t buffer2_len)
257 {
258         if (buffer1_len != buffer2_len) {
259                 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
260                 return -1;
261         }
262
263         if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
264                 RTE_LOG(ERR, USER1, "Buffers are different\n");
265                 return -1;
266         }
267
268         return 0;
269 }
270
271 /*
272  * Maps compressdev and Zlib flush flags
273  */
274 static int
275 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
276 {
277         switch (flag) {
278         case RTE_COMP_FLUSH_NONE:
279                 return Z_NO_FLUSH;
280         case RTE_COMP_FLUSH_SYNC:
281                 return Z_SYNC_FLUSH;
282         case RTE_COMP_FLUSH_FULL:
283                 return Z_FULL_FLUSH;
284         case RTE_COMP_FLUSH_FINAL:
285                 return Z_FINISH;
286         /*
287          * There should be only the values above,
288          * so this should never happen
289          */
290         default:
291                 return -1;
292         }
293 }
294
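/*
 * Reference compression path: deflates op->m_src into op->m_dst directly
 * with zlib (raw DEFLATE, i.e. negative window bits), so that compressdev
 * PMD output can be checked against an independent implementation.
 * Multi-segment mbufs are linearized into temporary buffers first.
 */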
295 static int
296 compress_zlib(struct rte_comp_op *op,
297                 const struct rte_comp_xform *xform, int mem_level)
298 {
299         z_stream stream;
300         int zlib_flush;
301         int strategy, window_bits, comp_level;
302         int ret = TEST_FAILED;
303         uint8_t *single_src_buf = NULL;
304         uint8_t *single_dst_buf = NULL;
305
306         /* initialize zlib stream */
307         stream.zalloc = Z_NULL;
308         stream.zfree = Z_NULL;
309         stream.opaque = Z_NULL;
310
311         if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
312                 strategy = Z_FIXED;
313         else
314                 strategy = Z_DEFAULT_STRATEGY;
315
316         /*
317          * Window bits is the base two logarithm of the window size (in bytes).
318          * When doing raw DEFLATE, this number will be negative.
319          */
320         window_bits = -(xform->compress.window_size);
321
322         comp_level = xform->compress.level;
323
324         if (comp_level != RTE_COMP_LEVEL_NONE)
325                 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
326                         window_bits, mem_level, strategy);
327         else
328                 ret = deflateInit(&stream, Z_NO_COMPRESSION);
329
330         if (ret != Z_OK) {
331                 printf("Zlib deflate could not be initialized\n");
332                 goto exit;
333         }
334
335         /* Assuming stateless operation */
336         /* SGL */
337         if (op->m_src->nb_segs > 1) {
338                 single_src_buf = rte_malloc(NULL,
339                                 rte_pktmbuf_pkt_len(op->m_src), 0);
340                 if (single_src_buf == NULL) {
341                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
342                         goto exit;
343                 }
344                 single_dst_buf = rte_malloc(NULL,
345                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
346                 if (single_dst_buf == NULL) {
347                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
348                         goto exit;
349                 }
350                 if (rte_pktmbuf_read(op->m_src, 0,
351                                         rte_pktmbuf_pkt_len(op->m_src),
352                                         single_src_buf) == NULL) {
353                         RTE_LOG(ERR, USER1,
354                                 "Buffer could not be read entirely\n");
355                         goto exit;
356                 }
357
358                 stream.avail_in = op->src.length;
359                 stream.next_in = single_src_buf;
360                 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
361                 stream.next_out = single_dst_buf;
362
363         } else {
364                 stream.avail_in = op->src.length;
365                 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
366                 stream.avail_out = op->m_dst->data_len;
367                 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
368         }
369         /* Stateless operation, the whole buffer will be compressed in one go */
370         zlib_flush = map_zlib_flush_flag(op->flush_flag);
371         ret = deflate(&stream, zlib_flush);
372
373         if (stream.avail_in != 0) {
374                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
375                 goto exit;
376         }
377
378         if (ret != Z_STREAM_END)
379                 goto exit;
380
381         /* Copy data to destination SGL */
382         if (op->m_src->nb_segs > 1) {
383                 uint32_t remaining_data = stream.total_out;
384                 uint8_t *src_data = single_dst_buf;
385                 struct rte_mbuf *dst_buf = op->m_dst;
386
387                 while (remaining_data > 0) {
388                         uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
389                                         uint8_t *);
390                         /* Last segment */
391                         if (remaining_data < dst_buf->data_len) {
392                                 memcpy(dst_data, src_data, remaining_data);
393                                 remaining_data = 0;
394                         } else {
395                                 memcpy(dst_data, src_data, dst_buf->data_len);
396                                 remaining_data -= dst_buf->data_len;
397                                 src_data += dst_buf->data_len;
398                                 dst_buf = dst_buf->next;
399                         }
400                 }
401         }
402
403         op->consumed = stream.total_in;
404         op->produced = stream.total_out;
405         op->status = RTE_COMP_OP_STATUS_SUCCESS;
406
407         deflateReset(&stream);
408
409         ret = 0;
410 exit:
411         deflateEnd(&stream);
412         rte_free(single_src_buf);
413         rte_free(single_dst_buf);
414
415         return ret;
416 }
417
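/*
 * Reference decompression path: inflates op->m_src into op->m_dst directly
 * with zlib (raw DEFLATE), mirroring compress_zlib() above.
 */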
418 static int
419 decompress_zlib(struct rte_comp_op *op,
420                 const struct rte_comp_xform *xform)
421 {
422         z_stream stream;
423         int window_bits;
424         int zlib_flush;
425         int ret = TEST_FAILED;
426         uint8_t *single_src_buf = NULL;
427         uint8_t *single_dst_buf = NULL;
428
429         /* initialize zlib stream */
430         stream.zalloc = Z_NULL;
431         stream.zfree = Z_NULL;
432         stream.opaque = Z_NULL;
433
434         /*
435          * Window bits is the base two logarithm of the window size (in bytes).
436          * When doing raw DEFLATE, this number will be negative.
437          */
438         window_bits = -(xform->decompress.window_size);
439
440         ret = inflateInit2(&stream, window_bits);
441
442         if (ret != Z_OK) {
443                 printf("Zlib inflate could not be initialized\n");
444                 goto exit;
445         }
446
447         /* Assuming stateless operation */
448         /* SGL */
449         if (op->m_src->nb_segs > 1) {
450                 single_src_buf = rte_malloc(NULL,
451                                 rte_pktmbuf_pkt_len(op->m_src), 0);
452                 if (single_src_buf == NULL) {
453                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
454                         goto exit;
455                 }
456                 single_dst_buf = rte_malloc(NULL,
457                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
458                 if (single_dst_buf == NULL) {
459                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
460                         goto exit;
461                 }
462                 if (rte_pktmbuf_read(op->m_src, 0,
463                                         rte_pktmbuf_pkt_len(op->m_src),
464                                         single_src_buf) == NULL) {
465                         RTE_LOG(ERR, USER1,
466                                 "Buffer could not be read entirely\n");
467                         goto exit;
468                 }
469
470                 stream.avail_in = op->src.length;
471                 stream.next_in = single_src_buf;
472                 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
473                 stream.next_out = single_dst_buf;
474
475         } else {
476                 stream.avail_in = op->src.length;
477                 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
478                 stream.avail_out = op->m_dst->data_len;
479                 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
480         }
481
482         /* Stateless operation, the whole buffer will be decompressed in one go */
483         zlib_flush = map_zlib_flush_flag(op->flush_flag);
484         ret = inflate(&stream, zlib_flush);
485
486         if (stream.avail_in != 0) {
487                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
488                 goto exit;
489         }
490
491         if (ret != Z_STREAM_END)
492                 goto exit;
493
494         if (op->m_src->nb_segs > 1) {
495                 uint32_t remaining_data = stream.total_out;
496                 uint8_t *src_data = single_dst_buf;
497                 struct rte_mbuf *dst_buf = op->m_dst;
498
499                 while (remaining_data > 0) {
500                         uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
501                                         uint8_t *);
502                         /* Last segment */
503                         if (remaining_data < dst_buf->data_len) {
504                                 memcpy(dst_data, src_data, remaining_data);
505                                 remaining_data = 0;
506                         } else {
507                                 memcpy(dst_data, src_data, dst_buf->data_len);
508                                 remaining_data -= dst_buf->data_len;
509                                 src_data += dst_buf->data_len;
510                                 dst_buf = dst_buf->next;
511                         }
512                 }
513         }
514
515         op->consumed = stream.total_in;
516         op->produced = stream.total_out;
517         op->status = RTE_COMP_OP_STATUS_SUCCESS;
518
519         inflateReset(&stream);
520
521         ret = 0;
522 exit:
523         inflateEnd(&stream);
524
525         return ret;
526 }
527
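/*
 * Builds a scatter-gather chain on top of head_buf covering total_data_size
 * bytes, using SMALL_SEG_SIZE segments from the small mempool (the last
 * segment may come from the large mempool if more data remains). If test_buf
 * is not NULL, its contents are copied into the chain. limit_segs_in_sgl
 * caps the number of segments (0 means no limit).
 */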
528 static int
529 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
530                 uint32_t total_data_size,
531                 struct rte_mempool *small_mbuf_pool,
532                 struct rte_mempool *large_mbuf_pool,
533                 uint8_t limit_segs_in_sgl)
534 {
535         uint32_t remaining_data = total_data_size;
536         uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
537         struct rte_mempool *pool;
538         struct rte_mbuf *next_seg;
539         uint32_t data_size;
540         char *buf_ptr;
541         const char *data_ptr = test_buf;
542         uint16_t i;
543         int ret;
544
545         if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
546                 num_remaining_segs = limit_segs_in_sgl - 1;
547
548         /*
549          * Allocate data in the first segment (header) and
550          * copy data if test buffer is provided
551          */
552         if (remaining_data < SMALL_SEG_SIZE)
553                 data_size = remaining_data;
554         else
555                 data_size = SMALL_SEG_SIZE;
556         buf_ptr = rte_pktmbuf_append(head_buf, data_size);
557         if (buf_ptr == NULL) {
558                 RTE_LOG(ERR, USER1,
559                         "Not enough space in the 1st buffer\n");
560                 return -1;
561         }
562
563         if (data_ptr != NULL) {
564                 /* Copy characters without NULL terminator */
565                 strncpy(buf_ptr, data_ptr, data_size);
566                 data_ptr += data_size;
567         }
568         remaining_data -= data_size;
569         num_remaining_segs--;
570
571         /*
572          * Allocate the rest of the segments,
573          * copy the rest of the data and chain the segments.
574          */
575         for (i = 0; i < num_remaining_segs; i++) {
576
577                 if (i == (num_remaining_segs - 1)) {
578                         /* last segment */
579                         if (remaining_data > SMALL_SEG_SIZE)
580                                 pool = large_mbuf_pool;
581                         else
582                                 pool = small_mbuf_pool;
583                         data_size = remaining_data;
584                 } else {
585                         data_size = SMALL_SEG_SIZE;
586                         pool = small_mbuf_pool;
587                 }
588
589                 next_seg = rte_pktmbuf_alloc(pool);
590                 if (next_seg == NULL) {
591                         RTE_LOG(ERR, USER1,
592                                 "New segment could not be allocated "
593                                 "from the mempool\n");
594                         return -1;
595                 }
596                 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
597                 if (buf_ptr == NULL) {
598                         RTE_LOG(ERR, USER1,
599                                 "Not enough space in the buffer\n");
600                         rte_pktmbuf_free(next_seg);
601                         return -1;
602                 }
603                 if (data_ptr != NULL) {
604                         /* Copy characters without NULL terminator */
605                         strncpy(buf_ptr, data_ptr, data_size);
606                         data_ptr += data_size;
607                 }
608                 remaining_data -= data_size;
609
610                 ret = rte_pktmbuf_chain(head_buf, next_seg);
611                 if (ret != 0) {
612                         rte_pktmbuf_free(next_seg);
613                         RTE_LOG(ERR, USER1,
614                                 "Segment could not be chained\n");
615                         return -1;
616                 }
617         }
618
619         return 0;
620 }
621
622 /*
623  * Compresses and decompresses buffer with compressdev API and Zlib API
624  */
625 static int
626 test_deflate_comp_decomp(const char * const test_bufs[],
627                 unsigned int num_bufs,
628                 uint16_t buf_idx[],
629                 struct rte_comp_xform *compress_xforms[],
630                 struct rte_comp_xform *decompress_xforms[],
631                 unsigned int num_xforms,
632                 enum rte_comp_op_type state,
633                 unsigned int sgl,
634                 enum zlib_direction zlib_dir)
635 {
636         struct comp_testsuite_params *ts_params = &testsuite_params;
637         int ret_status = -1;
638         int ret;
639         struct rte_mbuf *uncomp_bufs[num_bufs];
640         struct rte_mbuf *comp_bufs[num_bufs];
641         struct rte_comp_op *ops[num_bufs];
642         struct rte_comp_op *ops_processed[num_bufs];
643         void *priv_xforms[num_bufs];
644         uint16_t num_enqd, num_deqd, num_total_deqd;
645         uint16_t num_priv_xforms = 0;
646         unsigned int deqd_retries = 0;
647         struct priv_op_data *priv_data;
648         char *buf_ptr;
649         unsigned int i;
650         struct rte_mempool *buf_pool;
651         uint32_t data_size;
652         const struct rte_compressdev_capabilities *capa =
653                 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
654         char *contig_buf = NULL;
655
656         /* Initialize all arrays to NULL */
657         memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
658         memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
659         memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
660         memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
661         memset(priv_xforms, 0, sizeof(void *) * num_bufs);
662
663         if (sgl)
664                 buf_pool = ts_params->small_mbuf_pool;
665         else
666                 buf_pool = ts_params->large_mbuf_pool;
667
668         /* Prepare the source mbufs with the data */
669         ret = rte_pktmbuf_alloc_bulk(buf_pool,
670                                 uncomp_bufs, num_bufs);
671         if (ret < 0) {
672                 RTE_LOG(ERR, USER1,
673                         "Source mbufs could not be allocated "
674                         "from the mempool\n");
675                 goto exit;
676         }
677
678         if (sgl) {
679                 for (i = 0; i < num_bufs; i++) {
680                         data_size = strlen(test_bufs[i]) + 1;
681                         if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
682                                         data_size,
683                                         ts_params->small_mbuf_pool,
684                                         ts_params->large_mbuf_pool,
685                                         MAX_SEGS) < 0)
686                                 goto exit;
687                 }
688         } else {
689                 for (i = 0; i < num_bufs; i++) {
690                         data_size = strlen(test_bufs[i]) + 1;
691                         buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
                        if (buf_ptr == NULL) {
                                RTE_LOG(ERR, USER1,
                                        "Not enough space in the source mbuf\n");
                                goto exit;
                        }
692                         snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
693                 }
694         }
695
696         /* Prepare the destination mbufs */
697         ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
698         if (ret < 0) {
699                 RTE_LOG(ERR, USER1,
700                         "Destination mbufs could not be allocated "
701                         "from the mempool\n");
702                 goto exit;
703         }
704
705         if (sgl) {
706                 for (i = 0; i < num_bufs; i++) {
707                         data_size = strlen(test_bufs[i]) *
708                                 COMPRESS_BUF_SIZE_RATIO;
709                         if (prepare_sgl_bufs(NULL, comp_bufs[i],
710                                         data_size,
711                                         ts_params->small_mbuf_pool,
712                                         ts_params->large_mbuf_pool,
713                                         MAX_SEGS) < 0)
714                                 goto exit;
715                 }
716
717         } else {
718                 for (i = 0; i < num_bufs; i++) {
719                         data_size = strlen(test_bufs[i]) *
720                                 COMPRESS_BUF_SIZE_RATIO;
721                         rte_pktmbuf_append(comp_bufs[i], data_size);
722                 }
723         }
724
725         /* Build the compression operations */
726         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
727         if (ret < 0) {
728                 RTE_LOG(ERR, USER1,
729                         "Compress operations could not be allocated "
730                         "from the mempool\n");
731                 goto exit;
732         }
733
734         for (i = 0; i < num_bufs; i++) {
735                 ops[i]->m_src = uncomp_bufs[i];
736                 ops[i]->m_dst = comp_bufs[i];
737                 ops[i]->src.offset = 0;
738                 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
739                 ops[i]->dst.offset = 0;
740                 if (state == RTE_COMP_OP_STATELESS) {
741                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
742                 } else {
743                         RTE_LOG(ERR, USER1,
744                                 "Stateful operations are not supported "
745                                 "in these tests yet\n");
746                         goto exit;
747                 }
748                 ops[i]->input_chksum = 0;
749                 /*
750                  * Store the original operation index in the private data,
751                  * since ordering is not guaranteed when dequeueing from
752                  * compressdev, so results can be matched against their
753                  * inputs at the end of the test.
754                  */
755                 priv_data = (struct priv_op_data *) (ops[i] + 1);
756                 priv_data->orig_idx = i;
757         }
758
759         /* Compress data (either with Zlib API or compressdev API) */
760         if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
761                 for (i = 0; i < num_bufs; i++) {
762                         const struct rte_comp_xform *compress_xform =
763                                 compress_xforms[i % num_xforms];
764                         ret = compress_zlib(ops[i], compress_xform,
765                                         DEFAULT_MEM_LEVEL);
766                         if (ret < 0)
767                                 goto exit;
768
769                         ops_processed[i] = ops[i];
770                 }
771         } else {
772                 /* Create compress private xform data */
773                 for (i = 0; i < num_xforms; i++) {
774                         ret = rte_compressdev_private_xform_create(0,
775                                 (const struct rte_comp_xform *)compress_xforms[i],
776                                 &priv_xforms[i]);
777                         if (ret < 0) {
778                                 RTE_LOG(ERR, USER1,
779                                         "Compression private xform "
780                                         "could not be created\n");
781                                 goto exit;
782                         }
783                         num_priv_xforms++;
784                 }
785
786                 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
787                         /* Attach shareable private xform data to ops */
788                         for (i = 0; i < num_bufs; i++)
789                                 ops[i]->private_xform = priv_xforms[i % num_xforms];
790                 } else {
791                         /* Create rest of the private xforms for the other ops */
792                         for (i = num_xforms; i < num_bufs; i++) {
793                                 ret = rte_compressdev_private_xform_create(0,
794                                         compress_xforms[i % num_xforms],
795                                         &priv_xforms[i]);
796                                 if (ret < 0) {
797                                         RTE_LOG(ERR, USER1,
798                                                 "Compression private xform "
799                                                 "could not be created\n");
800                                         goto exit;
801                                 }
802                                 num_priv_xforms++;
803                         }
804
805                         /* Attach non shareable private xform data to ops */
806                         for (i = 0; i < num_bufs; i++)
807                                 ops[i]->private_xform = priv_xforms[i];
808                 }
809
810                 /* Enqueue and dequeue all operations */
811                 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
812                 if (num_enqd < num_bufs) {
813                         RTE_LOG(ERR, USER1,
814                                 "The operations could not be enqueued\n");
815                         goto exit;
816                 }
817
818                 num_total_deqd = 0;
819                 do {
820                         /*
821                          * If retrying a dequeue call, wait for 10 ms to give
822                          * the driver enough time to process the operations
823                          */
824                         if (deqd_retries != 0) {
825                                 /*
826                                  * Avoid infinite loop if not all the
827                                  * operations get out of the device
828                                  */
829                                 if (deqd_retries == MAX_DEQD_RETRIES) {
830                                         RTE_LOG(ERR, USER1,
831                                                 "Not all operations could be "
832                                                 "dequeued\n");
833                                         goto exit;
834                                 }
835                                 usleep(DEQUEUE_WAIT_TIME);
836                         }
837                         num_deqd = rte_compressdev_dequeue_burst(0, 0,
838                                         &ops_processed[num_total_deqd], num_bufs);
839                         num_total_deqd += num_deqd;
840                         deqd_retries++;
841                 } while (num_total_deqd < num_enqd);
842
843                 deqd_retries = 0;
844
845                 /* Free compress private xforms */
846                 for (i = 0; i < num_priv_xforms; i++) {
847                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
848                         priv_xforms[i] = NULL;
849                 }
850                 num_priv_xforms = 0;
851         }
852
853         for (i = 0; i < num_bufs; i++) {
854                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
855                 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
856                 const struct rte_comp_compress_xform *compress_xform =
857                                 &compress_xforms[xform_idx]->compress;
858                 enum rte_comp_huffman huffman_type =
859                         compress_xform->deflate.huffman;
860                 char engine[22];
861                 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL)
862                         strlcpy(engine, "zlib (direct, no pmd)", 22);
863                 else
864                         strlcpy(engine, "pmd", 22);
865
866                 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
867                         " %u bytes (level = %d, huffman = %s)\n",
868                         buf_idx[priv_data->orig_idx], engine,
869                         ops_processed[i]->consumed, ops_processed[i]->produced,
870                         compress_xform->level,
871                         huffman_type_strings[huffman_type]);
872                 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f %%\n",
873                         ops_processed[i]->consumed == 0 ? 0 :
874                         (float)ops_processed[i]->produced /
875                         ops_processed[i]->consumed * 100);
876                 ops[i] = NULL;
877         }
878
879         /*
880          * Check operation status and free source mbufs (destination mbufs and
881          * compress operation information are needed for the decompression stage)
882          */
883         for (i = 0; i < num_bufs; i++) {
884                 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
885                         RTE_LOG(ERR, USER1,
886                                 "Some operations were not successful\n");
887                         goto exit;
888                 }
889                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
890                 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
891                 uncomp_bufs[priv_data->orig_idx] = NULL;
892         }
893
894         /* Allocate buffers for decompressed data */
895         ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
896         if (ret < 0) {
897                 RTE_LOG(ERR, USER1,
898                         "Destination mbufs could not be allocated "
899                         "from the mempool\n");
900                 goto exit;
901         }
902
903         if (sgl) {
904                 for (i = 0; i < num_bufs; i++) {
905                         priv_data = (struct priv_op_data *)
906                                         (ops_processed[i] + 1);
907                         data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
908                         if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
909                                         data_size,
910                                         ts_params->small_mbuf_pool,
911                                         ts_params->large_mbuf_pool,
912                                         MAX_SEGS) < 0)
913                                 goto exit;
914                 }
915
916         } else {
917                 for (i = 0; i < num_bufs; i++) {
918                         priv_data = (struct priv_op_data *)
919                                         (ops_processed[i] + 1);
920                         data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
921                         rte_pktmbuf_append(uncomp_bufs[i], data_size);
922                 }
923         }
924
925         /* Build the decompression operations */
926         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
927         if (ret < 0) {
928                 RTE_LOG(ERR, USER1,
929                         "Decompress operations could not be allocated "
930                         "from the mempool\n");
931                 goto exit;
932         }
933
934         /* Source buffer is the compressed data from the previous operations */
935         for (i = 0; i < num_bufs; i++) {
936                 ops[i]->m_src = ops_processed[i]->m_dst;
937                 ops[i]->m_dst = uncomp_bufs[i];
938                 ops[i]->src.offset = 0;
939                 /*
940                  * Set the length of the compressed data to the
941                  * number of bytes that were produced in the previous stage
942                  */
943                 ops[i]->src.length = ops_processed[i]->produced;
944                 ops[i]->dst.offset = 0;
945                 if (state == RTE_COMP_OP_STATELESS) {
946                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
947                 } else {
948                         RTE_LOG(ERR, USER1,
949                                 "Stateful operations are not supported "
950                                 "in these tests yet\n");
951                         goto exit;
952                 }
953                 ops[i]->input_chksum = 0;
954                 /*
955                  * Copy private data from previous operations,
956                  * to keep the index of the original buffer
957                  */
958                 memcpy(ops[i] + 1, ops_processed[i] + 1,
959                                 sizeof(struct priv_op_data));
960         }
961
962         /*
963          * Free the previous compress operations,
964          * as they are no longer needed
965          */
966         for (i = 0; i < num_bufs; i++) {
967                 rte_comp_op_free(ops_processed[i]);
968                 ops_processed[i] = NULL;
969         }
970
971         /* Decompress data (either with Zlib API or compressdev API) */
972         if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
973                 for (i = 0; i < num_bufs; i++) {
974                         priv_data = (struct priv_op_data *)(ops[i] + 1);
975                         uint16_t xform_idx = priv_data->orig_idx % num_xforms;
976                         const struct rte_comp_xform *decompress_xform =
977                                 decompress_xforms[xform_idx];
978
979                         ret = decompress_zlib(ops[i], decompress_xform);
980                         if (ret < 0)
981                                 goto exit;
982
983                         ops_processed[i] = ops[i];
984                 }
985         } else {
986                 /* Create decompress private xform data */
987                 for (i = 0; i < num_xforms; i++) {
988                         ret = rte_compressdev_private_xform_create(0,
989                                 (const struct rte_comp_xform *)decompress_xforms[i],
990                                 &priv_xforms[i]);
991                         if (ret < 0) {
992                                 RTE_LOG(ERR, USER1,
993                                         "Decompression private xform "
994                                         "could not be created\n");
995                                 goto exit;
996                         }
997                         num_priv_xforms++;
998                 }
999
1000                 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1001                         /* Attach shareable private xform data to ops */
1002                         for (i = 0; i < num_bufs; i++) {
1003                                 priv_data = (struct priv_op_data *)(ops[i] + 1);
1004                                 uint16_t xform_idx = priv_data->orig_idx %
1005                                                                 num_xforms;
1006                                 ops[i]->private_xform = priv_xforms[xform_idx];
1007                         }
1008                 } else {
1009                         /* Create rest of the private xforms for the other ops */
1010                         for (i = num_xforms; i < num_bufs; i++) {
1011                                 ret = rte_compressdev_private_xform_create(0,
1012                                         decompress_xforms[i % num_xforms],
1013                                         &priv_xforms[i]);
1014                                 if (ret < 0) {
1015                                         RTE_LOG(ERR, USER1,
1016                                                 "Decompression private xform "
1017                                                 "could not be created\n");
1018                                         goto exit;
1019                                 }
1020                                 num_priv_xforms++;
1021                         }
1022
1023                         /* Attach non shareable private xform data to ops */
1024                         for (i = 0; i < num_bufs; i++) {
1025                                 priv_data = (struct priv_op_data *)(ops[i] + 1);
1026                                 uint16_t xform_idx = priv_data->orig_idx;
1027                                 ops[i]->private_xform = priv_xforms[xform_idx];
1028                         }
1029                 }
1030
1031                 /* Enqueue and dequeue all operations */
1032                 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1033                 if (num_enqd < num_bufs) {
1034                         RTE_LOG(ERR, USER1,
1035                                 "The operations could not be enqueued\n");
1036                         goto exit;
1037                 }
1038
1039                 num_total_deqd = 0;
1040                 do {
1041                         /*
1042                          * If retrying a dequeue call, wait for 10 ms to give
1043                          * the driver enough time to process the operations
1044                          */
1045                         if (deqd_retries != 0) {
1046                                 /*
1047                                  * Avoid infinite loop if not all the
1048                                  * operations get out of the device
1049                                  */
1050                                 if (deqd_retries == MAX_DEQD_RETRIES) {
1051                                         RTE_LOG(ERR, USER1,
1052                                                 "Not all operations could be "
1053                                                 "dequeued\n");
1054                                         goto exit;
1055                                 }
1056                                 usleep(DEQUEUE_WAIT_TIME);
1057                         }
1058                         num_deqd = rte_compressdev_dequeue_burst(0, 0,
1059                                         &ops_processed[num_total_deqd], num_bufs);
1060                         num_total_deqd += num_deqd;
1061                         deqd_retries++;
1062                 } while (num_total_deqd < num_enqd);
1063
1064                 deqd_retries = 0;
1065         }
1066
1067         for (i = 0; i < num_bufs; i++) {
1068                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1069                 char engine[22];
1070                 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL)
1071                         strlcpy(engine, "zlib (direct, no pmd)", 22);
1072                 else
1073                         strlcpy(engine, "pmd", 22);
1074                 RTE_LOG(DEBUG, USER1,
1075                         "Buffer %u decompressed by %s from %u to %u bytes\n",
1076                         buf_idx[priv_data->orig_idx], engine,
1077                         ops_processed[i]->consumed, ops_processed[i]->produced);
1078                 ops[i] = NULL;
1079         }
1080
1081         /*
1082          * Check operation status and free source mbufs (destination mbufs and
1083          * compress operation information are still needed)
1084          */
1085         for (i = 0; i < num_bufs; i++) {
1086                 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1087                         RTE_LOG(ERR, USER1,
1088                                 "Some operations were not successful\n");
1089                         goto exit;
1090                 }
1091                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1092                 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1093                 comp_bufs[priv_data->orig_idx] = NULL;
1094         }
1095
1096         /*
1097          * Compare the original stream with the decompressed stream
1098          * (both in size and in content)
1099          */
1100         for (i = 0; i < num_bufs; i++) {
1101                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1102                 const char *buf1 = test_bufs[priv_data->orig_idx];
1103                 const char *buf2;
1104                 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1105                 if (contig_buf == NULL) {
1106                         RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1107                                         "be allocated\n");
1108                         goto exit;
1109                 }
1110
1111                 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1112                                 ops_processed[i]->produced, contig_buf);
1113
1114                 if (compare_buffers(buf1, strlen(buf1) + 1,
1115                                 buf2, ops_processed[i]->produced) < 0)
1116                         goto exit;
1117
1118                 rte_free(contig_buf);
1119                 contig_buf = NULL;
1120         }
1121
1122         ret_status = 0;
1123
1124 exit:
1125         /* Free resources */
1126         for (i = 0; i < num_bufs; i++) {
1127                 rte_pktmbuf_free(uncomp_bufs[i]);
1128                 rte_pktmbuf_free(comp_bufs[i]);
1129                 rte_comp_op_free(ops[i]);
1130                 rte_comp_op_free(ops_processed[i]);
1131         }
1132         for (i = 0; i < num_priv_xforms; i++) {
1133                 if (priv_xforms[i] != NULL)
1134                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
1135         }
1136         rte_free(contig_buf);
1137
1138         return ret_status;
1139 }
1140
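/*
 * Stateless DEFLATE with fixed Huffman coding: each test buffer is
 * round-tripped twice, once compressed by the PMD and verified with zlib,
 * once compressed with zlib and decompressed by the PMD. Returns -ENOTSUP
 * if the device does not advertise fixed Huffman support.
 */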
1141 static int
1142 test_compressdev_deflate_stateless_fixed(void)
1143 {
1144         struct comp_testsuite_params *ts_params = &testsuite_params;
1145         const char *test_buffer;
1146         uint16_t i;
1147         int ret;
1148         const struct rte_compressdev_capabilities *capab;
1149
1150         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1151         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1152
1153         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1154                 return -ENOTSUP;
1155
1156         struct rte_comp_xform *compress_xform =
1157                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1158
1159         if (compress_xform == NULL) {
1160                 RTE_LOG(ERR, USER1,
1161                         "Compress xform could not be created\n");
1162                 ret = TEST_FAILED;
1163                 goto exit;
1164         }
1165
1166         memcpy(compress_xform, ts_params->def_comp_xform,
1167                         sizeof(struct rte_comp_xform));
1168         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1169
1170         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1171                 test_buffer = compress_test_bufs[i];
1172
1173                 /* Compress with compressdev, decompress with Zlib */
1174                 if (test_deflate_comp_decomp(&test_buffer, 1,
1175                                 &i,
1176                                 &compress_xform,
1177                                 &ts_params->def_decomp_xform,
1178                                 1,
1179                                 RTE_COMP_OP_STATELESS,
1180                                 0,
1181                                 ZLIB_DECOMPRESS) < 0) {
1182                         ret = TEST_FAILED;
1183                         goto exit;
1184                 }
1185
1186                 /* Compress with Zlib, decompress with compressdev */
1187                 if (test_deflate_comp_decomp(&test_buffer, 1,
1188                                 &i,
1189                                 &compress_xform,
1190                                 &ts_params->def_decomp_xform,
1191                                 1,
1192                                 RTE_COMP_OP_STATELESS,
1193                                 0,
1194                                 ZLIB_COMPRESS) < 0) {
1195                         ret = TEST_FAILED;
1196                         goto exit;
1197                 }
1198         }
1199
1200         ret = TEST_SUCCESS;
1201
1202 exit:
1203         rte_free(compress_xform);
1204         return ret;
1205 }
1206
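/* Same round trip as the fixed Huffman case, using dynamic Huffman coding */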
1207 static int
1208 test_compressdev_deflate_stateless_dynamic(void)
1209 {
1210         struct comp_testsuite_params *ts_params = &testsuite_params;
1211         const char *test_buffer;
1212         uint16_t i;
1213         int ret;
1214         struct rte_comp_xform *compress_xform =
1215                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1216
1217         const struct rte_compressdev_capabilities *capab;
1218
1219         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1220         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1221
1222         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) {
                rte_free(compress_xform);
1223                 return -ENOTSUP;
        }
1224
1225         if (compress_xform == NULL) {
1226                 RTE_LOG(ERR, USER1,
1227                         "Compress xform could not be created\n");
1228                 ret = TEST_FAILED;
1229                 goto exit;
1230         }
1231
1232         memcpy(compress_xform, ts_params->def_comp_xform,
1233                         sizeof(struct rte_comp_xform));
1234         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1235
1236         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1237                 test_buffer = compress_test_bufs[i];
1238
1239                 /* Compress with compressdev, decompress with Zlib */
1240                 if (test_deflate_comp_decomp(&test_buffer, 1,
1241                                 &i,
1242                                 &compress_xform,
1243                                 &ts_params->def_decomp_xform,
1244                                 1,
1245                                 RTE_COMP_OP_STATELESS,
1246                                 0,
1247                                 ZLIB_DECOMPRESS) < 0) {
1248                         ret = TEST_FAILED;
1249                         goto exit;
1250                 }
1251
1252                 /* Compress with Zlib, decompress with compressdev */
1253                 if (test_deflate_comp_decomp(&test_buffer, 1,
1254                                 &i,
1255                                 &compress_xform,
1256                                 &ts_params->def_decomp_xform,
1257                                 1,
1258                                 RTE_COMP_OP_STATELESS,
1259                                 0,
1260                                 ZLIB_COMPRESS) < 0) {
1261                         ret = TEST_FAILED;
1262                         goto exit;
1263                 }
1264         }
1265
1266         ret = TEST_SUCCESS;
1267
1268 exit:
1269         rte_free(compress_xform);
1270         return ret;
1271 }
1272
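/* Round-trips all test buffers in a single burst of operations */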
1273 static int
1274 test_compressdev_deflate_stateless_multi_op(void)
1275 {
1276         struct comp_testsuite_params *ts_params = &testsuite_params;
1277         uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1278         uint16_t buf_idx[num_bufs];
1279         uint16_t i;
1280
1281         for (i = 0; i < num_bufs; i++)
1282                 buf_idx[i] = i;
1283
1284         /* Compress with compressdev, decompress with Zlib */
1285         if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1286                         buf_idx,
1287                         &ts_params->def_comp_xform,
1288                         &ts_params->def_decomp_xform,
1289                         1,
1290                         RTE_COMP_OP_STATELESS,
1291                         0,
1292                         ZLIB_DECOMPRESS) < 0)
1293                 return TEST_FAILED;
1294
1295         /* Compress with Zlib, decompress with compressdev */
1296         if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1297                         buf_idx,
1298                         &ts_params->def_comp_xform,
1299                         &ts_params->def_decomp_xform,
1300                         1,
1301                         RTE_COMP_OP_STATELESS,
1302                         0,
1303                         ZLIB_COMPRESS) < 0)
1304                 return TEST_FAILED;
1305
1306         return TEST_SUCCESS;
1307 }
1308
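/* Round-trips each test buffer at every compression level, compressing with the PMD and checking with zlib */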
static int
test_compressdev_deflate_stateless_multi_level(void)
{
	struct comp_testsuite_params *ts_params = &testsuite_params;
	const char *test_buffer;
	unsigned int level;
	uint16_t i;
	int ret;
	struct rte_comp_xform *compress_xform =
			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);

	if (compress_xform == NULL) {
		RTE_LOG(ERR, USER1,
			"Compress xform could not be created\n");
		ret = TEST_FAILED;
		goto exit;
	}

	memcpy(compress_xform, ts_params->def_comp_xform,
			sizeof(struct rte_comp_xform));

	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
		test_buffer = compress_test_bufs[i];
		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
				level++) {
			compress_xform->compress.level = level;
			/* Compress with compressdev, decompress with Zlib */
			if (test_deflate_comp_decomp(&test_buffer, 1,
					&i,
					&compress_xform,
					&ts_params->def_decomp_xform,
					1,
					RTE_COMP_OP_STATELESS,
					0,
					ZLIB_DECOMPRESS) < 0) {
				ret = TEST_FAILED;
				goto exit;
			}
		}
	}

	ret = TEST_SUCCESS;

exit:
	rte_free(compress_xform);
	return ret;
}

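/*
 * Creates NUM_XFORMS compress/decompress xform pairs, each using a
 * different compression level, and applies them to the same test
 * buffer in one call, verifying the output with Zlib decompression.
 */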
#define NUM_XFORMS 3
static int
test_compressdev_deflate_stateless_multi_xform(void)
{
	struct comp_testsuite_params *ts_params = &testsuite_params;
	uint16_t num_bufs = NUM_XFORMS;
	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
	const char *test_buffers[NUM_XFORMS];
	uint16_t i;
	unsigned int level = RTE_COMP_LEVEL_MIN;
	uint16_t buf_idx[num_bufs];
	int ret;

	/* Create multiple xforms with various levels */
	for (i = 0; i < NUM_XFORMS; i++) {
		compress_xforms[i] = rte_malloc(NULL,
				sizeof(struct rte_comp_xform), 0);
		if (compress_xforms[i] == NULL) {
			RTE_LOG(ERR, USER1,
				"Compress xform could not be created\n");
			ret = TEST_FAILED;
			goto exit;
		}

		memcpy(compress_xforms[i], ts_params->def_comp_xform,
				sizeof(struct rte_comp_xform));
		compress_xforms[i]->compress.level = level;
		level++;

		decompress_xforms[i] = rte_malloc(NULL,
				sizeof(struct rte_comp_xform), 0);
		if (decompress_xforms[i] == NULL) {
			RTE_LOG(ERR, USER1,
				"Decompress xform could not be created\n");
			ret = TEST_FAILED;
			goto exit;
		}

		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
				sizeof(struct rte_comp_xform));
	}

	for (i = 0; i < NUM_XFORMS; i++) {
		buf_idx[i] = 0;
		/* Use the same buffer in all sessions */
		test_buffers[i] = compress_test_bufs[0];
	}

	/* Compress with compressdev, decompress with Zlib */
	if (test_deflate_comp_decomp(test_buffers, num_bufs,
			buf_idx,
			compress_xforms,
			decompress_xforms,
			NUM_XFORMS,
			RTE_COMP_OP_STATELESS,
			0,
			ZLIB_DECOMPRESS) < 0) {
		ret = TEST_FAILED;
		goto exit;
	}

	ret = TEST_SUCCESS;

exit:
	for (i = 0; i < NUM_XFORMS; i++) {
		rte_free(compress_xforms[i]);
		rte_free(decompress_xforms[i]);
	}

	return ret;
}

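/*
 * Runs the stateless compress/decompress test on multi-segment (SGL)
 * mbufs; returns -ENOTSUP if the device does not advertise
 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT.
 */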
static int
test_compressdev_deflate_stateless_sgl(void)
{
	struct comp_testsuite_params *ts_params = &testsuite_params;
	uint16_t i;
	const char *test_buffer;
	const struct rte_compressdev_capabilities *capab;

	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
		return -ENOTSUP;

	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
		test_buffer = compress_test_bufs[i];
		/* Compress with compressdev, decompress with Zlib */
		if (test_deflate_comp_decomp(&test_buffer, 1,
				&i,
				&ts_params->def_comp_xform,
				&ts_params->def_decomp_xform,
				1,
				RTE_COMP_OP_STATELESS,
				1,
				ZLIB_DECOMPRESS) < 0)
			return TEST_FAILED;

		/* Compress with Zlib, decompress with compressdev */
		if (test_deflate_comp_decomp(&test_buffer, 1,
				&i,
				&ts_params->def_comp_xform,
				&ts_params->def_decomp_xform,
				1,
				RTE_COMP_OP_STATELESS,
				1,
				ZLIB_COMPRESS) < 0)
			return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

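/* Test suite listing all compressdev unit test cases */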
static struct unit_test_suite compressdev_testsuite = {
	.suite_name = "compressdev unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_compressdev_invalid_configuration),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_level),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_xform),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_sgl),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

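/* Entry point invoked by the "compressdev_autotest" test command */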
static int
test_compressdev(void)
{
	return unit_test_suite_runner(&compressdev_testsuite);
}

REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);