/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#include <string.h>
#include <zlib.h>
#include <math.h>
#include <unistd.h>

#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_compressdev.h>
#include <rte_string_fns.h>

#include "test_compressdev_test_buffer.h"
#include "test.h"

#define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))

#define DEFAULT_WINDOW_SIZE 15
#define DEFAULT_MEM_LEVEL 8
#define MAX_DEQD_RETRIES 10
#define DEQUEUE_WAIT_TIME 10000

/*
 * 30% extra size for the compressed data buffer compared to the original
 * data, in case the data size cannot be reduced and it is actually bigger
 * due to the compression block headers
 */
#define COMPRESS_BUF_SIZE_RATIO 1.3
#define NUM_LARGE_MBUFS 16
#define SMALL_SEG_SIZE 256
#define MAX_SEGS 16
#define NUM_OPS 16
#define NUM_MAX_XFORMS 16
#define NUM_MAX_INFLIGHT_OPS 128
#define CACHE_SIZE 0

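/* Human-readable names for the Huffman encoding types, indexed by enum rte_comp_huffman */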
const char *
huffman_type_strings[] = {
        [RTE_COMP_HUFFMAN_DEFAULT]      = "PMD default",
        [RTE_COMP_HUFFMAN_FIXED]        = "Fixed",
        [RTE_COMP_HUFFMAN_DYNAMIC]      = "Dynamic"
};

enum zlib_direction {
        ZLIB_NONE,
        ZLIB_COMPRESS,
        ZLIB_DECOMPRESS,
        ZLIB_ALL
};

struct priv_op_data {
        uint16_t orig_idx;
};

struct comp_testsuite_params {
        struct rte_mempool *large_mbuf_pool;
        struct rte_mempool *small_mbuf_pool;
        struct rte_mempool *op_pool;
        struct rte_comp_xform *def_comp_xform;
        struct rte_comp_xform *def_decomp_xform;
};

static struct comp_testsuite_params testsuite_params = { 0 };

static void
testsuite_teardown(void)
{
        struct comp_testsuite_params *ts_params = &testsuite_params;

        rte_mempool_free(ts_params->large_mbuf_pool);
        rte_mempool_free(ts_params->small_mbuf_pool);
        rte_mempool_free(ts_params->op_pool);
        rte_free(ts_params->def_comp_xform);
        rte_free(ts_params->def_decomp_xform);
}

static int
testsuite_setup(void)
{
        struct comp_testsuite_params *ts_params = &testsuite_params;
        uint32_t max_buf_size = 0;
        unsigned int i;

        if (rte_compressdev_count() == 0) {
                RTE_LOG(ERR, USER1, "Need at least one compress device\n");
                return TEST_FAILED;
        }

        RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
                                rte_compressdev_name_get(0));

        for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
                max_buf_size = RTE_MAX(max_buf_size,
                                strlen(compress_test_bufs[i]) + 1);

        /*
         * Buffers to be used in compression and decompression.
         * Since compressed data might be larger than the original data
         * (due to block headers), buffers should be big enough for both cases.
         */
        max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
        ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
                        NUM_LARGE_MBUFS,
                        CACHE_SIZE, 0,
                        max_buf_size + RTE_PKTMBUF_HEADROOM,
                        rte_socket_id());
        if (ts_params->large_mbuf_pool == NULL) {
                RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
                return TEST_FAILED;
        }

        /* Create mempool with smaller buffers for SGL testing */
        ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
                        NUM_LARGE_MBUFS * MAX_SEGS,
                        CACHE_SIZE, 0,
                        SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
                        rte_socket_id());
        if (ts_params->small_mbuf_pool == NULL) {
                RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
                goto exit;
        }

        ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
                                0, sizeof(struct priv_op_data),
                                rte_socket_id());
        if (ts_params->op_pool == NULL) {
                RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
                goto exit;
        }

        ts_params->def_comp_xform =
                        rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
        if (ts_params->def_comp_xform == NULL) {
                RTE_LOG(ERR, USER1,
                        "Default compress xform could not be created\n");
                goto exit;
        }
        ts_params->def_decomp_xform =
                        rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
        if (ts_params->def_decomp_xform == NULL) {
                RTE_LOG(ERR, USER1,
                        "Default decompress xform could not be created\n");
                goto exit;
        }

        /* Initialize default values for compress/decompress xforms */
        ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
        ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
        ts_params->def_comp_xform->compress.deflate.huffman =
                                                RTE_COMP_HUFFMAN_DEFAULT;
        ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
        ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
        ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;

        ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
        ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
        ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
        ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;

        return TEST_SUCCESS;

exit:
        testsuite_teardown();

        return TEST_FAILED;
}

static int
generic_ut_setup(void)
{
        /* Configure compressdev (one device, one queue pair) */
        struct rte_compressdev_config config = {
                .socket_id = rte_socket_id(),
                .nb_queue_pairs = 1,
                .max_nb_priv_xforms = NUM_MAX_XFORMS,
                .max_nb_streams = 0
        };

        if (rte_compressdev_configure(0, &config) < 0) {
                RTE_LOG(ERR, USER1, "Device configuration failed\n");
                return -1;
        }

        if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
                        rte_socket_id()) < 0) {
                RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
                return -1;
        }

        if (rte_compressdev_start(0) < 0) {
                RTE_LOG(ERR, USER1, "Device could not be started\n");
                return -1;
        }

        return 0;
}

static void
generic_ut_teardown(void)
{
        rte_compressdev_stop(0);
        if (rte_compressdev_close(0) < 0)
                RTE_LOG(ERR, USER1, "Device could not be closed\n");
}

static int
test_compressdev_invalid_configuration(void)
{
        struct rte_compressdev_config invalid_config;
        struct rte_compressdev_config valid_config = {
                .socket_id = rte_socket_id(),
                .nb_queue_pairs = 1,
                .max_nb_priv_xforms = NUM_MAX_XFORMS,
                .max_nb_streams = 0
        };
        struct rte_compressdev_info dev_info;

        /* Invalid configuration with 0 queue pairs */
        memcpy(&invalid_config, &valid_config,
                        sizeof(struct rte_compressdev_config));
        invalid_config.nb_queue_pairs = 0;

        TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
                        "Device configuration was successful "
                        "with no queue pairs (invalid)\n");

        /*
         * Invalid configuration with too many queue pairs
         * (if there is an actual maximum number of queue pairs)
         */
        rte_compressdev_info_get(0, &dev_info);
        if (dev_info.max_nb_queue_pairs != 0) {
                memcpy(&invalid_config, &valid_config,
                        sizeof(struct rte_compressdev_config));
                invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;

                TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
                                "Device configuration was successful "
                                "with too many queue pairs (invalid)\n");
        }

        /* Invalid queue pair setup, with no number of queue pairs set */
        TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
                                NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
                        "Queue pair setup was successful "
                        "with no queue pairs set (invalid)\n");

        return TEST_SUCCESS;
}

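/* Compare two buffers by length and content; return 0 if identical, -1 otherwise */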
static int
compare_buffers(const char *buffer1, uint32_t buffer1_len,
                const char *buffer2, uint32_t buffer2_len)
{
        if (buffer1_len != buffer2_len) {
                RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
                return -1;
        }

        if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
                RTE_LOG(ERR, USER1, "Buffers are different\n");
                return -1;
        }

        return 0;
}

/*
 * Maps compressdev and Zlib flush flags
 */
static int
map_zlib_flush_flag(enum rte_comp_flush_flag flag)
{
        switch (flag) {
        case RTE_COMP_FLUSH_NONE:
                return Z_NO_FLUSH;
        case RTE_COMP_FLUSH_SYNC:
                return Z_SYNC_FLUSH;
        case RTE_COMP_FLUSH_FULL:
                return Z_FULL_FLUSH;
        case RTE_COMP_FLUSH_FINAL:
                return Z_FINISH;
        /*
         * There should be only the values above,
         * so this should never happen
         */
        default:
                return -1;
        }
}

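/*
 * Reference compression path: compress op->m_src into op->m_dst with Zlib,
 * mirroring a stateless compressdev operation
 */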
static int
compress_zlib(struct rte_comp_op *op,
                const struct rte_comp_xform *xform, int mem_level)
{
        z_stream stream;
        int zlib_flush;
        int strategy, window_bits, comp_level;
        int ret = TEST_FAILED;
        uint8_t *single_src_buf = NULL;
        uint8_t *single_dst_buf = NULL;

        /* initialize zlib stream */
        stream.zalloc = Z_NULL;
        stream.zfree = Z_NULL;
        stream.opaque = Z_NULL;

        if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
                strategy = Z_FIXED;
        else
                strategy = Z_DEFAULT_STRATEGY;

        /*
         * Window bits is the base two logarithm of the window size (in bytes).
         * When doing raw DEFLATE, this number will be negative.
         */
        window_bits = -(xform->compress.window_size);

        comp_level = xform->compress.level;

        if (comp_level != RTE_COMP_LEVEL_NONE)
                ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
                        window_bits, mem_level, strategy);
        else
                ret = deflateInit(&stream, Z_NO_COMPRESSION);

        if (ret != Z_OK) {
                printf("Zlib deflate could not be initialized\n");
                goto exit;
        }

        /* Assuming stateless operation */
        /* SGL */
        if (op->m_src->nb_segs > 1) {
                single_src_buf = rte_malloc(NULL,
                                rte_pktmbuf_pkt_len(op->m_src), 0);
                if (single_src_buf == NULL) {
                        RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
                        goto exit;
                }
                single_dst_buf = rte_malloc(NULL,
                                rte_pktmbuf_pkt_len(op->m_dst), 0);
                if (single_dst_buf == NULL) {
                        RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
                        goto exit;
                }
                if (rte_pktmbuf_read(op->m_src, 0,
                                        rte_pktmbuf_pkt_len(op->m_src),
                                        single_src_buf) == NULL) {
                        RTE_LOG(ERR, USER1,
                                "Buffer could not be read entirely\n");
                        goto exit;
                }

                stream.avail_in = op->src.length;
                stream.next_in = single_src_buf;
                stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
                stream.next_out = single_dst_buf;

        } else {
                stream.avail_in = op->src.length;
                stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
                stream.avail_out = op->m_dst->data_len;
                stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
        }
        /* Stateless operation, the whole buffer will be compressed in one go */
        zlib_flush = map_zlib_flush_flag(op->flush_flag);
        ret = deflate(&stream, zlib_flush);

        if (stream.avail_in != 0) {
                RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
                goto exit;
        }

        if (ret != Z_STREAM_END)
                goto exit;

        /* Copy data to destination SGL */
        if (op->m_src->nb_segs > 1) {
                uint32_t remaining_data = stream.total_out;
                uint8_t *src_data = single_dst_buf;
                struct rte_mbuf *dst_buf = op->m_dst;

                while (remaining_data > 0) {
                        uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
                                        uint8_t *);
                        /* Last segment */
                        if (remaining_data < dst_buf->data_len) {
                                memcpy(dst_data, src_data, remaining_data);
                                remaining_data = 0;
                        } else {
                                memcpy(dst_data, src_data, dst_buf->data_len);
                                remaining_data -= dst_buf->data_len;
                                src_data += dst_buf->data_len;
                                dst_buf = dst_buf->next;
                        }
                }
        }

        op->consumed = stream.total_in;
        op->produced = stream.total_out;
        op->status = RTE_COMP_OP_STATUS_SUCCESS;

        deflateReset(&stream);

        ret = 0;
exit:
        deflateEnd(&stream);
        rte_free(single_src_buf);
        rte_free(single_dst_buf);

        return ret;
}

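/*
 * Reference decompression path: inflate op->m_src into op->m_dst with Zlib,
 * mirroring a stateless compressdev operation
 */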
static int
decompress_zlib(struct rte_comp_op *op,
                const struct rte_comp_xform *xform)
{
        z_stream stream;
        int window_bits;
        int zlib_flush;
        int ret = TEST_FAILED;
        uint8_t *single_src_buf = NULL;
        uint8_t *single_dst_buf = NULL;

        /* initialize zlib stream */
        stream.zalloc = Z_NULL;
        stream.zfree = Z_NULL;
        stream.opaque = Z_NULL;

        /*
         * Window bits is the base two logarithm of the window size (in bytes).
         * When doing raw DEFLATE, this number will be negative.
         */
        window_bits = -(xform->decompress.window_size);

        ret = inflateInit2(&stream, window_bits);

        if (ret != Z_OK) {
                printf("Zlib inflate could not be initialized\n");
                goto exit;
        }

        /* Assuming stateless operation */
        /* SGL */
        if (op->m_src->nb_segs > 1) {
                single_src_buf = rte_malloc(NULL,
                                rte_pktmbuf_pkt_len(op->m_src), 0);
                if (single_src_buf == NULL) {
                        RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
                        goto exit;
                }
                single_dst_buf = rte_malloc(NULL,
                                rte_pktmbuf_pkt_len(op->m_dst), 0);
                if (single_dst_buf == NULL) {
                        RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
                        goto exit;
                }
                if (rte_pktmbuf_read(op->m_src, 0,
                                        rte_pktmbuf_pkt_len(op->m_src),
                                        single_src_buf) == NULL) {
                        RTE_LOG(ERR, USER1,
                                "Buffer could not be read entirely\n");
                        goto exit;
                }

                stream.avail_in = op->src.length;
                stream.next_in = single_src_buf;
                stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
                stream.next_out = single_dst_buf;

        } else {
                stream.avail_in = op->src.length;
                stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
                stream.avail_out = op->m_dst->data_len;
                stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
        }

        /* Stateless operation, the whole buffer will be decompressed in one go */
        zlib_flush = map_zlib_flush_flag(op->flush_flag);
        ret = inflate(&stream, zlib_flush);

        if (stream.avail_in != 0) {
                RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
                goto exit;
        }

        if (ret != Z_STREAM_END)
                goto exit;

        if (op->m_src->nb_segs > 1) {
                uint32_t remaining_data = stream.total_out;
                uint8_t *src_data = single_dst_buf;
                struct rte_mbuf *dst_buf = op->m_dst;

                while (remaining_data > 0) {
                        uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
                                        uint8_t *);
                        /* Last segment */
                        if (remaining_data < dst_buf->data_len) {
                                memcpy(dst_data, src_data, remaining_data);
                                remaining_data = 0;
                        } else {
                                memcpy(dst_data, src_data, dst_buf->data_len);
                                remaining_data -= dst_buf->data_len;
                                src_data += dst_buf->data_len;
                                dst_buf = dst_buf->next;
                        }
                }
        }

        op->consumed = stream.total_in;
        op->produced = stream.total_out;
        op->status = RTE_COMP_OP_STATUS_SUCCESS;

        inflateReset(&stream);

        ret = 0;
exit:
        inflateEnd(&stream);
        rte_free(single_src_buf);
        rte_free(single_dst_buf);

        return ret;
}

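/*
 * Build a scatter-gather (chained) mbuf of total_data_size bytes on top of
 * head_buf; if test_buf is not NULL its contents are copied into the chain
 * (a NULL test_buf leaves the destination data uninitialized)
 */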
static int
prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
                uint32_t total_data_size,
                struct rte_mempool *small_mbuf_pool,
                struct rte_mempool *large_mbuf_pool,
                uint8_t limit_segs_in_sgl)
{
        uint32_t remaining_data = total_data_size;
        uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
        struct rte_mempool *pool;
        struct rte_mbuf *next_seg;
        uint32_t data_size;
        char *buf_ptr;
        const char *data_ptr = test_buf;
        uint16_t i;
        int ret;

        if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
                num_remaining_segs = limit_segs_in_sgl - 1;

        /*
         * Allocate data in the first segment (header) and
         * copy data if test buffer is provided
         */
        if (remaining_data < SMALL_SEG_SIZE)
                data_size = remaining_data;
        else
                data_size = SMALL_SEG_SIZE;
        buf_ptr = rte_pktmbuf_append(head_buf, data_size);
        if (buf_ptr == NULL) {
                RTE_LOG(ERR, USER1,
                        "Not enough space in the 1st buffer\n");
                return -1;
        }

        if (data_ptr != NULL) {
                /* Copy characters without NULL terminator */
                strncpy(buf_ptr, data_ptr, data_size);
                data_ptr += data_size;
        }
        remaining_data -= data_size;
        num_remaining_segs--;

        /*
         * Allocate the rest of the segments,
         * copy the rest of the data and chain the segments.
         */
        for (i = 0; i < num_remaining_segs; i++) {

                if (i == (num_remaining_segs - 1)) {
                        /* last segment */
                        if (remaining_data > SMALL_SEG_SIZE)
                                pool = large_mbuf_pool;
                        else
                                pool = small_mbuf_pool;
                        data_size = remaining_data;
                } else {
                        data_size = SMALL_SEG_SIZE;
                        pool = small_mbuf_pool;
                }

                next_seg = rte_pktmbuf_alloc(pool);
                if (next_seg == NULL) {
                        RTE_LOG(ERR, USER1,
                                "New segment could not be allocated "
                                "from the mempool\n");
                        return -1;
                }
                buf_ptr = rte_pktmbuf_append(next_seg, data_size);
                if (buf_ptr == NULL) {
                        RTE_LOG(ERR, USER1,
                                "Not enough space in the buffer\n");
                        rte_pktmbuf_free(next_seg);
                        return -1;
                }
                if (data_ptr != NULL) {
                        /* Copy characters without NULL terminator */
                        strncpy(buf_ptr, data_ptr, data_size);
                        data_ptr += data_size;
                }
                remaining_data -= data_size;

                ret = rte_pktmbuf_chain(head_buf, next_seg);
                if (ret != 0) {
                        rte_pktmbuf_free(next_seg);
                        RTE_LOG(ERR, USER1,
                                "Segment could not be chained\n");
                        return -1;
                }
        }

        return 0;
}

/*
 * Compresses and decompresses buffer with compressdev API and Zlib API
 */
static int
test_deflate_comp_decomp(const char * const test_bufs[],
                unsigned int num_bufs,
                uint16_t buf_idx[],
                struct rte_comp_xform *compress_xforms[],
                struct rte_comp_xform *decompress_xforms[],
                unsigned int num_xforms,
                enum rte_comp_op_type state,
                unsigned int sgl,
                enum zlib_direction zlib_dir)
{
        struct comp_testsuite_params *ts_params = &testsuite_params;
        int ret_status = -1;
        int ret;
        struct rte_mbuf *uncomp_bufs[num_bufs];
        struct rte_mbuf *comp_bufs[num_bufs];
        struct rte_comp_op *ops[num_bufs];
        struct rte_comp_op *ops_processed[num_bufs];
        void *priv_xforms[num_bufs];
        uint16_t num_enqd, num_deqd, num_total_deqd;
        uint16_t num_priv_xforms = 0;
        unsigned int deqd_retries = 0;
        struct priv_op_data *priv_data;
        char *buf_ptr;
        unsigned int i;
        struct rte_mempool *buf_pool;
        uint32_t data_size;
        const struct rte_compressdev_capabilities *capa =
                rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
        char *contig_buf = NULL;

        /* Initialize all arrays to NULL */
        memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
        memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
        memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
        memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
        memset(priv_xforms, 0, sizeof(void *) * num_bufs);

        if (sgl)
                buf_pool = ts_params->small_mbuf_pool;
        else
                buf_pool = ts_params->large_mbuf_pool;

        /* Prepare the source mbufs with the data */
        ret = rte_pktmbuf_alloc_bulk(buf_pool,
                                uncomp_bufs, num_bufs);
        if (ret < 0) {
                RTE_LOG(ERR, USER1,
                        "Source mbufs could not be allocated "
                        "from the mempool\n");
                goto exit;
        }

        if (sgl) {
                for (i = 0; i < num_bufs; i++) {
                        data_size = strlen(test_bufs[i]) + 1;
                        if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
                                        data_size,
                                        ts_params->small_mbuf_pool,
                                        ts_params->large_mbuf_pool,
                                        MAX_SEGS) < 0)
                                goto exit;
                }
        } else {
                for (i = 0; i < num_bufs; i++) {
                        data_size = strlen(test_bufs[i]) + 1;
                        buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
                        snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
                }
        }

        /* Prepare the destination mbufs */
        ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
        if (ret < 0) {
                RTE_LOG(ERR, USER1,
                        "Destination mbufs could not be allocated "
                        "from the mempool\n");
                goto exit;
        }

        if (sgl) {
                for (i = 0; i < num_bufs; i++) {
                        data_size = strlen(test_bufs[i]) *
                                COMPRESS_BUF_SIZE_RATIO;
                        if (prepare_sgl_bufs(NULL, comp_bufs[i],
                                        data_size,
                                        ts_params->small_mbuf_pool,
                                        ts_params->large_mbuf_pool,
                                        MAX_SEGS) < 0)
                                goto exit;
                }

        } else {
                for (i = 0; i < num_bufs; i++) {
                        data_size = strlen(test_bufs[i]) *
                                COMPRESS_BUF_SIZE_RATIO;
                        rte_pktmbuf_append(comp_bufs[i], data_size);
                }
        }

        /* Build the compression operations */
        ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
        if (ret < 0) {
                RTE_LOG(ERR, USER1,
                        "Compress operations could not be allocated "
                        "from the mempool\n");
                goto exit;
        }

        for (i = 0; i < num_bufs; i++) {
                ops[i]->m_src = uncomp_bufs[i];
                ops[i]->m_dst = comp_bufs[i];
                ops[i]->src.offset = 0;
                ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
                ops[i]->dst.offset = 0;
                if (state == RTE_COMP_OP_STATELESS) {
                        ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
                } else {
                        RTE_LOG(ERR, USER1,
                                "Stateful operations are not supported "
                                "in these tests yet\n");
                        goto exit;
                }
                ops[i]->input_chksum = 0;
                /*
                 * Store the original operation index in the private data,
                 * since ordering does not have to be maintained
                 * when dequeueing from compressdev, so a comparison
                 * can be done at the end of the test.
                 */
                priv_data = (struct priv_op_data *) (ops[i] + 1);
                priv_data->orig_idx = i;
        }

        /* Compress data (either with Zlib API or compressdev API) */
        if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
                for (i = 0; i < num_bufs; i++) {
                        const struct rte_comp_xform *compress_xform =
                                compress_xforms[i % num_xforms];
                        ret = compress_zlib(ops[i], compress_xform,
                                        DEFAULT_MEM_LEVEL);
                        if (ret < 0)
                                goto exit;

                        ops_processed[i] = ops[i];
                }
        } else {
                /* Create compress private xform data */
                for (i = 0; i < num_xforms; i++) {
                        ret = rte_compressdev_private_xform_create(0,
                                (const struct rte_comp_xform *)compress_xforms[i],
                                &priv_xforms[i]);
                        if (ret < 0) {
                                RTE_LOG(ERR, USER1,
                                        "Compression private xform "
                                        "could not be created\n");
                                goto exit;
                        }
                        num_priv_xforms++;
                }

                if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
                        /* Attach shareable private xform data to ops */
                        for (i = 0; i < num_bufs; i++)
                                ops[i]->private_xform = priv_xforms[i % num_xforms];
                } else {
                        /* Create rest of the private xforms for the other ops */
                        for (i = num_xforms; i < num_bufs; i++) {
                                ret = rte_compressdev_private_xform_create(0,
                                        compress_xforms[i % num_xforms],
                                        &priv_xforms[i]);
                                if (ret < 0) {
                                        RTE_LOG(ERR, USER1,
                                                "Compression private xform "
                                                "could not be created\n");
                                        goto exit;
                                }
                                num_priv_xforms++;
                        }

                        /* Attach non shareable private xform data to ops */
                        for (i = 0; i < num_bufs; i++)
                                ops[i]->private_xform = priv_xforms[i];
                }

                /* Enqueue and dequeue all operations */
                num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
                if (num_enqd < num_bufs) {
                        RTE_LOG(ERR, USER1,
                                "The operations could not be enqueued\n");
                        goto exit;
                }

                num_total_deqd = 0;
                do {
                        /*
                         * If retrying a dequeue call, wait for 10 ms to give
                         * the driver enough time to process the operations
                         */
                        if (deqd_retries != 0) {
                                /*
                                 * Avoid infinite loop if not all the
                                 * operations get out of the device
                                 */
                                if (deqd_retries == MAX_DEQD_RETRIES) {
                                        RTE_LOG(ERR, USER1,
                                                "Not all operations could be "
                                                "dequeued\n");
                                        goto exit;
                                }
                                usleep(DEQUEUE_WAIT_TIME);
                        }
                        num_deqd = rte_compressdev_dequeue_burst(0, 0,
                                        &ops_processed[num_total_deqd], num_bufs);
                        num_total_deqd += num_deqd;
                        deqd_retries++;
                } while (num_total_deqd < num_enqd);

                deqd_retries = 0;

                /* Free compress private xforms */
                for (i = 0; i < num_priv_xforms; i++) {
                        rte_compressdev_private_xform_free(0, priv_xforms[i]);
                        priv_xforms[i] = NULL;
                }
                num_priv_xforms = 0;
        }

        for (i = 0; i < num_bufs; i++) {
                priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
                uint16_t xform_idx = priv_data->orig_idx % num_xforms;
                const struct rte_comp_compress_xform *compress_xform =
                                &compress_xforms[xform_idx]->compress;
                enum rte_comp_huffman huffman_type =
                        compress_xform->deflate.huffman;
                char engine[22];
                if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL)
                        strlcpy(engine, "zlib (direct, no pmd)", 22);
                else
                        strlcpy(engine, "pmd", 22);

                RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
                        " %u bytes (level = %d, huffman = %s)\n",
                        buf_idx[priv_data->orig_idx], engine,
                        ops_processed[i]->consumed, ops_processed[i]->produced,
                        compress_xform->level,
                        huffman_type_strings[huffman_type]);
                RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
                        ops_processed[i]->consumed == 0 ? 0 :
                        (float)ops_processed[i]->produced /
                        ops_processed[i]->consumed * 100);
                ops[i] = NULL;
        }

        /*
         * Check operation status and free source mbufs (destination mbufs and
         * compress operation information are needed for the decompression stage)
         */
        for (i = 0; i < num_bufs; i++) {
                if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
                        RTE_LOG(ERR, USER1,
                                "Some operations were not successful\n");
                        goto exit;
                }
                priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
                rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
                uncomp_bufs[priv_data->orig_idx] = NULL;
        }

        /* Allocate buffers for decompressed data */
        ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
        if (ret < 0) {
                RTE_LOG(ERR, USER1,
                        "Destination mbufs could not be allocated "
                        "from the mempool\n");
                goto exit;
        }

        if (sgl) {
                for (i = 0; i < num_bufs; i++) {
                        priv_data = (struct priv_op_data *)
                                        (ops_processed[i] + 1);
                        data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
                        if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
                                        data_size,
                                        ts_params->small_mbuf_pool,
                                        ts_params->large_mbuf_pool,
                                        MAX_SEGS) < 0)
                                goto exit;
                }

        } else {
                for (i = 0; i < num_bufs; i++) {
                        priv_data = (struct priv_op_data *)
                                        (ops_processed[i] + 1);
                        data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
                        rte_pktmbuf_append(uncomp_bufs[i], data_size);
                }
        }

        /* Build the decompression operations */
        ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
        if (ret < 0) {
                RTE_LOG(ERR, USER1,
                        "Decompress operations could not be allocated "
                        "from the mempool\n");
                goto exit;
        }

        /* Source buffer is the compressed data from the previous operations */
        for (i = 0; i < num_bufs; i++) {
                ops[i]->m_src = ops_processed[i]->m_dst;
                ops[i]->m_dst = uncomp_bufs[i];
                ops[i]->src.offset = 0;
                /*
                 * Set the length of the compressed data to the
                 * number of bytes that were produced in the previous stage
                 */
                ops[i]->src.length = ops_processed[i]->produced;
                ops[i]->dst.offset = 0;
                if (state == RTE_COMP_OP_STATELESS) {
                        ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
                } else {
                        RTE_LOG(ERR, USER1,
                                "Stateful operations are not supported "
                                "in these tests yet\n");
                        goto exit;
                }
                ops[i]->input_chksum = 0;
                /*
                 * Copy private data from previous operations,
                 * to keep the pointer to the original buffer
                 */
                memcpy(ops[i] + 1, ops_processed[i] + 1,
                                sizeof(struct priv_op_data));
        }

        /*
         * Free the previous compress operations,
         * as they are not needed anymore
         */
        for (i = 0; i < num_bufs; i++) {
                rte_comp_op_free(ops_processed[i]);
                ops_processed[i] = NULL;
        }

        /* Decompress data (either with Zlib API or compressdev API) */
        if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
                for (i = 0; i < num_bufs; i++) {
                        priv_data = (struct priv_op_data *)(ops[i] + 1);
                        uint16_t xform_idx = priv_data->orig_idx % num_xforms;
                        const struct rte_comp_xform *decompress_xform =
                                decompress_xforms[xform_idx];

                        ret = decompress_zlib(ops[i], decompress_xform);
                        if (ret < 0)
                                goto exit;

                        ops_processed[i] = ops[i];
                }
        } else {
                /* Create decompress private xform data */
                for (i = 0; i < num_xforms; i++) {
                        ret = rte_compressdev_private_xform_create(0,
                                (const struct rte_comp_xform *)decompress_xforms[i],
                                &priv_xforms[i]);
                        if (ret < 0) {
                                RTE_LOG(ERR, USER1,
                                        "Decompression private xform "
                                        "could not be created\n");
                                goto exit;
                        }
                        num_priv_xforms++;
                }

                if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
                        /* Attach shareable private xform data to ops */
                        for (i = 0; i < num_bufs; i++) {
                                priv_data = (struct priv_op_data *)(ops[i] + 1);
                                uint16_t xform_idx = priv_data->orig_idx %
                                                                num_xforms;
                                ops[i]->private_xform = priv_xforms[xform_idx];
                        }
                } else {
                        /* Create rest of the private xforms for the other ops */
                        for (i = num_xforms; i < num_bufs; i++) {
                                ret = rte_compressdev_private_xform_create(0,
                                        decompress_xforms[i % num_xforms],
                                        &priv_xforms[i]);
                                if (ret < 0) {
                                        RTE_LOG(ERR, USER1,
                                                "Decompression private xform "
                                                "could not be created\n");
                                        goto exit;
                                }
                                num_priv_xforms++;
                        }

                        /* Attach non shareable private xform data to ops */
                        for (i = 0; i < num_bufs; i++) {
                                priv_data = (struct priv_op_data *)(ops[i] + 1);
                                uint16_t xform_idx = priv_data->orig_idx;
                                ops[i]->private_xform = priv_xforms[xform_idx];
                        }
                }

                /* Enqueue and dequeue all operations */
                num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
                if (num_enqd < num_bufs) {
                        RTE_LOG(ERR, USER1,
                                "The operations could not be enqueued\n");
                        goto exit;
                }

                num_total_deqd = 0;
                do {
                        /*
                         * If retrying a dequeue call, wait for 10 ms to give
                         * the driver enough time to process the operations
                         */
                        if (deqd_retries != 0) {
                                /*
                                 * Avoid infinite loop if not all the
                                 * operations get out of the device
                                 */
                                if (deqd_retries == MAX_DEQD_RETRIES) {
                                        RTE_LOG(ERR, USER1,
                                                "Not all operations could be "
                                                "dequeued\n");
                                        goto exit;
                                }
                                usleep(DEQUEUE_WAIT_TIME);
                        }
                        num_deqd = rte_compressdev_dequeue_burst(0, 0,
                                        &ops_processed[num_total_deqd], num_bufs);
                        num_total_deqd += num_deqd;
                        deqd_retries++;
                } while (num_total_deqd < num_enqd);

                deqd_retries = 0;
        }

        for (i = 0; i < num_bufs; i++) {
                priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
                char engine[22];
                if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL)
                        strlcpy(engine, "zlib (direct, no pmd)", 22);
                else
                        strlcpy(engine, "pmd", 22);
                RTE_LOG(DEBUG, USER1,
                        "Buffer %u decompressed by %s from %u to %u bytes\n",
                        buf_idx[priv_data->orig_idx], engine,
                        ops_processed[i]->consumed, ops_processed[i]->produced);
                ops[i] = NULL;
        }

        /*
         * Check operation status and free source mbufs (destination mbufs and
         * decompress operation information are still needed)
         */
        for (i = 0; i < num_bufs; i++) {
                if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
                        RTE_LOG(ERR, USER1,
                                "Some operations were not successful\n");
                        goto exit;
                }
                priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
                rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
                comp_bufs[priv_data->orig_idx] = NULL;
        }

        /*
         * Compare the original stream with the decompressed stream
         * (both size and data)
         */
        for (i = 0; i < num_bufs; i++) {
                priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
                const char *buf1 = test_bufs[priv_data->orig_idx];
                const char *buf2;
                contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
                if (contig_buf == NULL) {
                        RTE_LOG(ERR, USER1, "Contiguous buffer could not "
                                        "be allocated\n");
                        goto exit;
                }

                buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
                                ops_processed[i]->produced, contig_buf);

                if (compare_buffers(buf1, strlen(buf1) + 1,
                                buf2, ops_processed[i]->produced) < 0)
                        goto exit;

                rte_free(contig_buf);
                contig_buf = NULL;
        }

        ret_status = 0;

exit:
        /* Free resources */
        for (i = 0; i < num_bufs; i++) {
                rte_pktmbuf_free(uncomp_bufs[i]);
                rte_pktmbuf_free(comp_bufs[i]);
                rte_comp_op_free(ops[i]);
                rte_comp_op_free(ops_processed[i]);
        }
        for (i = 0; i < num_priv_xforms; i++) {
                if (priv_xforms[i] != NULL)
                        rte_compressdev_private_xform_free(0, priv_xforms[i]);
        }
        rte_free(contig_buf);

        return ret_status;
}

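/*
 * Stateless DEFLATE with fixed Huffman coding, cross-checking the PMD
 * against Zlib in both directions
 */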
static int
test_compressdev_deflate_stateless_fixed(void)
{
        struct comp_testsuite_params *ts_params = &testsuite_params;
        const char *test_buffer;
        uint16_t i;
        int ret;
        const struct rte_compressdev_capabilities *capab;

        capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
        TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

        if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
                return -ENOTSUP;

        struct rte_comp_xform *compress_xform =
                        rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);

        if (compress_xform == NULL) {
                RTE_LOG(ERR, USER1,
                        "Compress xform could not be created\n");
                ret = TEST_FAILED;
                goto exit;
        }

        memcpy(compress_xform, ts_params->def_comp_xform,
                        sizeof(struct rte_comp_xform));
        compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;

        for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
                test_buffer = compress_test_bufs[i];

                /* Compress with compressdev, decompress with Zlib */
                if (test_deflate_comp_decomp(&test_buffer, 1,
                                &i,
                                &compress_xform,
                                &ts_params->def_decomp_xform,
                                1,
                                RTE_COMP_OP_STATELESS,
                                0,
                                ZLIB_DECOMPRESS) < 0) {
                        ret = TEST_FAILED;
                        goto exit;
                }

                /* Compress with Zlib, decompress with compressdev */
                if (test_deflate_comp_decomp(&test_buffer, 1,
                                &i,
                                &compress_xform,
                                &ts_params->def_decomp_xform,
                                1,
                                RTE_COMP_OP_STATELESS,
                                0,
                                ZLIB_COMPRESS) < 0) {
                        ret = TEST_FAILED;
                        goto exit;
                }
        }

        ret = TEST_SUCCESS;

exit:
        rte_free(compress_xform);
        return ret;
}

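/*
 * Stateless DEFLATE with dynamic Huffman coding, cross-checking the PMD
 * against Zlib in both directions
 */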
static int
test_compressdev_deflate_stateless_dynamic(void)
{
        struct comp_testsuite_params *ts_params = &testsuite_params;
        const char *test_buffer;
        uint16_t i;
        int ret;
        struct rte_comp_xform *compress_xform =
                        rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);

        const struct rte_compressdev_capabilities *capab;

        capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
        TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

        if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) {
                rte_free(compress_xform);
                return -ENOTSUP;
        }

        if (compress_xform == NULL) {
                RTE_LOG(ERR, USER1,
                        "Compress xform could not be created\n");
                ret = TEST_FAILED;
                goto exit;
        }

        memcpy(compress_xform, ts_params->def_comp_xform,
                        sizeof(struct rte_comp_xform));
        compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;

        for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
                test_buffer = compress_test_bufs[i];

                /* Compress with compressdev, decompress with Zlib */
                if (test_deflate_comp_decomp(&test_buffer, 1,
                                &i,
                                &compress_xform,
                                &ts_params->def_decomp_xform,
                                1,
                                RTE_COMP_OP_STATELESS,
                                0,
                                ZLIB_DECOMPRESS) < 0) {
                        ret = TEST_FAILED;
                        goto exit;
                }

                /* Compress with Zlib, decompress with compressdev */
                if (test_deflate_comp_decomp(&test_buffer, 1,
                                &i,
                                &compress_xform,
                                &ts_params->def_decomp_xform,
                                1,
                                RTE_COMP_OP_STATELESS,
                                0,
                                ZLIB_COMPRESS) < 0) {
                        ret = TEST_FAILED;
                        goto exit;
                }
        }

        ret = TEST_SUCCESS;

exit:
        rte_free(compress_xform);
        return ret;
}

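/* Run all test buffers through a single stateless burst, multiple operations per enqueue */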
static int
test_compressdev_deflate_stateless_multi_op(void)
{
        struct comp_testsuite_params *ts_params = &testsuite_params;
        uint16_t num_bufs = RTE_DIM(compress_test_bufs);
        uint16_t buf_idx[num_bufs];
        uint16_t i;

        for (i = 0; i < num_bufs; i++)
                buf_idx[i] = i;

        /* Compress with compressdev, decompress with Zlib */
        if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
                        buf_idx,
                        &ts_params->def_comp_xform,
                        &ts_params->def_decomp_xform,
                        1,
                        RTE_COMP_OP_STATELESS,
                        0,
                        ZLIB_DECOMPRESS) < 0)
                return TEST_FAILED;

        /* Compress with Zlib, decompress with compressdev */
        if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
                        buf_idx,
                        &ts_params->def_comp_xform,
                        &ts_params->def_decomp_xform,
                        1,
                        RTE_COMP_OP_STATELESS,
                        0,
                        ZLIB_COMPRESS) < 0)
                return TEST_FAILED;

        return TEST_SUCCESS;
}

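/* Sweep every compression level from RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX on each test buffer */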
1310 static int
1311 test_compressdev_deflate_stateless_multi_level(void)
1312 {
1313         struct comp_testsuite_params *ts_params = &testsuite_params;
1314         const char *test_buffer;
1315         unsigned int level;
1316         uint16_t i;
1317         int ret;
1318         struct rte_comp_xform *compress_xform =
1319                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1320
1321         if (compress_xform == NULL) {
1322                 RTE_LOG(ERR, USER1,
1323                         "Compress xform could not be created\n");
1324                 ret = TEST_FAILED;
1325                 goto exit;
1326         }
1327
1328         memcpy(compress_xform, ts_params->def_comp_xform,
1329                         sizeof(struct rte_comp_xform));
1330
1331         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1332                 test_buffer = compress_test_bufs[i];
1333                 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1334                                 level++) {
1335                         compress_xform->compress.level = level;
1336                         /* Compress with compressdev, decompress with Zlib */
1337                         if (test_deflate_comp_decomp(&test_buffer, 1,
1338                                         &i,
1339                                         &compress_xform,
1340                                         &ts_params->def_decomp_xform,
1341                                         1,
1342                                         RTE_COMP_OP_STATELESS,
1343                                         0,
1344                                         ZLIB_DECOMPRESS) < 0) {
1345                                 ret = TEST_FAILED;
1346                                 goto exit;
1347                         }
1348                 }
1349         }
1350
1351         ret = TEST_SUCCESS;
1352
1353 exit:
1354         rte_free(compress_xform);
1355         return ret;
1356 }
1357
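/*
 * Multi-xform test: compress the same input buffer with NUM_XFORMS
 * compress xforms at different levels (one op per xform) and verify
 * each result by decompressing with Zlib.
 */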
1358 #define NUM_XFORMS 3
1359 static int
1360 test_compressdev_deflate_stateless_multi_xform(void)
1361 {
1362         struct comp_testsuite_params *ts_params = &testsuite_params;
1363         uint16_t num_bufs = NUM_XFORMS;
1364         struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1365         struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1366         const char *test_buffers[NUM_XFORMS];
1367         uint16_t i;
1368         unsigned int level = RTE_COMP_LEVEL_MIN;
1369         uint16_t buf_idx[num_bufs];
1370
1371         int ret;
1372
1373         /* Create compress xforms with increasing levels, plus matching decompress xforms */
1374         for (i = 0; i < NUM_XFORMS; i++) {
1375                 compress_xforms[i] = rte_malloc(NULL,
1376                                 sizeof(struct rte_comp_xform), 0);
1377                 if (compress_xforms[i] == NULL) {
1378                         RTE_LOG(ERR, USER1,
1379                                 "Compress xform could not be created\n");
1380                         ret = TEST_FAILED;
1381                         goto exit;
1382                 }
1383
1384                 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1385                                 sizeof(struct rte_comp_xform));
1386                 compress_xforms[i]->compress.level = level;
1387                 level++;
1388
1389                 decompress_xforms[i] = rte_malloc(NULL,
1390                                 sizeof(struct rte_comp_xform), 0);
1391                 if (decompress_xforms[i] == NULL) {
1392                         RTE_LOG(ERR, USER1,
1393                                 "Decompress xform could not be created\n");
1394                         ret = TEST_FAILED;
1395                         goto exit;
1396                 }
1397
1398                 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1399                                 sizeof(struct rte_comp_xform));
1400         }
1401
1402         for (i = 0; i < NUM_XFORMS; i++) {
1403                 buf_idx[i] = 0;
1404                 /* Use the same input buffer with every xform */
1405                 test_buffers[i] = compress_test_bufs[0];
1406         }
1407         /* Compress with compressdev, decompress with Zlib */
1408         if (test_deflate_comp_decomp(test_buffers, num_bufs,
1409                         buf_idx,
1410                         compress_xforms,
1411                         decompress_xforms,
1412                         NUM_XFORMS,
1413                         RTE_COMP_OP_STATELESS,
1414                         0,
1415                         ZLIB_DECOMPRESS) < 0) {
1416                 ret = TEST_FAILED;
1417                 goto exit;
1418         }
1419
1420         ret = TEST_SUCCESS;
1421 exit:
1422         for (i = 0; i < NUM_XFORMS; i++) {
1423                 rte_free(compress_xforms[i]);
1424                 rte_free(decompress_xforms[i]);
1425         }
1426
1427         return ret;
1428 }
1429
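/*
 * Scatter-gather test: returns -ENOTSUP unless the device advertises
 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT, otherwise repeats the stateless
 * compress/decompress checks using multi-segment (chained) mbufs.
 */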
1430 static int
1431 test_compressdev_deflate_stateless_sgl(void)
1432 {
1433         struct comp_testsuite_params *ts_params = &testsuite_params;
1434         uint16_t i;
1435         const char *test_buffer;
1436         const struct rte_compressdev_capabilities *capab;
1437
1438         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1439         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1440
1441         if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1442                 return -ENOTSUP;
1443
1444         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1445                 test_buffer = compress_test_bufs[i];
1446                 /* Compress with compressdev, decompress with Zlib */
1447                 if (test_deflate_comp_decomp(&test_buffer, 1,
1448                                 &i,
1449                                 &ts_params->def_comp_xform,
1450                                 &ts_params->def_decomp_xform,
1451                                 1,
1452                                 RTE_COMP_OP_STATELESS,
1453                                 1,
1454                                 ZLIB_DECOMPRESS) < 0)
1455                         return TEST_FAILED;
1456
1457                 /* Compress with Zlib, decompress with compressdev */
1458                 if (test_deflate_comp_decomp(&test_buffer, 1,
1459                                 &i,
1460                                 &ts_params->def_comp_xform,
1461                                 &ts_params->def_decomp_xform,
1462                                 1,
1463                                 RTE_COMP_OP_STATELESS,
1464                                 1,
1465                                 ZLIB_COMPRESS) < 0)
1466                         return TEST_FAILED;
1467         }
1468
1469         return TEST_SUCCESS;
1470 }
1471
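/*
 * Test registry: the invalid-configuration check needs no fixture; all
 * stateless DEFLATE cases run between generic_ut_setup() and
 * generic_ut_teardown().
 */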
1472 static struct unit_test_suite compressdev_testsuite = {
1473         .suite_name = "compressdev unit test suite",
1474         .setup = testsuite_setup,
1475         .teardown = testsuite_teardown,
1476         .unit_test_cases = {
1477                 TEST_CASE_ST(NULL, NULL,
1478                         test_compressdev_invalid_configuration),
1479                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1480                         test_compressdev_deflate_stateless_fixed),
1481                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1482                         test_compressdev_deflate_stateless_dynamic),
1483                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1484                         test_compressdev_deflate_stateless_multi_op),
1485                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1486                         test_compressdev_deflate_stateless_multi_level),
1487                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1488                         test_compressdev_deflate_stateless_multi_xform),
1489                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1490                         test_compressdev_deflate_stateless_sgl),
1491                 TEST_CASES_END() /**< NULL terminate unit test array */
1492         }
1493 };
1494
1495 static int
1496 test_compressdev(void)
1497 {
1498         return unit_test_suite_runner(&compressdev_testsuite);
1499 }
1500
1501 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
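/*
 * Typical invocation (example only; the binary path and vdev name depend
 * on the build system and the PMD being tested):
 *
 *   ./build/app/test --vdev=compress_zlib
 *   RTE>> compressdev_autotest
 *
 * Any compressdev PMD can be exercised; zlib is linked in only to
 * cross-check the results.
 */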