1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
13 #include <rte_compressdev.h>
14 #include <rte_string_fns.h>
16 #include "test_compressdev_test_buffer.h"
19 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
21 #define DEFAULT_WINDOW_SIZE 15
22 #define DEFAULT_MEM_LEVEL 8
23 #define MAX_DEQD_RETRIES 10
24 #define DEQUEUE_WAIT_TIME 10000
27 * 30% extra size for compressed data compared to original data,
28 * in case data size cannot be reduced and it is actually bigger
29 * due to the compress block headers
31 #define COMPRESS_BUF_SIZE_RATIO 1.3
32 #define NUM_LARGE_MBUFS 16
33 #define SMALL_SEG_SIZE 256
36 #define NUM_MAX_XFORMS 16
37 #define NUM_MAX_INFLIGHT_OPS 128
41 huffman_type_strings[] = {
42 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
43 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
44 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/*
 * Shared state for the whole test suite: the mbuf pools used as
 * source/destination buffers, the comp-op pool and the two default
 * xforms.  Populated by testsuite_setup(), freed by testsuite_teardown().
 */
58 struct comp_testsuite_params {
/* pool of mbufs large enough for any full test buffer (see setup) */
59 struct rte_mempool *large_mbuf_pool;
/* pool of SMALL_SEG_SIZE mbufs, used to build scatter-gather chains */
60 struct rte_mempool *small_mbuf_pool;
/* pool of rte_comp_op, with priv_op_data as per-op private area */
61 struct rte_mempool *op_pool;
/* default DEFLATE compress xform (PMD-default huffman/level) */
62 struct rte_comp_xform *def_comp_xform;
/* default DEFLATE decompress xform */
63 struct rte_comp_xform *def_decomp_xform;
/* single zero-initialized instance shared by every test case */
66 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Suite-level teardown: release every resource allocated by
 * testsuite_setup().  rte_mempool_free()/rte_free() are NULL-safe,
 * so this is also safe to call after a partially failed setup.
 */
69 testsuite_teardown(void)
71 struct comp_testsuite_params *ts_params = &testsuite_params;
73 rte_mempool_free(ts_params->large_mbuf_pool);
74 rte_mempool_free(ts_params->small_mbuf_pool);
75 rte_mempool_free(ts_params->op_pool);
76 rte_free(ts_params->def_comp_xform);
77 rte_free(ts_params->def_decomp_xform);
/*
 * Suite-level setup (body of testsuite_setup): requires at least one
 * compressdev, sizes the large mbuf pool from the biggest test buffer
 * (scaled by COMPRESS_BUF_SIZE_RATIO so compressed output that grows
 * still fits), creates the small-segment pool for SGL tests, the op
 * pool, and the two default xforms.
 */
83 struct comp_testsuite_params *ts_params = &testsuite_params;
84 uint32_t max_buf_size = 0;
/* At least one compress device must be available to run the suite */
87 if (rte_compressdev_count() == 0) {
88 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
92 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
93 rte_compressdev_name_get(0));
/* Find the longest test string (incl. NUL) to size the large pool */
95 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
96 max_buf_size = RTE_MAX(max_buf_size,
97 strlen(compress_test_bufs[i]) + 1);
100 * Buffers to be used in compression and decompression.
101 * Since decompressed data might be larger than
102 * compressed data (due to block header),
103 * buffers should be big enough for both cases.
105 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
106 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
109 max_buf_size + RTE_PKTMBUF_HEADROOM,
111 if (ts_params->large_mbuf_pool == NULL) {
112 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
116 /* Create mempool with smaller buffers for SGL testing */
117 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
118 NUM_LARGE_MBUFS * MAX_SEGS,
120 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
122 if (ts_params->small_mbuf_pool == NULL) {
123 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
/* Op pool carries priv_op_data per op (stores original buffer index) */
127 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
128 0, sizeof(struct priv_op_data),
130 if (ts_params->op_pool == NULL) {
131 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
135 ts_params->def_comp_xform =
136 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
137 if (ts_params->def_comp_xform == NULL) {
139 "Default compress xform could not be created\n");
142 ts_params->def_decomp_xform =
143 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
144 if (ts_params->def_decomp_xform == NULL) {
146 "Default decompress xform could not be created\n");
150 /* Initializes default values for compress/decompress xforms */
151 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): statement below ends with ',' (comma operator) rather
 * than ';' — harmless here but should be fixed to ';' upstream */
152 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
153 ts_params->def_comp_xform->compress.deflate.huffman =
154 RTE_COMP_HUFFMAN_DEFAULT;
155 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
156 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
157 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
159 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same stray ',' instead of ';' as above */
160 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
161 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
162 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* error path: release anything allocated so far */
167 testsuite_teardown();
/*
 * Per-test setup: configure device 0 with a single queue pair,
 * set up that queue pair, and start the device.
 */
173 generic_ut_setup(void)
175 /* Configure compressdev (one device, one queue pair) */
176 struct rte_compressdev_config config = {
177 .socket_id = rte_socket_id(),
179 .max_nb_priv_xforms = NUM_MAX_XFORMS,
183 if (rte_compressdev_configure(0, &config) < 0) {
184 RTE_LOG(ERR, USER1, "Device configuration failed\n");
188 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
189 rte_socket_id()) < 0) {
190 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
194 if (rte_compressdev_start(0) < 0) {
195 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0 (logs on close failure). */
203 generic_ut_teardown(void)
205 rte_compressdev_stop(0);
206 if (rte_compressdev_close(0) < 0)
207 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: verify the device rejects invalid configurations —
 * zero queue pairs, more queue pairs than the reported maximum, and a
 * queue-pair setup on an unconfigured (0-qp) device.  Each attempt is
 * expected to FAIL (TEST_ASSERT_FAIL inverts the check).
 */
211 test_compressdev_invalid_configuration(void)
213 struct rte_compressdev_config invalid_config;
214 struct rte_compressdev_config valid_config = {
215 .socket_id = rte_socket_id(),
217 .max_nb_priv_xforms = NUM_MAX_XFORMS,
220 struct rte_compressdev_info dev_info;
222 /* Invalid configuration with 0 queue pairs */
223 memcpy(&invalid_config, &valid_config,
224 sizeof(struct rte_compressdev_config));
225 invalid_config.nb_queue_pairs = 0;
227 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
228 "Device configuration was successful "
229 "with no queue pairs (invalid)\n");
232 * Invalid configuration with too many queue pairs
233 * (if there is an actual maximum number of queue pairs)
235 rte_compressdev_info_get(0, &dev_info);
/* max_nb_queue_pairs == 0 means "no limit" — skip that sub-case */
236 if (dev_info.max_nb_queue_pairs != 0) {
237 memcpy(&invalid_config, &valid_config,
238 sizeof(struct rte_compressdev_config));
239 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
241 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
242 "Device configuration was successful "
243 "with too many queue pairs (invalid)\n");
246 /* Invalid queue pair setup, with no number of queue pairs set */
247 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
248 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
249 "Queue pair setup was successful "
250 "with no queue pairs set (invalid)\n");
/*
 * Compare two buffers for identical length and content.
 * Logs and reports failure on mismatch (return statements are in the
 * elided lines; by usage at the call site a negative value = mismatch).
 */
256 compare_buffers(const char *buffer1, uint32_t buffer1_len,
257 const char *buffer2, uint32_t buffer2_len)
259 if (buffer1_len != buffer2_len) {
260 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
264 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
265 RTE_LOG(ERR, USER1, "Buffers are different\n");
273 * Maps compressdev and Zlib flush flags
/*
 * Translate an rte_comp_flush_flag into the corresponding zlib flush
 * constant (the per-case return values are in the elided lines).
 */
276 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
279 case RTE_COMP_FLUSH_NONE:
281 case RTE_COMP_FLUSH_SYNC:
283 case RTE_COMP_FLUSH_FULL:
285 case RTE_COMP_FLUSH_FINAL:
288 * There should be only the values above,
289 * so this should never happen
/*
 * Reference compressor: run the op's source data through zlib's
 * deflate (raw DEFLATE — negative window bits) so results can be
 * cross-checked against the compressdev PMD.  Handles multi-segment
 * (SGL) mbufs by linearizing into temporary contiguous buffers, then
 * scattering the output back across the destination segments.
 * On success fills op->consumed/produced/status.
 */
297 compress_zlib(struct rte_comp_op *op,
298 const struct rte_comp_xform *xform, int mem_level)
302 int strategy, window_bits, comp_level;
303 int ret = TEST_FAILED;
304 uint8_t *single_src_buf = NULL;
305 uint8_t *single_dst_buf = NULL;
307 /* initialize zlib stream */
308 stream.zalloc = Z_NULL;
309 stream.zfree = Z_NULL;
310 stream.opaque = Z_NULL;
/* fixed-huffman branch sets a different strategy (elided line) */
312 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
315 strategy = Z_DEFAULT_STRATEGY;
318 * Window bits is the base two logarithm of the window size (in bytes).
319 * When doing raw DEFLATE, this number will be negative.
321 window_bits = -(xform->compress.window_size);
323 comp_level = xform->compress.level;
325 if (comp_level != RTE_COMP_LEVEL_NONE)
326 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
327 window_bits, mem_level, strategy);
/* LEVEL_NONE maps to stored (uncompressed) deflate blocks */
329 ret = deflateInit(&stream, Z_NO_COMPRESSION);
332 printf("Zlib deflate could not be initialized\n");
336 /* Assuming stateless operation */
/* SGL source: zlib needs contiguous input/output, so linearize */
338 if (op->m_src->nb_segs > 1) {
339 single_src_buf = rte_malloc(NULL,
340 rte_pktmbuf_pkt_len(op->m_src), 0);
341 if (single_src_buf == NULL) {
342 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
345 single_dst_buf = rte_malloc(NULL,
346 rte_pktmbuf_pkt_len(op->m_dst), 0);
347 if (single_dst_buf == NULL) {
348 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
351 if (rte_pktmbuf_read(op->m_src, 0,
352 rte_pktmbuf_pkt_len(op->m_src),
353 single_src_buf) == NULL) {
355 "Buffer could not be read entirely\n");
359 stream.avail_in = op->src.length;
360 stream.next_in = single_src_buf;
361 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
362 stream.next_out = single_dst_buf;
/* contiguous case: point zlib directly at the mbuf data */
365 stream.avail_in = op->src.length;
366 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
367 stream.avail_out = op->m_dst->data_len;
368 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
370 /* Stateless operation, all buffer will be compressed in one go */
371 zlib_flush = map_zlib_flush_flag(op->flush_flag);
372 ret = deflate(&stream, zlib_flush);
374 if (stream.avail_in != 0) {
375 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* FLUSH_FINAL on full input must end the stream */
379 if (ret != Z_STREAM_END)
382 /* Copy data to destination SGL */
383 if (op->m_src->nb_segs > 1) {
384 uint32_t remaining_data = stream.total_out;
385 uint8_t *src_data = single_dst_buf;
386 struct rte_mbuf *dst_buf = op->m_dst;
388 while (remaining_data > 0) {
389 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
/* last (partial) segment */
392 if (remaining_data < dst_buf->data_len) {
393 memcpy(dst_data, src_data, remaining_data);
396 memcpy(dst_data, src_data, dst_buf->data_len);
397 remaining_data -= dst_buf->data_len;
398 src_data += dst_buf->data_len;
399 dst_buf = dst_buf->next;
/* report results exactly as a PMD would */
404 op->consumed = stream.total_in;
405 op->produced = stream.total_out;
406 op->status = RTE_COMP_OP_STATUS_SUCCESS;
408 deflateReset(&stream);
/* rte_free(NULL) is a no-op, safe on early-exit paths */
413 rte_free(single_src_buf);
414 rte_free(single_dst_buf);
/*
 * Reference decompressor: mirror of compress_zlib() using inflate.
 * Raw DEFLATE (negative window bits), SGL linearization on input and
 * scatter on output; fills op->consumed/produced/status on success.
 */
420 decompress_zlib(struct rte_comp_op *op,
421 const struct rte_comp_xform *xform)
426 int ret = TEST_FAILED;
427 uint8_t *single_src_buf = NULL;
428 uint8_t *single_dst_buf = NULL;
430 /* initialize zlib stream */
431 stream.zalloc = Z_NULL;
432 stream.zfree = Z_NULL;
433 stream.opaque = Z_NULL;
436 * Window bits is the base two logarithm of the window size (in bytes).
437 * When doing raw DEFLATE, this number will be negative.
439 window_bits = -(xform->decompress.window_size);
441 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate path */
444 printf("Zlib deflate could not be initialized\n");
448 /* Assuming stateless operation */
450 if (op->m_src->nb_segs > 1) {
451 single_src_buf = rte_malloc(NULL,
452 rte_pktmbuf_pkt_len(op->m_src), 0);
453 if (single_src_buf == NULL) {
454 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
457 single_dst_buf = rte_malloc(NULL,
458 rte_pktmbuf_pkt_len(op->m_dst), 0);
459 if (single_dst_buf == NULL) {
460 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
463 if (rte_pktmbuf_read(op->m_src, 0,
464 rte_pktmbuf_pkt_len(op->m_src),
465 single_src_buf) == NULL) {
467 "Buffer could not be read entirely\n");
471 stream.avail_in = op->src.length;
472 stream.next_in = single_src_buf;
473 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
474 stream.next_out = single_dst_buf;
/* contiguous case */
477 stream.avail_in = op->src.length;
478 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
479 stream.avail_out = op->m_dst->data_len;
480 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
483 /* Stateless operation, all buffer will be compressed in one go */
484 zlib_flush = map_zlib_flush_flag(op->flush_flag);
485 ret = inflate(&stream, zlib_flush);
487 if (stream.avail_in != 0) {
488 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
492 if (ret != Z_STREAM_END)
/* scatter the contiguous output back over the destination SGL */
495 if (op->m_src->nb_segs > 1) {
496 uint32_t remaining_data = stream.total_out;
497 uint8_t *src_data = single_dst_buf;
498 struct rte_mbuf *dst_buf = op->m_dst;
500 while (remaining_data > 0) {
501 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
504 if (remaining_data < dst_buf->data_len) {
505 memcpy(dst_data, src_data, remaining_data);
508 memcpy(dst_data, src_data, dst_buf->data_len);
509 remaining_data -= dst_buf->data_len;
510 src_data += dst_buf->data_len;
511 dst_buf = dst_buf->next;
516 op->consumed = stream.total_in;
517 op->produced = stream.total_out;
518 op->status = RTE_COMP_OP_STATUS_SUCCESS;
520 inflateReset(&stream);
/*
 * Build a scatter-gather mbuf chain of total_data_size bytes rooted at
 * head_buf, using SMALL_SEG_SIZE segments from small_mbuf_pool (the
 * final/oversized segment may come from large_mbuf_pool).  If test_buf
 * is non-NULL its bytes are copied across the segments; with NULL only
 * space is reserved.  limit_segs_in_sgl (0 = unlimited) caps the chain
 * length for SGL-limit tests.
 */
530 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
531 uint32_t total_data_size,
532 struct rte_mempool *small_mbuf_pool,
533 struct rte_mempool *large_mbuf_pool,
534 uint8_t limit_segs_in_sgl)
536 uint32_t remaining_data = total_data_size;
537 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
538 struct rte_mempool *pool;
539 struct rte_mbuf *next_seg;
542 const char *data_ptr = test_buf;
/* clamp chain length; -1 because head_buf itself is one segment */
546 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
547 num_remaining_segs = limit_segs_in_sgl - 1;
550 * Allocate data in the first segment (header) and
551 * copy data if test buffer is provided
553 if (remaining_data < SMALL_SEG_SIZE)
554 data_size = remaining_data;
556 data_size = SMALL_SEG_SIZE;
557 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
558 if (buf_ptr == NULL) {
560 "Not enough space in the 1st buffer\n");
564 if (data_ptr != NULL) {
565 /* Copy characters without NULL terminator */
566 strncpy(buf_ptr, data_ptr, data_size);
567 data_ptr += data_size;
569 remaining_data -= data_size;
570 num_remaining_segs--;
573 * Allocate the rest of the segments,
574 * copy the rest of the data and chain the segments.
576 for (i = 0; i < num_remaining_segs; i++) {
/* last segment absorbs all remaining data (may exceed SMALL_SEG_SIZE
 * when the segment count was clamped above) */
578 if (i == (num_remaining_segs - 1)) {
580 if (remaining_data > SMALL_SEG_SIZE)
581 pool = large_mbuf_pool;
583 pool = small_mbuf_pool;
584 data_size = remaining_data;
586 data_size = SMALL_SEG_SIZE;
587 pool = small_mbuf_pool;
590 next_seg = rte_pktmbuf_alloc(pool);
591 if (next_seg == NULL) {
593 "New segment could not be allocated "
594 "from the mempool\n");
597 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
598 if (buf_ptr == NULL) {
600 "Not enough space in the buffer\n");
601 rte_pktmbuf_free(next_seg);
604 if (data_ptr != NULL) {
605 /* Copy characters without NULL terminator */
606 strncpy(buf_ptr, data_ptr, data_size);
607 data_ptr += data_size;
609 remaining_data -= data_size;
611 ret = rte_pktmbuf_chain(head_buf, next_seg);
613 rte_pktmbuf_free(next_seg);
615 "Segment could not chained\n");
624 * Compresses and decompresses buffer with compressdev API and Zlib API
/*
 * Core round-trip driver used by every deflate test case.
 * For each of num_bufs input strings: build source mbufs (flat or SGL),
 * compress (via zlib or the PMD per zlib_dir), then decompress with the
 * opposite engine, and finally compare the decompressed output against
 * the original input.  Xforms are applied round-robin (i % num_xforms);
 * ops carry their original index in priv_op_data since the PMD may
 * dequeue out of order.  Returns a test status; the shared error path
 * frees all mbufs, ops and private xforms.
 */
627 test_deflate_comp_decomp(const char * const test_bufs[],
628 unsigned int num_bufs,
630 struct rte_comp_xform *compress_xforms[],
631 struct rte_comp_xform *decompress_xforms[],
632 unsigned int num_xforms,
633 enum rte_comp_op_type state,
635 enum zlib_direction zlib_dir)
637 struct comp_testsuite_params *ts_params = &testsuite_params;
640 struct rte_mbuf *uncomp_bufs[num_bufs];
641 struct rte_mbuf *comp_bufs[num_bufs];
642 struct rte_comp_op *ops[num_bufs];
643 struct rte_comp_op *ops_processed[num_bufs];
644 void *priv_xforms[num_bufs];
645 uint16_t num_enqd, num_deqd, num_total_deqd;
646 uint16_t num_priv_xforms = 0;
647 unsigned int deqd_retries = 0;
648 struct priv_op_data *priv_data;
651 struct rte_mempool *buf_pool;
653 const struct rte_compressdev_capabilities *capa =
654 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
655 char *contig_buf = NULL;
657 /* Initialize all arrays to NULL */
658 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
659 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
660 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
661 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
662 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* SGL tests draw from the small pool, flat tests from the large one */
665 buf_pool = ts_params->small_mbuf_pool;
667 buf_pool = ts_params->large_mbuf_pool;
669 /* Prepare the source mbufs with the data */
670 ret = rte_pktmbuf_alloc_bulk(buf_pool,
671 uncomp_bufs, num_bufs);
674 "Source mbufs could not be allocated "
675 "from the mempool\n");
/* SGL path: chain small segments and copy the test string in */
680 for (i = 0; i < num_bufs; i++) {
681 data_size = strlen(test_bufs[i]) + 1;
682 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
684 ts_params->small_mbuf_pool,
685 ts_params->large_mbuf_pool,
/* flat path: single append + copy (incl. NUL terminator) */
690 for (i = 0; i < num_bufs; i++) {
691 data_size = strlen(test_bufs[i]) + 1;
692 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
693 snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
697 /* Prepare the destination mbufs */
698 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
701 "Destination mbufs could not be allocated "
702 "from the mempool\n");
/* oversize destinations in case "compressed" output grows */
707 for (i = 0; i < num_bufs; i++) {
708 data_size = strlen(test_bufs[i]) *
709 COMPRESS_BUF_SIZE_RATIO;
710 if (prepare_sgl_bufs(NULL, comp_bufs[i],
712 ts_params->small_mbuf_pool,
713 ts_params->large_mbuf_pool,
719 for (i = 0; i < num_bufs; i++) {
720 data_size = strlen(test_bufs[i]) *
721 COMPRESS_BUF_SIZE_RATIO;
722 rte_pktmbuf_append(comp_bufs[i], data_size);
726 /* Build the compression operations */
727 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
730 "Compress operations could not be allocated "
731 "from the mempool\n");
735 for (i = 0; i < num_bufs; i++) {
736 ops[i]->m_src = uncomp_bufs[i];
737 ops[i]->m_dst = comp_bufs[i];
738 ops[i]->src.offset = 0;
739 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
740 ops[i]->dst.offset = 0;
741 if (state == RTE_COMP_OP_STATELESS) {
742 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
745 "Stateful operations are not supported "
746 "in these tests yet\n");
749 ops[i]->input_chksum = 0;
751 * Store original operation index in private data,
752 * since ordering does not have to be maintained,
753 * when dequeueing from compressdev, so a comparison
754 * at the end of the test can be done.
756 priv_data = (struct priv_op_data *) (ops[i] + 1);
757 priv_data->orig_idx = i;
760 /* Compress data (either with Zlib API or compressdev API */
761 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
762 for (i = 0; i < num_bufs; i++) {
763 const struct rte_comp_xform *compress_xform =
764 compress_xforms[i % num_xforms];
765 ret = compress_zlib(ops[i], compress_xform,
/* zlib path is synchronous: op is already "processed" */
770 ops_processed[i] = ops[i];
773 /* Create compress private xform data */
774 for (i = 0; i < num_xforms; i++) {
775 ret = rte_compressdev_private_xform_create(0,
776 (const struct rte_comp_xform *)compress_xforms[i],
780 "Compression private xform "
781 "could not be created\n");
787 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
788 /* Attach shareable private xform data to ops */
789 for (i = 0; i < num_bufs; i++)
790 ops[i]->private_xform = priv_xforms[i % num_xforms];
792 /* Create rest of the private xforms for the other ops */
793 for (i = num_xforms; i < num_bufs; i++) {
794 ret = rte_compressdev_private_xform_create(0,
795 compress_xforms[i % num_xforms],
799 "Compression private xform "
800 "could not be created\n");
806 /* Attach non shareable private xform data to ops */
807 for (i = 0; i < num_bufs; i++)
808 ops[i]->private_xform = priv_xforms[i];
811 /* Enqueue and dequeue all operations */
812 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
813 if (num_enqd < num_bufs) {
815 "The operations could not be enqueued\n");
822 * If retrying a dequeue call, wait for 10 ms to allow
823 * enough time to the driver to process the operations
825 if (deqd_retries != 0) {
827 * Avoid infinite loop if not all the
828 * operations get out of the device
830 if (deqd_retries == MAX_DEQD_RETRIES) {
832 "Not all operations could be "
836 usleep(DEQUEUE_WAIT_TIME);
838 num_deqd = rte_compressdev_dequeue_burst(0, 0,
839 &ops_processed[num_total_deqd], num_bufs);
840 num_total_deqd += num_deqd;
842 } while (num_total_deqd < num_enqd);
846 /* Free compress private xforms */
847 for (i = 0; i < num_priv_xforms; i++) {
848 rte_compressdev_private_xform_free(0, priv_xforms[i]);
849 priv_xforms[i] = NULL;
/* per-buffer debug report of the compression stage */
854 for (i = 0; i < num_bufs; i++) {
855 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
856 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
857 const struct rte_comp_compress_xform *compress_xform =
858 &compress_xforms[xform_idx]->compress;
859 enum rte_comp_huffman huffman_type =
860 compress_xform->deflate.huffman;
862 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL)
863 strlcpy(engine, "zlib (direct, no pmd)", 22);
865 strlcpy(engine, "pmd", 22);
867 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
868 " %u bytes (level = %d, huffman = %s)\n",
869 buf_idx[priv_data->orig_idx], engine,
870 ops_processed[i]->consumed, ops_processed[i]->produced,
871 compress_xform->level,
872 huffman_type_strings[huffman_type]);
873 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
874 ops_processed[i]->consumed == 0 ? 0 :
875 (float)ops_processed[i]->produced /
876 ops_processed[i]->consumed * 100);
881 * Check operation status and free source mbufs (destination mbuf and
882 * compress operation information is needed for the decompression stage)
884 for (i = 0; i < num_bufs; i++) {
885 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
887 "Some operations were not successful\n");
890 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
891 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
892 uncomp_bufs[priv_data->orig_idx] = NULL;
895 /* Allocate buffers for decompressed data */
896 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
899 "Destination mbufs could not be allocated "
900 "from the mempool\n");
905 for (i = 0; i < num_bufs; i++) {
906 priv_data = (struct priv_op_data *)
907 (ops_processed[i] + 1);
908 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
909 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
911 ts_params->small_mbuf_pool,
912 ts_params->large_mbuf_pool,
918 for (i = 0; i < num_bufs; i++) {
919 priv_data = (struct priv_op_data *)
920 (ops_processed[i] + 1);
921 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
922 rte_pktmbuf_append(uncomp_bufs[i], data_size);
926 /* Build the decompression operations */
927 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
930 "Decompress operations could not be allocated "
931 "from the mempool\n");
935 /* Source buffer is the compressed data from the previous operations */
936 for (i = 0; i < num_bufs; i++) {
937 ops[i]->m_src = ops_processed[i]->m_dst;
938 ops[i]->m_dst = uncomp_bufs[i];
939 ops[i]->src.offset = 0;
941 * Set the length of the compressed data to the
942 * number of bytes that were produced in the previous stage
944 ops[i]->src.length = ops_processed[i]->produced;
945 ops[i]->dst.offset = 0;
946 if (state == RTE_COMP_OP_STATELESS) {
947 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
950 "Stateful operations are not supported "
951 "in these tests yet\n");
954 ops[i]->input_chksum = 0;
956 * Copy private data from previous operations,
957 * to keep the pointer to the original buffer
959 memcpy(ops[i] + 1, ops_processed[i] + 1,
960 sizeof(struct priv_op_data));
964 * Free the previous compress operations,
965 * as it is not needed anymore
967 for (i = 0; i < num_bufs; i++) {
968 rte_comp_op_free(ops_processed[i]);
969 ops_processed[i] = NULL;
972 /* Decompress data (either with Zlib API or compressdev API */
973 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
974 for (i = 0; i < num_bufs; i++) {
975 priv_data = (struct priv_op_data *)(ops[i] + 1);
976 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
977 const struct rte_comp_xform *decompress_xform =
978 decompress_xforms[xform_idx];
980 ret = decompress_zlib(ops[i], decompress_xform);
984 ops_processed[i] = ops[i];
987 /* Create decompress private xform data */
988 for (i = 0; i < num_xforms; i++) {
989 ret = rte_compressdev_private_xform_create(0,
990 (const struct rte_comp_xform *)decompress_xforms[i],
994 "Decompression private xform "
995 "could not be created\n");
1001 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1002 /* Attach shareable private xform data to ops */
1003 for (i = 0; i < num_bufs; i++) {
1004 priv_data = (struct priv_op_data *)(ops[i] + 1);
1005 uint16_t xform_idx = priv_data->orig_idx %
1007 ops[i]->private_xform = priv_xforms[xform_idx];
1010 /* Create rest of the private xforms for the other ops */
1011 for (i = num_xforms; i < num_bufs; i++) {
1012 ret = rte_compressdev_private_xform_create(0,
1013 decompress_xforms[i % num_xforms],
1017 "Decompression private xform "
1018 "could not be created\n");
1024 /* Attach non shareable private xform data to ops */
1025 for (i = 0; i < num_bufs; i++) {
1026 priv_data = (struct priv_op_data *)(ops[i] + 1);
1027 uint16_t xform_idx = priv_data->orig_idx;
1028 ops[i]->private_xform = priv_xforms[xform_idx];
1032 /* Enqueue and dequeue all operations */
1033 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1034 if (num_enqd < num_bufs) {
1036 "The operations could not be enqueued\n");
1043 * If retrying a dequeue call, wait for 10 ms to allow
1044 * enough time to the driver to process the operations
1046 if (deqd_retries != 0) {
1048 * Avoid infinite loop if not all the
1049 * operations get out of the device
1051 if (deqd_retries == MAX_DEQD_RETRIES) {
1053 "Not all operations could be "
1057 usleep(DEQUEUE_WAIT_TIME);
1059 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1060 &ops_processed[num_total_deqd], num_bufs);
1061 num_total_deqd += num_deqd;
1063 } while (num_total_deqd < num_enqd);
/* per-buffer debug report of the decompression stage */
1068 for (i = 0; i < num_bufs; i++) {
1069 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1071 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL)
1072 strlcpy(engine, "zlib (direct, no pmd)", 22);
1074 strlcpy(engine, "pmd", 22);
1075 RTE_LOG(DEBUG, USER1,
1076 "Buffer %u decompressed by %s from %u to %u bytes\n",
1077 buf_idx[priv_data->orig_idx], engine,
1078 ops_processed[i]->consumed, ops_processed[i]->produced);
1083 * Check operation status and free source mbuf (destination mbuf and
1084 * compress operation information is still needed)
1086 for (i = 0; i < num_bufs; i++) {
1087 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1089 "Some operations were not successful\n");
1092 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1093 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1094 comp_bufs[priv_data->orig_idx] = NULL;
1098 * Compare the original stream with the decompressed stream
1099 * (in size and the data)
1101 for (i = 0; i < num_bufs; i++) {
1102 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1103 const char *buf1 = test_bufs[priv_data->orig_idx];
/* linearize possibly-SGL output before byte comparison */
1105 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1106 if (contig_buf == NULL) {
1107 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1112 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1113 ops_processed[i]->produced, contig_buf);
1115 if (compare_buffers(buf1, strlen(buf1) + 1,
1116 buf2, ops_processed[i]->produced) < 0)
1119 rte_free(contig_buf);
1126 /* Free resources */
1127 for (i = 0; i < num_bufs; i++) {
1128 rte_pktmbuf_free(uncomp_bufs[i]);
1129 rte_pktmbuf_free(comp_bufs[i]);
1130 rte_comp_op_free(ops[i]);
1131 rte_comp_op_free(ops_processed[i]);
1133 for (i = 0; i < num_priv_xforms; i++) {
1134 if (priv_xforms[i] != NULL)
1135 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1137 rte_free(contig_buf);
/*
 * Stateless deflate with FIXED Huffman coding: for every test buffer,
 * compress with the PMD / decompress with zlib, then the reverse.
 * Skipped when the device does not report RTE_COMP_FF_HUFFMAN_FIXED.
 */
1143 test_compressdev_deflate_stateless_fixed(void)
1145 struct comp_testsuite_params *ts_params = &testsuite_params;
1146 const char *test_buffer;
1149 const struct rte_compressdev_capabilities *capab;
1151 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1152 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1154 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1157 struct rte_comp_xform *compress_xform =
1158 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1160 if (compress_xform == NULL) {
1162 "Compress xform could not be created\n");
/* clone the default xform, then force fixed Huffman */
1167 memcpy(compress_xform, ts_params->def_comp_xform,
1168 sizeof(struct rte_comp_xform));
1169 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1171 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1172 test_buffer = compress_test_bufs[i];
1174 /* Compress with compressdev, decompress with Zlib */
1175 if (test_deflate_comp_decomp(&test_buffer, 1,
1178 &ts_params->def_decomp_xform,
1180 RTE_COMP_OP_STATELESS,
1182 ZLIB_DECOMPRESS) < 0) {
1187 /* Compress with Zlib, decompress with compressdev */
1188 if (test_deflate_comp_decomp(&test_buffer, 1,
1191 &ts_params->def_decomp_xform,
1193 RTE_COMP_OP_STATELESS,
1195 ZLIB_COMPRESS) < 0) {
1204 rte_free(compress_xform);
/*
 * Stateless deflate with DYNAMIC Huffman coding; mirror of the fixed-
 * huffman test.  Skipped without RTE_COMP_FF_HUFFMAN_DYNAMIC support.
 */
1209 test_compressdev_deflate_stateless_dynamic(void)
1211 struct comp_testsuite_params *ts_params = &testsuite_params;
1212 const char *test_buffer;
1215 struct rte_comp_xform *compress_xform =
1216 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1218 const struct rte_compressdev_capabilities *capab;
1220 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1221 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* NOTE(review): capability skip happens after the rte_malloc above;
 * the skip path presumably frees/ignores compress_xform — confirm */
1223 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1226 if (compress_xform == NULL) {
1228 "Compress xform could not be created\n");
1233 memcpy(compress_xform, ts_params->def_comp_xform,
1234 sizeof(struct rte_comp_xform));
1235 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1237 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1238 test_buffer = compress_test_bufs[i];
1240 /* Compress with compressdev, decompress with Zlib */
1241 if (test_deflate_comp_decomp(&test_buffer, 1,
1244 &ts_params->def_decomp_xform,
1246 RTE_COMP_OP_STATELESS,
1248 ZLIB_DECOMPRESS) < 0) {
1253 /* Compress with Zlib, decompress with compressdev */
1254 if (test_deflate_comp_decomp(&test_buffer, 1,
1257 &ts_params->def_decomp_xform,
1259 RTE_COMP_OP_STATELESS,
1261 ZLIB_COMPRESS) < 0) {
1270 rte_free(compress_xform);
/*
 * Round-trip ALL test buffers in a single burst (one op per buffer),
 * using the default xforms, in both engine directions.
 */
1275 test_compressdev_deflate_stateless_multi_op(void)
1277 struct comp_testsuite_params *ts_params = &testsuite_params;
1278 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1279 uint16_t buf_idx[num_bufs];
1282 for (i = 0; i < num_bufs; i++)
1285 /* Compress with compressdev, decompress with Zlib */
1286 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1288 &ts_params->def_comp_xform,
1289 &ts_params->def_decomp_xform,
1291 RTE_COMP_OP_STATELESS,
1293 ZLIB_DECOMPRESS) < 0)
1296 /* Compress with Zlib, decompress with compressdev */
1297 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1299 &ts_params->def_comp_xform,
1300 &ts_params->def_decomp_xform,
1302 RTE_COMP_OP_STATELESS,
1307 return TEST_SUCCESS;
/*
 * Sweep every compression level from RTE_COMP_LEVEL_MIN to
 * RTE_COMP_LEVEL_MAX for each test buffer (PMD compress / zlib
 * decompress direction only).
 */
1311 test_compressdev_deflate_stateless_multi_level(void)
1313 struct comp_testsuite_params *ts_params = &testsuite_params;
1314 const char *test_buffer;
1318 struct rte_comp_xform *compress_xform =
1319 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1321 if (compress_xform == NULL) {
1323 "Compress xform could not be created\n");
1328 memcpy(compress_xform, ts_params->def_comp_xform,
1329 sizeof(struct rte_comp_xform));
1331 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1332 test_buffer = compress_test_bufs[i];
1333 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1335 compress_xform->compress.level = level;
1336 /* Compress with compressdev, decompress with Zlib */
1337 if (test_deflate_comp_decomp(&test_buffer, 1,
1340 &ts_params->def_decomp_xform,
1342 RTE_COMP_OP_STATELESS,
1344 ZLIB_DECOMPRESS) < 0) {
1354 rte_free(compress_xform);
1358 #define NUM_XFORMS 3
/*
 * Exercise multiple private xforms in one burst: NUM_XFORMS compress/
 * decompress xform pairs at increasing levels, all applied to the same
 * input buffer (one op per xform), PMD compress / zlib decompress.
 */
1360 test_compressdev_deflate_stateless_multi_xform(void)
1362 struct comp_testsuite_params *ts_params = &testsuite_params;
1363 uint16_t num_bufs = NUM_XFORMS;
1364 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1365 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1366 const char *test_buffers[NUM_XFORMS];
1368 unsigned int level = RTE_COMP_LEVEL_MIN;
1369 uint16_t buf_idx[num_bufs];
1373 /* Create multiple xforms with various levels */
1374 for (i = 0; i < NUM_XFORMS; i++) {
1375 compress_xforms[i] = rte_malloc(NULL,
1376 sizeof(struct rte_comp_xform), 0);
1377 if (compress_xforms[i] == NULL) {
1379 "Compress xform could not be created\n");
1384 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1385 sizeof(struct rte_comp_xform));
1386 compress_xforms[i]->compress.level = level;
1389 decompress_xforms[i] = rte_malloc(NULL,
1390 sizeof(struct rte_comp_xform), 0);
1391 if (decompress_xforms[i] == NULL) {
1393 "Decompress xform could not be created\n");
1398 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1399 sizeof(struct rte_comp_xform));
1402 for (i = 0; i < NUM_XFORMS; i++) {
1404 /* Use the same buffer in all sessions */
1405 test_buffers[i] = compress_test_bufs[0];
1407 /* Compress with compressdev, decompress with Zlib */
1408 if (test_deflate_comp_decomp(test_buffers, num_bufs,
1413 RTE_COMP_OP_STATELESS,
1415 ZLIB_DECOMPRESS) < 0) {
/* cleanup path: free both xform arrays (rte_free is NULL-safe) */
1422 for (i = 0; i < NUM_XFORMS; i++) {
1423 rte_free(compress_xforms[i]);
1424 rte_free(decompress_xforms[i]);
/*
 * Scatter-gather round trip: same buffer set as the fixed/dynamic
 * tests but with SGL source/destination mbuf chains, both directions.
 * Skipped unless the device supports RTE_COMP_FF_OOP_SGL_IN_SGL_OUT.
 */
1431 test_compressdev_deflate_stateless_sgl(void)
1433 struct comp_testsuite_params *ts_params = &testsuite_params;
1435 const char *test_buffer;
1436 const struct rte_compressdev_capabilities *capab;
1438 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1439 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1441 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1444 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1445 test_buffer = compress_test_bufs[i];
1446 /* Compress with compressdev, decompress with Zlib */
1447 if (test_deflate_comp_decomp(&test_buffer, 1,
1449 &ts_params->def_comp_xform,
1450 &ts_params->def_decomp_xform,
1452 RTE_COMP_OP_STATELESS,
1454 ZLIB_DECOMPRESS) < 0)
1457 /* Compress with Zlib, decompress with compressdev */
1458 if (test_deflate_comp_decomp(&test_buffer, 1,
1460 &ts_params->def_comp_xform,
1461 &ts_params->def_decomp_xform,
1463 RTE_COMP_OP_STATELESS,
1469 return TEST_SUCCESS;
/*
 * Test-suite descriptor: suite-wide setup/teardown plus one entry per
 * test case.  All device-exercising cases use generic_ut_setup/
 * generic_ut_teardown to (re)configure and start/stop device 0.
 */
1472 static struct unit_test_suite compressdev_testsuite = {
1473 .suite_name = "compressdev unit test suite",
1474 .setup = testsuite_setup,
1475 .teardown = testsuite_teardown,
1476 .unit_test_cases = {
1477 TEST_CASE_ST(NULL, NULL,
1478 test_compressdev_invalid_configuration),
1479 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1480 test_compressdev_deflate_stateless_fixed),
1481 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1482 test_compressdev_deflate_stateless_dynamic),
1483 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1484 test_compressdev_deflate_stateless_multi_op),
1485 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1486 test_compressdev_deflate_stateless_multi_level),
1487 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1488 test_compressdev_deflate_stateless_multi_xform),
1489 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1490 test_compressdev_deflate_stateless_sgl),
1491 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point: run the suite; registered as "compressdev_autotest". */
1496 test_compressdev(void)
1498 return unit_test_suite_runner(&compressdev_testsuite);
1501 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);