New upstream version 18.11-rc1
deb_dpdk.git: app/test-bbdev/test_bbdev_perf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <inttypes.h>
#include <math.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_launch.h>
#include <rte_bbdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hexdump.h>

#include "main.h"
#include "test_bbdev_vector.h"

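/* Note: GET_SOCKET() maps SOCKET_ID_ANY to socket 0 so that per-socket
 * buffer tables (test_op_params.q_bufs) always get a valid index.
 */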
#define GET_SOCKET(socket_id) (((socket_id) == SOCKET_ID_ANY) ? 0 : (socket_id))

#define MAX_QUEUES RTE_MAX_LCORE

#define OPS_CACHE_SIZE 256U
#define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */

#define SYNC_WAIT 0
#define SYNC_START 1

#define INVALID_QUEUE_ID -1

static struct test_bbdev_vector test_vector;

/* Switch between PMD and Interrupt for throughput TC */
static bool intr_enabled;

/* Represents tested active devices */
static struct active_device {
        const char *driver_name;
        uint8_t dev_id;
        uint16_t supported_ops;
        uint16_t queue_ids[MAX_QUEUES];
        uint16_t nb_queues;
        struct rte_mempool *ops_mempool;
        struct rte_mempool *in_mbuf_pool;
        struct rte_mempool *hard_out_mbuf_pool;
        struct rte_mempool *soft_out_mbuf_pool;
} active_devs[RTE_BBDEV_MAX_DEVS];

static uint8_t nb_active_devs;

/* Data buffers used by BBDEV ops */
struct test_buffers {
        struct rte_bbdev_op_data *inputs;
        struct rte_bbdev_op_data *hard_outputs;
        struct rte_bbdev_op_data *soft_outputs;
};

/* Operation parameters specific for given test case */
struct test_op_params {
        struct rte_mempool *mp;
        struct rte_bbdev_dec_op *ref_dec_op;
        struct rte_bbdev_enc_op *ref_enc_op;
        uint16_t burst_sz;
        uint16_t num_to_process;
        uint16_t num_lcores;
        int vector_mask;
        rte_atomic16_t sync;
        struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};

/* Contains per lcore params */
struct thread_params {
        uint8_t dev_id;
        uint16_t queue_id;
        uint64_t start_time;
        double mops;
        double mbps;
        rte_atomic16_t nb_dequeued;
        rte_atomic16_t processing_status;
        struct test_op_params *op_params;
};

#ifdef RTE_BBDEV_OFFLOAD_COST
/* Stores time statistics */
struct test_time_stats {
        /* Stores software enqueue total working time */
        uint64_t enq_sw_tot_time;
        /* Stores minimum value of software enqueue working time */
        uint64_t enq_sw_min_time;
        /* Stores maximum value of software enqueue working time */
        uint64_t enq_sw_max_time;
        /* Stores turbo enqueue total working time */
        uint64_t enq_tur_tot_time;
        /* Stores minimum value of turbo enqueue working time */
        uint64_t enq_tur_min_time;
        /* Stores maximum value of turbo enqueue working time */
        uint64_t enq_tur_max_time;
        /* Stores dequeue total working time */
        uint64_t deq_tot_time;
        /* Stores minimum value of dequeue working time */
        uint64_t deq_min_time;
        /* Stores maximum value of dequeue working time */
        uint64_t deq_max_time;
};
#endif

typedef int (test_case_function)(struct active_device *ad,
                struct test_op_params *op_params);

static inline void
set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
{
        ad->supported_ops |= (1 << op_type);
}

static inline bool
is_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
{
        return ad->supported_ops & (1 << op_type);
}

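/* Note: flags_match() is a subset test - it returns true only when every
 * flag requested by the test vector is also present in the device's
 * capability mask, e.g. req=0x3 matches present=0x7 but not present=0x1.
 */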
static inline bool
flags_match(uint32_t flags_req, uint32_t flags_present)
{
        return (flags_req & flags_present) == flags_req;
}

static void
clear_soft_out_cap(uint32_t *op_flags)
{
        *op_flags &= ~RTE_BBDEV_TURBO_SOFT_OUTPUT;
        *op_flags &= ~RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT;
        *op_flags &= ~RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT;
}

static int
check_dev_cap(const struct rte_bbdev_info *dev_info)
{
        unsigned int i;
        unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs;
        const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;

        nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
        nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
        nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments;

        for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) {
                if (op_cap->type != test_vector.op_type)
                        continue;

                if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) {
                        const struct rte_bbdev_op_cap_turbo_dec *cap =
                                        &op_cap->cap.turbo_dec;
                        /* Ignore lack of soft output capability, just skip
                         * checking if soft output is valid.
                         */
                        if ((test_vector.turbo_dec.op_flags &
                                        RTE_BBDEV_TURBO_SOFT_OUTPUT) &&
                                        !(cap->capability_flags &
                                        RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
                                printf(
                                        "WARNING: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
                                        dev_info->dev_name);
                                clear_soft_out_cap(
                                        &test_vector.turbo_dec.op_flags);
                        }

                        if (!flags_match(test_vector.turbo_dec.op_flags,
                                        cap->capability_flags))
                                return TEST_FAILED;
                        if (nb_inputs > cap->num_buffers_src) {
                                printf("Too many inputs defined: %u, max: %u\n",
                                        nb_inputs, cap->num_buffers_src);
                                return TEST_FAILED;
                        }
                        if (nb_soft_outputs > cap->num_buffers_soft_out &&
                                        (test_vector.turbo_dec.op_flags &
                                        RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
                                printf(
                                        "Too many soft outputs defined: %u, max: %u\n",
                                                nb_soft_outputs,
                                                cap->num_buffers_soft_out);
                                return TEST_FAILED;
                        }
                        if (nb_hard_outputs > cap->num_buffers_hard_out) {
                                printf(
                                        "Too many hard outputs defined: %u, max: %u\n",
                                                nb_hard_outputs,
                                                cap->num_buffers_hard_out);
                                return TEST_FAILED;
                        }
                        if (intr_enabled && !(cap->capability_flags &
                                        RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
                                printf(
                                        "Dequeue interrupts are not supported!\n");
                                return TEST_FAILED;
                        }

                        return TEST_SUCCESS;
                } else if (op_cap->type == RTE_BBDEV_OP_TURBO_ENC) {
                        const struct rte_bbdev_op_cap_turbo_enc *cap =
                                        &op_cap->cap.turbo_enc;

                        if (!flags_match(test_vector.turbo_enc.op_flags,
                                        cap->capability_flags))
                                return TEST_FAILED;
                        if (nb_inputs > cap->num_buffers_src) {
                                printf("Too many inputs defined: %u, max: %u\n",
                                        nb_inputs, cap->num_buffers_src);
                                return TEST_FAILED;
                        }
                        if (nb_hard_outputs > cap->num_buffers_dst) {
                                printf(
                                        "Too many hard outputs defined: %u, max: %u\n",
                                        nb_hard_outputs, cap->num_buffers_dst);
                                return TEST_FAILED;
                        }
                        if (intr_enabled && !(cap->capability_flags &
                                        RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
                                printf(
                                        "Dequeue interrupts are not supported!\n");
                                return TEST_FAILED;
                        }

                        return TEST_SUCCESS;
                }
        }

        if ((i == 0) && (test_vector.op_type == RTE_BBDEV_OP_NONE))
                return TEST_SUCCESS; /* Special case for NULL device */

        return TEST_FAILED;
}

/* Calculates an optimal mempool size (of the form 2^n - 1, at which
 * rte_mempool is most memory-efficient) that is not smaller than val,
 * e.g. val = 600 gives 1023.
 */
static unsigned int
optimal_mempool_size(unsigned int val)
{
        return rte_align32pow2(val + 1) - 1;
}

/* allocates mbuf mempool for inputs and outputs */
static struct rte_mempool *
create_mbuf_pool(struct op_data_entries *entries, uint8_t dev_id,
                int socket_id, unsigned int mbuf_pool_size,
                const char *op_type_str)
{
        unsigned int i;
        uint32_t max_seg_sz = 0;
        char pool_name[RTE_MEMPOOL_NAMESIZE];

        /* find max input segment size */
        for (i = 0; i < entries->nb_segments; ++i)
                if (entries->segments[i].length > max_seg_sz)
                        max_seg_sz = entries->segments[i].length;

        snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
                        dev_id);
        return rte_pktmbuf_pool_create(pool_name, mbuf_pool_size, 0, 0,
                        RTE_MAX(max_seg_sz + RTE_PKTMBUF_HEADROOM,
                        (unsigned int)RTE_MBUF_DEFAULT_BUF_SIZE), socket_id);
}

static int
create_mempools(struct active_device *ad, int socket_id,
                enum rte_bbdev_op_type org_op_type, uint16_t num_ops)
{
        struct rte_mempool *mp;
        unsigned int ops_pool_size, mbuf_pool_size = 0;
        char pool_name[RTE_MEMPOOL_NAMESIZE];
        const char *op_type_str;
        enum rte_bbdev_op_type op_type = org_op_type;

        struct op_data_entries *in = &test_vector.entries[DATA_INPUT];
        struct op_data_entries *hard_out =
                        &test_vector.entries[DATA_HARD_OUTPUT];
        struct op_data_entries *soft_out =
                        &test_vector.entries[DATA_SOFT_OUTPUT];

        /* allocate ops mempool */
        ops_pool_size = optimal_mempool_size(RTE_MAX(
                        /* Ops used plus 1 reference op */
                        RTE_MAX((unsigned int)(ad->nb_queues * num_ops + 1),
                        /* Minimal cache size plus 1 reference op */
                        (unsigned int)(1.5 * rte_lcore_count() *
                                        OPS_CACHE_SIZE + 1)),
                        OPS_POOL_SIZE_MIN));

        if (org_op_type == RTE_BBDEV_OP_NONE)
                op_type = RTE_BBDEV_OP_TURBO_ENC;

        op_type_str = rte_bbdev_op_type_str(op_type);
        TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);

        snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
                        ad->dev_id);
        mp = rte_bbdev_op_pool_create(pool_name, op_type,
                        ops_pool_size, OPS_CACHE_SIZE, socket_id);
        TEST_ASSERT_NOT_NULL(mp,
                        "ERROR Failed to create %u items ops pool for dev %u on socket %u.",
                        ops_pool_size,
                        ad->dev_id,
                        socket_id);
        ad->ops_mempool = mp;

        /* Do not create inputs and outputs mbufs for BaseBand Null Device */
        if (org_op_type == RTE_BBDEV_OP_NONE)
                return TEST_SUCCESS;

        /* Inputs */
        mbuf_pool_size = optimal_mempool_size(ops_pool_size * in->nb_segments);
        mp = create_mbuf_pool(in, ad->dev_id, socket_id, mbuf_pool_size, "in");
        TEST_ASSERT_NOT_NULL(mp,
                        "ERROR Failed to create %u items input pktmbuf pool for dev %u on socket %u.",
                        mbuf_pool_size,
                        ad->dev_id,
                        socket_id);
        ad->in_mbuf_pool = mp;

        /* Hard outputs */
        mbuf_pool_size = optimal_mempool_size(ops_pool_size *
                        hard_out->nb_segments);
        mp = create_mbuf_pool(hard_out, ad->dev_id, socket_id, mbuf_pool_size,
                        "hard_out");
        TEST_ASSERT_NOT_NULL(mp,
                        "ERROR Failed to create %u items hard output pktmbuf pool for dev %u on socket %u.",
                        mbuf_pool_size,
                        ad->dev_id,
                        socket_id);
        ad->hard_out_mbuf_pool = mp;

        if (soft_out->nb_segments == 0)
                return TEST_SUCCESS;

        /* Soft outputs */
        mbuf_pool_size = optimal_mempool_size(ops_pool_size *
                        soft_out->nb_segments);
        mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id, mbuf_pool_size,
                        "soft_out");
        TEST_ASSERT_NOT_NULL(mp,
                        "ERROR Failed to create %u items soft output pktmbuf pool for dev %u on socket %u.",
                        mbuf_pool_size,
                        ad->dev_id,
                        socket_id);
        ad->soft_out_mbuf_pool = mp;

        return TEST_SUCCESS;
}

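/* Note: add_bbdev_dev() below claims up to one queue per lcore (capped by
 * the driver's max_num_queues), so each worker lcore can drive its own
 * queue.
 */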
static int
add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
                struct test_bbdev_vector *vector)
{
        int ret;
        unsigned int queue_id;
        struct rte_bbdev_queue_conf qconf;
        struct active_device *ad = &active_devs[nb_active_devs];
        unsigned int nb_queues;
        enum rte_bbdev_op_type op_type = vector->op_type;

        nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
        /* setup device */
        ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id);
        if (ret < 0) {
                printf("rte_bbdev_setup_queues(%u, %u, %d) ret %i\n",
                                dev_id, nb_queues, info->socket_id, ret);
                return TEST_FAILED;
        }

        /* configure interrupts if needed */
        if (intr_enabled) {
                ret = rte_bbdev_intr_enable(dev_id);
                if (ret < 0) {
                        printf("rte_bbdev_intr_enable(%u) ret %i\n", dev_id,
                                        ret);
                        return TEST_FAILED;
                }
        }

        /* setup device queues */
        qconf.socket = info->socket_id;
        qconf.queue_size = info->drv.default_queue_conf.queue_size;
        qconf.priority = 0;
        qconf.deferred_start = 0;
        qconf.op_type = op_type;

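        /* Note: queues are configured at priority 0 first; when a configure
         * call fails (all queues at the current priority are taken) the
         * priority is bumped once and the same queue_id is retried. The loop
         * stops at the first queue_id that cannot be configured at all.
         */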
        for (queue_id = 0; queue_id < nb_queues; ++queue_id) {
                ret = rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
                if (ret != 0) {
                        printf(
                                        "Allocated all queues (id=%u) at prio%u on dev%u\n",
                                        queue_id, qconf.priority, dev_id);
                        qconf.priority++;
                        ret = rte_bbdev_queue_configure(ad->dev_id, queue_id,
                                        &qconf);
                }
                if (ret != 0) {
                        printf("All queues on dev %u allocated: %u\n",
                                        dev_id, queue_id);
                        break;
                }
                ad->queue_ids[queue_id] = queue_id;
        }
        TEST_ASSERT(queue_id != 0,
                        "ERROR Failed to configure any queues on dev %u",
                        dev_id);
        ad->nb_queues = queue_id;

        set_avail_op(ad, op_type);

        return TEST_SUCCESS;
}

static int
add_active_device(uint8_t dev_id, struct rte_bbdev_info *info,
                struct test_bbdev_vector *vector)
{
        int ret;

        active_devs[nb_active_devs].driver_name = info->drv.driver_name;
        active_devs[nb_active_devs].dev_id = dev_id;

        ret = add_bbdev_dev(dev_id, info, vector);
        if (ret == TEST_SUCCESS)
                ++nb_active_devs;
        return ret;
}

static uint8_t
populate_active_devices(void)
{
        int ret;
        uint8_t dev_id;
        uint8_t nb_devs_added = 0;
        struct rte_bbdev_info info;

        RTE_BBDEV_FOREACH(dev_id) {
                rte_bbdev_info_get(dev_id, &info);

                if (check_dev_cap(&info)) {
                        printf(
                                "Device %d (%s) does not support specified capabilities\n",
                                        dev_id, info.dev_name);
                        continue;
                }

                ret = add_active_device(dev_id, &info, &test_vector);
                if (ret != 0) {
                        printf("Adding active bbdev %s skipped\n",
                                        info.dev_name);
                        continue;
                }
                nb_devs_added++;
        }

        return nb_devs_added;
}

static int
read_test_vector(void)
{
        int ret;

        memset(&test_vector, 0, sizeof(test_vector));
        printf("Test vector file = %s\n", get_vector_filename());
        ret = test_bbdev_vector_read(get_vector_filename(), &test_vector);
        TEST_ASSERT_SUCCESS(ret, "Failed to parse file %s\n",
                        get_vector_filename());

        return TEST_SUCCESS;
}

static int
testsuite_setup(void)
{
        TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");

        if (populate_active_devices() == 0) {
                printf("No suitable devices found!\n");
                return TEST_SKIPPED;
        }

        return TEST_SUCCESS;
}

static int
interrupt_testsuite_setup(void)
{
        TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");

        /* Enable interrupts */
        intr_enabled = true;

        /* Special case for NULL device (RTE_BBDEV_OP_NONE) */
        if (populate_active_devices() == 0 ||
                        test_vector.op_type == RTE_BBDEV_OP_NONE) {
                intr_enabled = false;
                printf("No suitable devices found!\n");
                return TEST_SKIPPED;
        }

        return TEST_SUCCESS;
}

static void
testsuite_teardown(void)
{
        uint8_t dev_id;

        /* Unconfigure devices */
        RTE_BBDEV_FOREACH(dev_id)
                rte_bbdev_close(dev_id);

        /* Clear active devices structs. */
        memset(active_devs, 0, sizeof(active_devs));
        nb_active_devs = 0;
}

static int
ut_setup(void)
{
        uint8_t i, dev_id;

        for (i = 0; i < nb_active_devs; i++) {
                dev_id = active_devs[i].dev_id;
                /* reset bbdev stats */
                TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id),
                                "Failed to reset stats of bbdev %u", dev_id);
                /* start the device */
                TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id),
                                "Failed to start bbdev %u", dev_id);
        }

        return TEST_SUCCESS;
}

static void
ut_teardown(void)
{
        uint8_t i, dev_id;
        struct rte_bbdev_stats stats;

        for (i = 0; i < nb_active_devs; i++) {
                dev_id = active_devs[i].dev_id;
                /* read stats and print */
                rte_bbdev_stats_get(dev_id, &stats);
                /* Stop the device */
                rte_bbdev_stop(dev_id);
        }
}

static int
init_op_data_objs(struct rte_bbdev_op_data *bufs,
                struct op_data_entries *ref_entries,
                struct rte_mempool *mbuf_pool, const uint16_t n,
                enum op_data_type op_type, uint16_t min_alignment)
{
        int ret;
        unsigned int i, j;

        for (i = 0; i < n; ++i) {
                char *data;
                struct op_data_buf *seg = &ref_entries->segments[0];
                struct rte_mbuf *m_head = rte_pktmbuf_alloc(mbuf_pool);
                TEST_ASSERT_NOT_NULL(m_head,
                                "Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
                                op_type, n * ref_entries->nb_segments,
                                mbuf_pool->size);

                bufs[i].data = m_head;
                bufs[i].offset = 0;
                bufs[i].length = 0;

                if (op_type == DATA_INPUT) {
                        data = rte_pktmbuf_append(m_head, seg->length);
                        TEST_ASSERT_NOT_NULL(data,
                                        "Couldn't append %u bytes to mbuf from %d data type mbuf pool",
                                        seg->length, op_type);

                        TEST_ASSERT(data == RTE_PTR_ALIGN(data, min_alignment),
                                        "Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
                                        data, min_alignment);
                        rte_memcpy(data, seg->addr, seg->length);
                        bufs[i].length += seg->length;

                        for (j = 1; j < ref_entries->nb_segments; ++j) {
                                struct rte_mbuf *m_tail =
                                                rte_pktmbuf_alloc(mbuf_pool);
                                TEST_ASSERT_NOT_NULL(m_tail,
                                                "Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
                                                op_type,
                                                n * ref_entries->nb_segments,
                                                mbuf_pool->size);
                                seg += 1;

                                data = rte_pktmbuf_append(m_tail, seg->length);
                                TEST_ASSERT_NOT_NULL(data,
                                                "Couldn't append %u bytes to mbuf from %d data type mbuf pool",
                                                seg->length, op_type);

                                TEST_ASSERT(data == RTE_PTR_ALIGN(data,
                                                min_alignment),
                                                "Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
                                                data, min_alignment);
                                rte_memcpy(data, seg->addr, seg->length);
                                bufs[i].length += seg->length;

                                ret = rte_pktmbuf_chain(m_head, m_tail);
                                TEST_ASSERT_SUCCESS(ret,
                                                "Couldn't chain mbufs from %d data type mbuf pool",
                                                op_type);
                        }
                }
        }

        return 0;
}

static int
allocate_buffers_on_socket(struct rte_bbdev_op_data **buffers, const int len,
                const int socket)
{
        int i;

        *buffers = rte_zmalloc_socket(NULL, len, 0, socket);
        if (*buffers == NULL) {
                printf("WARNING: Failed to allocate op_data on socket %d\n",
                                socket);
                /* try to allocate memory on other detected sockets */
                for (i = 0; i < socket; i++) {
                        *buffers = rte_zmalloc_socket(NULL, len, 0, i);
                        if (*buffers != NULL)
                                break;
                }
        }

        return (*buffers == NULL) ? TEST_FAILED : TEST_SUCCESS;
}

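/* Note: LLR inputs are stored in the vector as full-range int8 values;
 * before enqueue they are rescaled to the device's advertised range, i.e.
 * llr' = round(max_llr_modulus * llr / INT8_MAX).
 */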
static void
limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
                uint16_t n, int8_t max_llr_modulus)
{
        uint16_t i, byte_idx;

        for (i = 0; i < n; ++i) {
                struct rte_mbuf *m = input_ops[i].data;
                while (m != NULL) {
                        int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
                                        input_ops[i].offset);
                        for (byte_idx = 0; byte_idx < input_ops[i].length;
                                        ++byte_idx)
                                llr[byte_idx] = round((double)max_llr_modulus *
                                                llr[byte_idx] / INT8_MAX);

                        m = m->next;
                }
        }
}

static int
fill_queue_buffers(struct test_op_params *op_params,
                struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
                struct rte_mempool *soft_out_mp, uint16_t queue_id,
                const struct rte_bbdev_op_cap *capabilities,
                uint16_t min_alignment, const int socket_id)
{
        int ret;
        enum op_data_type type;
        const uint16_t n = op_params->num_to_process;

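        /* Note: both lookup tables below are indexed by enum op_data_type,
         * so their entry order must match the enumerator order declared in
         * test_bbdev_vector.h.
         */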
        struct rte_mempool *mbuf_pools[DATA_NUM_TYPES] = {
                in_mp,
                soft_out_mp,
                hard_out_mp,
        };

        struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
                &op_params->q_bufs[socket_id][queue_id].inputs,
                &op_params->q_bufs[socket_id][queue_id].soft_outputs,
                &op_params->q_bufs[socket_id][queue_id].hard_outputs,
        };

        for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
                struct op_data_entries *ref_entries =
                                &test_vector.entries[type];
                if (ref_entries->nb_segments == 0)
                        continue;

                ret = allocate_buffers_on_socket(queue_ops[type],
                                n * sizeof(struct rte_bbdev_op_data),
                                socket_id);
                TEST_ASSERT_SUCCESS(ret,
                                "Couldn't allocate memory for rte_bbdev_op_data structs");

                ret = init_op_data_objs(*queue_ops[type], ref_entries,
                                mbuf_pools[type], n, type, min_alignment);
                TEST_ASSERT_SUCCESS(ret,
                                "Couldn't init rte_bbdev_op_data structs");
        }

        if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
                        capabilities->cap.turbo_dec.max_llr_modulus);

        return 0;
}

static void
free_buffers(struct active_device *ad, struct test_op_params *op_params)
{
        unsigned int i, j;

        rte_mempool_free(ad->ops_mempool);
        rte_mempool_free(ad->in_mbuf_pool);
        rte_mempool_free(ad->hard_out_mbuf_pool);
        rte_mempool_free(ad->soft_out_mbuf_pool);

        for (i = 0; i < rte_lcore_count(); ++i) {
                for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
                        rte_free(op_params->q_bufs[j][i].inputs);
                        rte_free(op_params->q_bufs[j][i].hard_outputs);
                        rte_free(op_params->q_bufs[j][i].soft_outputs);
                }
        }
}

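/* Note: the copy_reference_*_op() helpers replicate one parsed reference op
 * into each op in a burst; only the per-op data pointers (input/output
 * buffers) differ, selected via start_idx.
 */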
static void
copy_reference_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
                unsigned int start_idx,
                struct rte_bbdev_op_data *inputs,
                struct rte_bbdev_op_data *hard_outputs,
                struct rte_bbdev_op_data *soft_outputs,
                struct rte_bbdev_dec_op *ref_op)
{
        unsigned int i;
        struct rte_bbdev_op_turbo_dec *turbo_dec = &ref_op->turbo_dec;

        for (i = 0; i < n; ++i) {
                if (turbo_dec->code_block_mode == 0) {
                        ops[i]->turbo_dec.tb_params.ea =
                                        turbo_dec->tb_params.ea;
                        ops[i]->turbo_dec.tb_params.eb =
                                        turbo_dec->tb_params.eb;
                        ops[i]->turbo_dec.tb_params.k_pos =
                                        turbo_dec->tb_params.k_pos;
                        ops[i]->turbo_dec.tb_params.k_neg =
                                        turbo_dec->tb_params.k_neg;
                        ops[i]->turbo_dec.tb_params.c =
                                        turbo_dec->tb_params.c;
                        ops[i]->turbo_dec.tb_params.c_neg =
                                        turbo_dec->tb_params.c_neg;
                        ops[i]->turbo_dec.tb_params.cab =
                                        turbo_dec->tb_params.cab;
                } else {
                        ops[i]->turbo_dec.cb_params.e = turbo_dec->cb_params.e;
                        ops[i]->turbo_dec.cb_params.k = turbo_dec->cb_params.k;
                }

                ops[i]->turbo_dec.ext_scale = turbo_dec->ext_scale;
                ops[i]->turbo_dec.iter_max = turbo_dec->iter_max;
                ops[i]->turbo_dec.iter_min = turbo_dec->iter_min;
                ops[i]->turbo_dec.op_flags = turbo_dec->op_flags;
                ops[i]->turbo_dec.rv_index = turbo_dec->rv_index;
                ops[i]->turbo_dec.num_maps = turbo_dec->num_maps;
                ops[i]->turbo_dec.code_block_mode = turbo_dec->code_block_mode;

                ops[i]->turbo_dec.hard_output = hard_outputs[start_idx + i];
                ops[i]->turbo_dec.input = inputs[start_idx + i];
                if (soft_outputs != NULL)
                        ops[i]->turbo_dec.soft_output =
                                soft_outputs[start_idx + i];
        }
}

static void
copy_reference_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
                unsigned int start_idx,
                struct rte_bbdev_op_data *inputs,
                struct rte_bbdev_op_data *outputs,
                struct rte_bbdev_enc_op *ref_op)
{
        unsigned int i;
        struct rte_bbdev_op_turbo_enc *turbo_enc = &ref_op->turbo_enc;
        for (i = 0; i < n; ++i) {
                if (turbo_enc->code_block_mode == 0) {
                        ops[i]->turbo_enc.tb_params.ea =
                                        turbo_enc->tb_params.ea;
                        ops[i]->turbo_enc.tb_params.eb =
                                        turbo_enc->tb_params.eb;
                        ops[i]->turbo_enc.tb_params.k_pos =
                                        turbo_enc->tb_params.k_pos;
                        ops[i]->turbo_enc.tb_params.k_neg =
                                        turbo_enc->tb_params.k_neg;
                        ops[i]->turbo_enc.tb_params.c =
                                        turbo_enc->tb_params.c;
                        ops[i]->turbo_enc.tb_params.c_neg =
                                        turbo_enc->tb_params.c_neg;
                        ops[i]->turbo_enc.tb_params.cab =
                                        turbo_enc->tb_params.cab;
                        ops[i]->turbo_enc.tb_params.ncb_pos =
                                        turbo_enc->tb_params.ncb_pos;
                        ops[i]->turbo_enc.tb_params.ncb_neg =
                                        turbo_enc->tb_params.ncb_neg;
                        ops[i]->turbo_enc.tb_params.r = turbo_enc->tb_params.r;
                } else {
                        ops[i]->turbo_enc.cb_params.e = turbo_enc->cb_params.e;
                        ops[i]->turbo_enc.cb_params.k = turbo_enc->cb_params.k;
                        ops[i]->turbo_enc.cb_params.ncb =
                                        turbo_enc->cb_params.ncb;
                }
                ops[i]->turbo_enc.rv_index = turbo_enc->rv_index;
                ops[i]->turbo_enc.op_flags = turbo_enc->op_flags;
                ops[i]->turbo_enc.code_block_mode = turbo_enc->code_block_mode;

                ops[i]->turbo_enc.output = outputs[start_idx + i];
                ops[i]->turbo_enc.input = inputs[start_idx + i];
        }
}

static int
check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
                unsigned int order_idx, const int expected_status)
{
        TEST_ASSERT(op->status == expected_status,
                        "op_status (%d) != expected_status (%d)",
                        op->status, expected_status);

        TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
                        "Ordering error, expected %p, got %p",
                        (void *)(uintptr_t)order_idx, op->opaque_data);

        return TEST_SUCCESS;
}

static int
check_enc_status_and_ordering(struct rte_bbdev_enc_op *op,
                unsigned int order_idx, const int expected_status)
{
        TEST_ASSERT(op->status == expected_status,
                        "op_status (%d) != expected_status (%d)",
                        op->status, expected_status);

        TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
                        "Ordering error, expected %p, got %p",
                        (void *)(uintptr_t)order_idx, op->opaque_data);

        return TEST_SUCCESS;
}

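/* Note: validate_op_chain() walks the mbuf chain segment by segment and
 * compares each one against the corresponding reference segment; op->offset
 * applies only to the head mbuf, which is why it is zeroed for i > 0.
 */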
static inline int
validate_op_chain(struct rte_bbdev_op_data *op,
                struct op_data_entries *orig_op)
{
        uint8_t i;
        struct rte_mbuf *m = op->data;
        uint8_t nb_dst_segments = orig_op->nb_segments;

        TEST_ASSERT(nb_dst_segments == m->nb_segs,
                        "Number of segments differ in original (%u) and filled (%u) op",
                        nb_dst_segments, m->nb_segs);

        for (i = 0; i < nb_dst_segments; ++i) {
                /* Apply offset to the first mbuf segment */
                uint16_t offset = (i == 0) ? op->offset : 0;
                uint16_t data_len = m->data_len - offset;

                TEST_ASSERT(orig_op->segments[i].length == data_len,
                                "Length of segment differ in original (%u) and filled (%u) op",
                                orig_op->segments[i].length, data_len);
                TEST_ASSERT_BUFFERS_ARE_EQUAL(orig_op->segments[i].addr,
                                rte_pktmbuf_mtod_offset(m, uint32_t *, offset),
                                data_len,
                                "Output buffers (CB=%u) are not equal", i);
                m = m->next;
        }

        return TEST_SUCCESS;
}

static int
validate_dec_buffers(struct rte_bbdev_dec_op *ref_op, struct test_buffers *bufs,
                const uint16_t num_to_process)
{
        int i;

        struct op_data_entries *hard_data_orig =
                        &test_vector.entries[DATA_HARD_OUTPUT];
        struct op_data_entries *soft_data_orig =
                        &test_vector.entries[DATA_SOFT_OUTPUT];

        for (i = 0; i < num_to_process; i++) {
                TEST_ASSERT_SUCCESS(validate_op_chain(&bufs->hard_outputs[i],
                                hard_data_orig),
                                "Hard output buffers are not equal");
                if (ref_op->turbo_dec.op_flags &
                                RTE_BBDEV_TURBO_SOFT_OUTPUT)
                        TEST_ASSERT_SUCCESS(validate_op_chain(
                                        &bufs->soft_outputs[i],
                                        soft_data_orig),
                                        "Soft output buffers are not equal");
        }

        return TEST_SUCCESS;
}

static int
validate_enc_buffers(struct test_buffers *bufs, const uint16_t num_to_process)
{
        int i;

        struct op_data_entries *hard_data_orig =
                        &test_vector.entries[DATA_HARD_OUTPUT];

        for (i = 0; i < num_to_process; i++)
                TEST_ASSERT_SUCCESS(validate_op_chain(&bufs->hard_outputs[i],
                                hard_data_orig), "");

        return TEST_SUCCESS;
}

static int
validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
                struct rte_bbdev_dec_op *ref_op, const int vector_mask)
{
        unsigned int i;
        int ret;
        struct op_data_entries *hard_data_orig =
                        &test_vector.entries[DATA_HARD_OUTPUT];
        struct op_data_entries *soft_data_orig =
                        &test_vector.entries[DATA_SOFT_OUTPUT];
        struct rte_bbdev_op_turbo_dec *ops_td;
        struct rte_bbdev_op_data *hard_output;
        struct rte_bbdev_op_data *soft_output;
        struct rte_bbdev_op_turbo_dec *ref_td = &ref_op->turbo_dec;

        for (i = 0; i < n; ++i) {
                ops_td = &ops[i]->turbo_dec;
                hard_output = &ops_td->hard_output;
                soft_output = &ops_td->soft_output;

                if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
                        TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
                                        "Returned iter_count (%d) > expected iter_count (%d)",
                                        ops_td->iter_count, ref_td->iter_count);
                ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
                TEST_ASSERT_SUCCESS(ret,
                                "Checking status and ordering for decoder failed");

                TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
                                hard_data_orig),
                                "Hard output buffers (CB=%u) are not equal",
                                i);

                if (ref_op->turbo_dec.op_flags & RTE_BBDEV_TURBO_SOFT_OUTPUT)
                        TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
                                        soft_data_orig),
                                        "Soft output buffers (CB=%u) are not equal",
                                        i);
        }

        return TEST_SUCCESS;
}

static int
validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
                struct rte_bbdev_enc_op *ref_op)
{
        unsigned int i;
        int ret;
        struct op_data_entries *hard_data_orig =
                        &test_vector.entries[DATA_HARD_OUTPUT];

        for (i = 0; i < n; ++i) {
                ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
                TEST_ASSERT_SUCCESS(ret,
                                "Checking status and ordering for encoder failed");
                TEST_ASSERT_SUCCESS(validate_op_chain(
                                &ops[i]->turbo_enc.output,
                                hard_data_orig),
                                "Output buffers (CB=%u) are not equal",
                                i);
        }

        return TEST_SUCCESS;
}

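/* Note: the reference op starts as a copy of the parsed test vector; its
 * input.length is the sum of all input segment lengths, later used as the
 * per-op payload size in the Mbps calculation.
 */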
static void
create_reference_dec_op(struct rte_bbdev_dec_op *op)
{
        unsigned int i;
        struct op_data_entries *entry;

        op->turbo_dec = test_vector.turbo_dec;
        entry = &test_vector.entries[DATA_INPUT];
        for (i = 0; i < entry->nb_segments; ++i)
                op->turbo_dec.input.length +=
                                entry->segments[i].length;
}

static void
create_reference_enc_op(struct rte_bbdev_enc_op *op)
{
        unsigned int i;
        struct op_data_entries *entry;

        op->turbo_enc = test_vector.turbo_enc;
        entry = &test_vector.entries[DATA_INPUT];
        for (i = 0; i < entry->nb_segments; ++i)
                op->turbo_enc.input.length +=
                                entry->segments[i].length;
}

static int
init_test_op_params(struct test_op_params *op_params,
                enum rte_bbdev_op_type op_type, const int expected_status,
                const int vector_mask, struct rte_mempool *ops_mp,
                uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
{
        int ret = 0;
        if (op_type == RTE_BBDEV_OP_TURBO_DEC)
                ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
                                &op_params->ref_dec_op, 1);
        else
                ret = rte_bbdev_enc_op_alloc_bulk(ops_mp,
                                &op_params->ref_enc_op, 1);

        TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");

        op_params->mp = ops_mp;
        op_params->burst_sz = burst_sz;
        op_params->num_to_process = num_to_process;
        op_params->num_lcores = num_lcores;
        op_params->vector_mask = vector_mask;
        if (op_type == RTE_BBDEV_OP_TURBO_DEC)
                op_params->ref_dec_op->status = expected_status;
        else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
                op_params->ref_enc_op->status = expected_status;

        return 0;
}

static int
run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
                struct test_op_params *op_params)
{
        int t_ret, f_ret, socket_id = SOCKET_ID_ANY;
        unsigned int i;
        struct active_device *ad;
        unsigned int burst_sz = get_burst_sz();
        enum rte_bbdev_op_type op_type = test_vector.op_type;
        const struct rte_bbdev_op_cap *capabilities = NULL;

        ad = &active_devs[dev_id];

        /* Check if device supports op_type */
        if (!is_avail_op(ad, test_vector.op_type))
                return TEST_SUCCESS;

        struct rte_bbdev_info info;
        rte_bbdev_info_get(ad->dev_id, &info);
        socket_id = GET_SOCKET(info.socket_id);

        f_ret = create_mempools(ad, socket_id, op_type,
                        get_num_ops());
        if (f_ret != TEST_SUCCESS) {
                printf("Couldn't create mempools");
                goto fail;
        }
        if (op_type == RTE_BBDEV_OP_NONE)
                op_type = RTE_BBDEV_OP_TURBO_ENC;

        f_ret = init_test_op_params(op_params, test_vector.op_type,
                        test_vector.expected_status,
                        test_vector.mask,
                        ad->ops_mempool,
                        burst_sz,
                        get_num_ops(),
                        get_num_lcores());
        if (f_ret != TEST_SUCCESS) {
                printf("Couldn't init test op params");
                goto fail;
        }

        if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
                /* Find Decoder capabilities */
                const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
                while (cap->type != RTE_BBDEV_OP_NONE) {
                        if (cap->type == RTE_BBDEV_OP_TURBO_DEC) {
                                capabilities = cap;
                                break;
                        }
                        cap++;
                }
                TEST_ASSERT_NOT_NULL(capabilities,
                                "Couldn't find Decoder capabilities");

                create_reference_dec_op(op_params->ref_dec_op);
        } else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
                create_reference_enc_op(op_params->ref_enc_op);

        for (i = 0; i < ad->nb_queues; ++i) {
                f_ret = fill_queue_buffers(op_params,
                                ad->in_mbuf_pool,
                                ad->hard_out_mbuf_pool,
                                ad->soft_out_mbuf_pool,
                                ad->queue_ids[i],
                                capabilities,
                                info.drv.min_alignment,
                                socket_id);
                if (f_ret != TEST_SUCCESS) {
                        printf("Couldn't init queue buffers");
                        goto fail;
                }
        }

        /* Run test case function */
        t_ret = test_case_func(ad, op_params);

        /* Free active device resources and return */
        free_buffers(ad, op_params);
        return t_ret;

fail:
        free_buffers(ad, op_params);
        return TEST_FAILED;
}

/* Run given test function per active device per supported op type
 * per burst size.
 */
static int
run_test_case(test_case_function *test_case_func)
{
        int ret = 0;
        uint8_t dev;

        /* Alloc op_params */
        struct test_op_params *op_params = rte_zmalloc(NULL,
                        sizeof(struct test_op_params), RTE_CACHE_LINE_SIZE);
        TEST_ASSERT_NOT_NULL(op_params, "Failed to alloc %zuB for op_params",
                        RTE_ALIGN(sizeof(struct test_op_params),
                                RTE_CACHE_LINE_SIZE));

        /* For each device run test case function */
        for (dev = 0; dev < nb_active_devs; ++dev)
                ret |= run_test_case_on_device(test_case_func, dev, op_params);

        rte_free(op_params);

        return ret;
}

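/* Note: in interrupt mode the enqueue loop runs on the worker lcore while
 * this callback, registered for RTE_BBDEV_EVENT_DEQUEUE, performs the
 * dequeues; throughput is only computed once nb_dequeued reaches
 * num_to_process.
 */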
static void
dequeue_event_callback(uint16_t dev_id,
                enum rte_bbdev_event_type event, void *cb_arg,
                void *ret_param)
{
        int ret;
        uint16_t i;
        uint64_t total_time;
        uint16_t deq, burst_sz, num_to_process;
        uint16_t queue_id = INVALID_QUEUE_ID;
        struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
        struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
        struct test_buffers *bufs;
        struct rte_bbdev_info info;

        /* Input length in bytes, million operations per second,
         * million bits per second.
         */
        double in_len;

        struct thread_params *tp = cb_arg;
        RTE_SET_USED(ret_param);
        queue_id = tp->queue_id;

        /* Find matching thread params using queue_id */
        for (i = 0; i < MAX_QUEUES; ++i, ++tp)
                if (tp->queue_id == queue_id)
                        break;

        if (i == MAX_QUEUES) {
                printf("%s: Queue_id from interrupt details was not found!\n",
                                __func__);
                return;
        }

        if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
                rte_atomic16_set(&tp->processing_status, TEST_FAILED);
                printf(
                        "Dequeue interrupt handler called for incorrect event!\n");
                return;
        }

        burst_sz = tp->op_params->burst_sz;
        num_to_process = tp->op_params->num_to_process;

        if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id, dec_ops,
                                burst_sz);
        else
                deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id, enc_ops,
                                burst_sz);

        if (deq < burst_sz) {
                printf(
                        "After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
                        burst_sz, deq);
                rte_atomic16_set(&tp->processing_status, TEST_FAILED);
                return;
        }

        if (rte_atomic16_read(&tp->nb_dequeued) + deq < num_to_process) {
                rte_atomic16_add(&tp->nb_dequeued, deq);
                return;
        }

        total_time = rte_rdtsc_precise() - tp->start_time;

        rte_bbdev_info_get(dev_id, &info);

        bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

        ret = TEST_SUCCESS;
        if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                ret = validate_dec_buffers(tp->op_params->ref_dec_op, bufs,
                                num_to_process);
        else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
                ret = validate_enc_buffers(bufs, num_to_process);

        if (ret) {
                printf("Buffers validation failed\n");
                rte_atomic16_set(&tp->processing_status, TEST_FAILED);
        }

        switch (test_vector.op_type) {
        case RTE_BBDEV_OP_TURBO_DEC:
                in_len = tp->op_params->ref_dec_op->turbo_dec.input.length;
                break;
        case RTE_BBDEV_OP_TURBO_ENC:
                in_len = tp->op_params->ref_enc_op->turbo_enc.input.length;
                break;
        case RTE_BBDEV_OP_NONE:
                in_len = 0.0;
                break;
        default:
                printf("Unknown op type: %d\n", test_vector.op_type);
                rte_atomic16_set(&tp->processing_status, TEST_FAILED);
                return;
        }

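        /* Note: TSC cycles are converted to seconds via rte_get_tsc_hz(), so
         * Mops = num_ops / 1e6 / time_s and
         * Mbps = num_ops * in_len * 8 / 1e6 / time_s.
         */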
        tp->mops = ((double)num_to_process / 1000000.0) /
                        ((double)total_time / (double)rte_get_tsc_hz());
        tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) /
                        ((double)total_time / (double)rte_get_tsc_hz());

        rte_atomic16_add(&tp->nb_dequeued, deq);
}

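/* Note: the two *_intr_lcore_* workers below only enqueue; the worker frees
 * its local op handles right after enqueue, and dequeue plus buffer
 * validation happen in dequeue_event_callback() when the interrupt fires.
 */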
static int
throughput_intr_lcore_dec(void *arg)
{
        struct thread_params *tp = arg;
        unsigned int enqueued;
        struct rte_bbdev_dec_op *ops[MAX_BURST];
        const uint16_t queue_id = tp->queue_id;
        const uint16_t burst_sz = tp->op_params->burst_sz;
        const uint16_t num_to_process = tp->op_params->num_to_process;
        struct test_buffers *bufs = NULL;
        unsigned int allocs_failed = 0;
        struct rte_bbdev_info info;
        int ret;

        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);

        TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
                        "Failed to enable interrupts for dev: %u, queue_id: %u",
                        tp->dev_id, queue_id);

        rte_bbdev_info_get(tp->dev_id, &info);
        bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

        rte_atomic16_clear(&tp->processing_status);
        rte_atomic16_clear(&tp->nb_dequeued);

        while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
                rte_pause();

        tp->start_time = rte_rdtsc_precise();
        for (enqueued = 0; enqueued < num_to_process;) {

                uint16_t num_to_enq = burst_sz;

                if (unlikely(num_to_process - enqueued < num_to_enq))
                        num_to_enq = num_to_process - enqueued;

                ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
                                num_to_enq);
                if (ret != 0) {
                        allocs_failed++;
                        continue;
                }

                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_dec_op(ops, num_to_enq, enqueued,
                                        bufs->inputs,
                                        bufs->hard_outputs,
                                        bufs->soft_outputs,
                                        tp->op_params->ref_dec_op);

                enqueued += rte_bbdev_enqueue_dec_ops(tp->dev_id, queue_id, ops,
                                num_to_enq);

                rte_bbdev_dec_op_free_bulk(ops, num_to_enq);
        }

        if (allocs_failed > 0)
                printf("WARNING: op allocations failed: %u times\n",
                                allocs_failed);

        return TEST_SUCCESS;
}

static int
throughput_intr_lcore_enc(void *arg)
{
        struct thread_params *tp = arg;
        unsigned int enqueued;
        struct rte_bbdev_enc_op *ops[MAX_BURST];
        const uint16_t queue_id = tp->queue_id;
        const uint16_t burst_sz = tp->op_params->burst_sz;
        const uint16_t num_to_process = tp->op_params->num_to_process;
        struct test_buffers *bufs = NULL;
        unsigned int allocs_failed = 0;
        struct rte_bbdev_info info;
        int ret;

        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);

        TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
                        "Failed to enable interrupts for dev: %u, queue_id: %u",
                        tp->dev_id, queue_id);

        rte_bbdev_info_get(tp->dev_id, &info);
        bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

        rte_atomic16_clear(&tp->processing_status);
        rte_atomic16_clear(&tp->nb_dequeued);

        while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
                rte_pause();

        tp->start_time = rte_rdtsc_precise();
        for (enqueued = 0; enqueued < num_to_process;) {

                uint16_t num_to_enq = burst_sz;

                if (unlikely(num_to_process - enqueued < num_to_enq))
                        num_to_enq = num_to_process - enqueued;

                ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
                                num_to_enq);
                if (ret != 0) {
                        allocs_failed++;
                        continue;
                }

                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_enc_op(ops, num_to_enq, enqueued,
                                        bufs->inputs,
                                        bufs->hard_outputs,
                                        tp->op_params->ref_enc_op);

                enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id, queue_id, ops,
                                num_to_enq);

                rte_bbdev_enc_op_free_bulk(ops, num_to_enq);
        }

        if (allocs_failed > 0)
                printf("WARNING: op allocations failed: %u times\n",
                                allocs_failed);

        return TEST_SUCCESS;
}

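/*
 * PMD-mode (polling) throughput lcores both enqueue and dequeue in the
 * same loop: allocate a burst from the mempool, optionally copy the
 * reference operation into it, enqueue, then poll the dequeue side and
 * free whatever came back. The whole run is bracketed with
 * rte_rdtsc_precise() so throughput can be derived from TSC cycles.
 */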
static int
throughput_pmd_lcore_dec(void *arg)
{
        struct thread_params *tp = arg;
        unsigned int enqueued, dequeued;
        struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
        uint64_t total_time, start_time;
        const uint16_t queue_id = tp->queue_id;
        const uint16_t burst_sz = tp->op_params->burst_sz;
        const uint16_t num_to_process = tp->op_params->num_to_process;
        struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
        struct test_buffers *bufs = NULL;
        unsigned int allocs_failed = 0;
        int ret;
        struct rte_bbdev_info info;

        /* Input length in bytes, million operations per second, million bits
         * per second.
         */
        double in_len;

        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);

        rte_bbdev_info_get(tp->dev_id, &info);
        bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

        while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
                rte_pause();

        start_time = rte_rdtsc_precise();
        for (enqueued = 0, dequeued = 0; dequeued < num_to_process;) {
                uint16_t deq;

                if (likely(enqueued < num_to_process)) {

                        uint16_t num_to_enq = burst_sz;

                        if (unlikely(num_to_process - enqueued < num_to_enq))
                                num_to_enq = num_to_process - enqueued;

                        ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp,
                                        ops_enq, num_to_enq);
                        if (ret != 0) {
                                allocs_failed++;
                                goto do_dequeue;
                        }

                        if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                                copy_reference_dec_op(ops_enq, num_to_enq,
                                                enqueued,
                                                bufs->inputs,
                                                bufs->hard_outputs,
                                                bufs->soft_outputs,
                                                ref_op);

                        enqueued += rte_bbdev_enqueue_dec_ops(tp->dev_id,
                                        queue_id, ops_enq, num_to_enq);
                }
do_dequeue:
                deq = rte_bbdev_dequeue_dec_ops(tp->dev_id, queue_id, ops_deq,
                                burst_sz);
                dequeued += deq;
                rte_bbdev_dec_op_free_bulk(ops_enq, deq);
        }
        total_time = rte_rdtsc_precise() - start_time;

        if (allocs_failed > 0)
                printf("WARNING: op allocations failed: %u times\n",
                                allocs_failed);

        TEST_ASSERT(enqueued == dequeued, "enqueued (%u) != dequeued (%u)",
                        enqueued, dequeued);

        if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
                ret = validate_dec_buffers(ref_op, bufs, num_to_process);
                TEST_ASSERT_SUCCESS(ret, "Buffers validation failed");
        }

        in_len = ref_op->turbo_dec.input.length;
        tp->mops = ((double)num_to_process / 1000000.0) /
                        ((double)total_time / (double)rte_get_tsc_hz());
        tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) /
                        ((double)total_time / (double)rte_get_tsc_hz());

        return TEST_SUCCESS;
}

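/*
 * Both PMD lcore functions report throughput the same way. Restating
 * the arithmetic used at the end of each (in_len is the reference op's
 * input length in bytes, total_time is in TSC cycles):
 *
 *   seconds = total_time / rte_get_tsc_hz();
 *   MOPS    = (num_to_process / 1e6) / seconds;
 *   Mbps    = (num_to_process * in_len * 8 / 1e6) / seconds;
 */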
static int
throughput_pmd_lcore_enc(void *arg)
{
        struct thread_params *tp = arg;
        unsigned int enqueued, dequeued;
        struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
        uint64_t total_time, start_time;
        const uint16_t queue_id = tp->queue_id;
        const uint16_t burst_sz = tp->op_params->burst_sz;
        const uint16_t num_to_process = tp->op_params->num_to_process;
        struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
        struct test_buffers *bufs = NULL;
        unsigned int allocs_failed = 0;
        int ret;
        struct rte_bbdev_info info;

        /* Input length in bytes, million operations per second, million bits
         * per second.
         */
        double in_len;

        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);

        rte_bbdev_info_get(tp->dev_id, &info);
        bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

        while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
                rte_pause();

        start_time = rte_rdtsc_precise();
        for (enqueued = 0, dequeued = 0; dequeued < num_to_process;) {
                uint16_t deq;

                if (likely(enqueued < num_to_process)) {

                        uint16_t num_to_enq = burst_sz;

                        if (unlikely(num_to_process - enqueued < num_to_enq))
                                num_to_enq = num_to_process - enqueued;

                        ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp,
                                        ops_enq, num_to_enq);
                        if (ret != 0) {
                                allocs_failed++;
                                goto do_dequeue;
                        }

                        if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                                copy_reference_enc_op(ops_enq, num_to_enq,
                                                enqueued,
                                                bufs->inputs,
                                                bufs->hard_outputs,
                                                ref_op);

                        enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id,
                                        queue_id, ops_enq, num_to_enq);
                }
do_dequeue:
                deq = rte_bbdev_dequeue_enc_ops(tp->dev_id, queue_id, ops_deq,
                                burst_sz);
                dequeued += deq;
                rte_bbdev_enc_op_free_bulk(ops_enq, deq);
        }
        total_time = rte_rdtsc_precise() - start_time;

        if (allocs_failed > 0)
                printf("WARNING: op allocations failed: %u times\n",
                                allocs_failed);

        TEST_ASSERT(enqueued == dequeued, "enqueued (%u) != dequeued (%u)",
                        enqueued, dequeued);

        if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
                ret = validate_enc_buffers(bufs, num_to_process);
                TEST_ASSERT_SUCCESS(ret, "Buffers validation failed");
        }

        in_len = ref_op->turbo_enc.input.length;

        tp->mops = ((double)num_to_process / 1000000.0) /
                        ((double)total_time / (double)rte_get_tsc_hz());
        tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) /
                        ((double)total_time / (double)rte_get_tsc_hz());

        return TEST_SUCCESS;
}

static void
print_throughput(struct thread_params *t_params, unsigned int used_cores)
{
        unsigned int lcore_id, iter = 0;
        double total_mops = 0, total_mbps = 0;

        RTE_LCORE_FOREACH(lcore_id) {
                if (iter++ >= used_cores)
                        break;
                printf("\tlcore_id: %u, throughput: %.8lg MOPS, %.8lg Mbps\n",
                                lcore_id, t_params[lcore_id].mops,
                                t_params[lcore_id].mbps);
                total_mops += t_params[lcore_id].mops;
                total_mbps += t_params[lcore_id].mbps;
        }
        printf(
                "\n\tTotal stats for %u cores: throughput: %.8lg MOPS, %.8lg Mbps\n",
                used_cores, total_mops, total_mbps);
}

/*
 * Test function that determines how long an enqueue + dequeue of a burst
 * takes on available lcores.
 */
static int
throughput_test(struct active_device *ad,
                struct test_op_params *op_params)
{
        int ret;
        unsigned int lcore_id, used_cores = 0;
        struct thread_params t_params[MAX_QUEUES];
        struct rte_bbdev_info info;
        lcore_function_t *throughput_function;
        struct thread_params *tp;
        uint16_t num_lcores;
        const char *op_type_str;

        rte_bbdev_info_get(ad->dev_id, &info);

        op_type_str = rte_bbdev_op_type_str(test_vector.op_type);
        TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
                        test_vector.op_type);

        printf(
                "Throughput test: dev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, int mode: %s, GHz: %lg\n",
                        info.dev_name, ad->nb_queues, op_params->burst_sz,
                        op_params->num_to_process, op_params->num_lcores,
                        op_type_str,
                        intr_enabled ? "Interrupt mode" : "PMD mode",
                        (double)rte_get_tsc_hz() / 1000000000.0);

        /* Set number of lcores */
        num_lcores = (ad->nb_queues < (op_params->num_lcores))
                        ? ad->nb_queues
                        : op_params->num_lcores;

        if (intr_enabled) {
                if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                        throughput_function = throughput_intr_lcore_dec;
                else
                        throughput_function = throughput_intr_lcore_enc;

                /* Dequeue interrupt callback registration */
                ret = rte_bbdev_callback_register(ad->dev_id,
                                RTE_BBDEV_EVENT_DEQUEUE, dequeue_event_callback,
                                &t_params);
                if (ret < 0)
                        return ret;
        } else {
                if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                        throughput_function = throughput_pmd_lcore_dec;
                else
                        throughput_function = throughput_pmd_lcore_enc;
        }

        rte_atomic16_set(&op_params->sync, SYNC_WAIT);

        t_params[rte_lcore_id()].dev_id = ad->dev_id;
        t_params[rte_lcore_id()].op_params = op_params;
        t_params[rte_lcore_id()].queue_id =
                        ad->queue_ids[used_cores++];

        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                if (used_cores >= num_lcores)
                        break;

                t_params[lcore_id].dev_id = ad->dev_id;
                t_params[lcore_id].op_params = op_params;
                t_params[lcore_id].queue_id = ad->queue_ids[used_cores++];

                rte_eal_remote_launch(throughput_function, &t_params[lcore_id],
                                lcore_id);
        }

        rte_atomic16_set(&op_params->sync, SYNC_START);
        ret = throughput_function(&t_params[rte_lcore_id()]);

        /* Master core is always used */
        used_cores = 1;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                if (used_cores++ >= num_lcores)
                        break;

                ret |= rte_eal_wait_lcore(lcore_id);
        }

        /* Return if test failed */
        if (ret)
                return ret;

        /* Print throughput if interrupts are disabled and test passed */
        if (!intr_enabled) {
                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        print_throughput(t_params, num_lcores);
                return ret;
        }

        /* In the interrupt TC we need to wait for the interrupt callback to
         * dequeue all pending operations. Skip waiting for queues which
         * reported an error via the processing_status variable.
         * Wait for master lcore operations.
         */
        tp = &t_params[rte_lcore_id()];
        while ((rte_atomic16_read(&tp->nb_dequeued) <
                        op_params->num_to_process) &&
                        (rte_atomic16_read(&tp->processing_status) !=
                        TEST_FAILED))
                rte_pause();

        ret |= rte_atomic16_read(&tp->processing_status);

        /* Wait for slave lcores operations */
        used_cores = 1;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                tp = &t_params[lcore_id];
                if (used_cores++ >= num_lcores)
                        break;

                while ((rte_atomic16_read(&tp->nb_dequeued) <
                                op_params->num_to_process) &&
                                (rte_atomic16_read(&tp->processing_status) !=
                                TEST_FAILED))
                        rte_pause();

                ret |= rte_atomic16_read(&tp->processing_status);
        }

        /* Print throughput if test passed */
        if (!ret && test_vector.op_type != RTE_BBDEV_OP_NONE)
                print_throughput(t_params, num_lcores);

        return ret;
}

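/*
 * Latency measurement below: the clock starts just before a burst is
 * enqueued and stops at the first successful dequeue, so last_time is
 * the time-to-first-completion of the burst rather than a per-op
 * figure. Min, max and total are tracked so that latency_test() can
 * report avg/min/max in both cycles and microseconds.
 */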
static int
latency_test_dec(struct rte_mempool *mempool,
                struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
                int vector_mask, uint16_t dev_id, uint16_t queue_id,
                const uint16_t num_to_process, uint16_t burst_sz,
                uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
{
        int ret = TEST_SUCCESS;
        uint16_t i, j, dequeued;
        struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
        uint64_t start_time = 0, last_time = 0;

        for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
                uint16_t enq = 0, deq = 0;
                bool first_time = true;
                last_time = 0;

                if (unlikely(num_to_process - dequeued < burst_sz))
                        burst_sz = num_to_process - dequeued;

                ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
                TEST_ASSERT_SUCCESS(ret,
                                "rte_bbdev_dec_op_alloc_bulk() failed");
                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_dec_op(ops_enq, burst_sz, dequeued,
                                        bufs->inputs,
                                        bufs->hard_outputs,
                                        bufs->soft_outputs,
                                        ref_op);

                /* Set counter to validate the ordering */
                for (j = 0; j < burst_sz; ++j)
                        ops_enq[j]->opaque_data = (void *)(uintptr_t)j;

                start_time = rte_rdtsc_precise();

                enq = rte_bbdev_enqueue_dec_ops(dev_id, queue_id, &ops_enq[enq],
                                burst_sz);
                TEST_ASSERT(enq == burst_sz,
                                "Error enqueueing burst, expected %u, got %u",
                                burst_sz, enq);

                /* Dequeue */
                do {
                        deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
                                        &ops_deq[deq], burst_sz - deq);
                        if (likely(first_time && (deq > 0))) {
                                last_time = rte_rdtsc_precise() - start_time;
                                first_time = false;
                        }
                } while (unlikely(burst_sz != deq));

                *max_time = RTE_MAX(*max_time, last_time);
                *min_time = RTE_MIN(*min_time, last_time);
                *total_time += last_time;

                if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
                        ret = validate_dec_op(ops_deq, burst_sz, ref_op,
                                        vector_mask);
                        TEST_ASSERT_SUCCESS(ret, "Validation failed!");
                }

                rte_bbdev_dec_op_free_bulk(ops_enq, deq);
                dequeued += deq;
        }

        return i;
}

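/*
 * Note on ordering: the opaque_data field of each enqueued op carries
 * its index within the burst ("Set counter to validate the ordering"
 * above); the validate_dec_op()/validate_enc_op() helpers are then
 * expected to use it to confirm that operations are returned in order.
 */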
static int
latency_test_enc(struct rte_mempool *mempool,
                struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
                uint16_t dev_id, uint16_t queue_id,
                const uint16_t num_to_process, uint16_t burst_sz,
                uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
{
        int ret = TEST_SUCCESS;
        uint16_t i, j, dequeued;
        struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
        uint64_t start_time = 0, last_time = 0;

        for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
                uint16_t enq = 0, deq = 0;
                bool first_time = true;
                last_time = 0;

                if (unlikely(num_to_process - dequeued < burst_sz))
                        burst_sz = num_to_process - dequeued;

                ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
                TEST_ASSERT_SUCCESS(ret,
                                "rte_bbdev_enc_op_alloc_bulk() failed");
                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_enc_op(ops_enq, burst_sz, dequeued,
                                        bufs->inputs,
                                        bufs->hard_outputs,
                                        ref_op);

                /* Set counter to validate the ordering */
                for (j = 0; j < burst_sz; ++j)
                        ops_enq[j]->opaque_data = (void *)(uintptr_t)j;

                start_time = rte_rdtsc_precise();

                enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq],
                                burst_sz);
                TEST_ASSERT(enq == burst_sz,
                                "Error enqueueing burst, expected %u, got %u",
                                burst_sz, enq);

                /* Dequeue */
                do {
                        deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
                                        &ops_deq[deq], burst_sz - deq);
                        if (likely(first_time && (deq > 0))) {
                                last_time = rte_rdtsc_precise() - start_time;
                                first_time = false;
                        }
                } while (unlikely(burst_sz != deq));

                *max_time = RTE_MAX(*max_time, last_time);
                *min_time = RTE_MIN(*min_time, last_time);
                *total_time += last_time;

                if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
                        ret = validate_enc_op(ops_deq, burst_sz, ref_op);
                        TEST_ASSERT_SUCCESS(ret, "Validation failed!");
                }

                rte_bbdev_enc_op_free_bulk(ops_enq, deq);
                dequeued += deq;
        }

        return i;
}

static int
latency_test(struct active_device *ad,
                struct test_op_params *op_params)
{
        int iter;
        uint16_t burst_sz = op_params->burst_sz;
        const uint16_t num_to_process = op_params->num_to_process;
        const enum rte_bbdev_op_type op_type = test_vector.op_type;
        const uint16_t queue_id = ad->queue_ids[0];
        struct test_buffers *bufs = NULL;
        struct rte_bbdev_info info;
        uint64_t total_time, min_time, max_time;
        const char *op_type_str;

        total_time = max_time = 0;
        min_time = UINT64_MAX;

        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);

        rte_bbdev_info_get(ad->dev_id, &info);
        bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

        op_type_str = rte_bbdev_op_type_str(op_type);
        TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);

        printf(
                "Validation/Latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
                        info.dev_name, burst_sz, num_to_process, op_type_str);

        if (op_type == RTE_BBDEV_OP_TURBO_DEC)
                iter = latency_test_dec(op_params->mp, bufs,
                                op_params->ref_dec_op, op_params->vector_mask,
                                ad->dev_id, queue_id, num_to_process,
                                burst_sz, &total_time, &min_time, &max_time);
        else
                iter = latency_test_enc(op_params->mp, bufs,
                                op_params->ref_enc_op, ad->dev_id, queue_id,
                                num_to_process, burst_sz, &total_time,
                                &min_time, &max_time);

        if (iter <= 0)
                return TEST_FAILED;

        printf("\toperation latency:\n"
                        "\t\tavg latency: %lg cycles, %lg us\n"
                        "\t\tmin latency: %lg cycles, %lg us\n"
                        "\t\tmax latency: %lg cycles, %lg us\n",
                        (double)total_time / (double)iter,
                        (double)(total_time * 1000000) / (double)iter /
                        (double)rte_get_tsc_hz(), (double)min_time,
                        (double)(min_time * 1000000) / (double)rte_get_tsc_hz(),
                        (double)max_time, (double)(max_time * 1000000) /
                        (double)rte_get_tsc_hz());

        return TEST_SUCCESS;
}

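/*
 * Offload-cost accounting used below: the driver reports the time spent
 * in the accelerator through stats.offload_time, so the software
 * (CPU-side) share of an enqueue is whatever remains after subtracting
 * it from the measured wall time:
 *
 *   enq_sw_last_time = rte_rdtsc_precise() - enq_start_time
 *                      - stats.offload_time;
 *
 * The enq_tur_* (turbo) fields track stats.offload_time itself.
 */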
#ifdef RTE_BBDEV_OFFLOAD_COST
static int
get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id,
                struct rte_bbdev_stats *stats)
{
        struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
        struct rte_bbdev_stats *q_stats;

        if (queue_id >= dev->data->num_queues)
                return -1;

        q_stats = &dev->data->queues[queue_id].queue_stats;

        stats->enqueued_count = q_stats->enqueued_count;
        stats->dequeued_count = q_stats->dequeued_count;
        stats->enqueue_err_count = q_stats->enqueue_err_count;
        stats->dequeue_err_count = q_stats->dequeue_err_count;
        stats->offload_time = q_stats->offload_time;

        return 0;
}

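/*
 * get_bbdev_queue_stats() above reads a single queue's counters
 * directly out of rte_bbdev_devices[]. Presumably this is used instead
 * of the public rte_bbdev_stats_get(), which reports device-level
 * totals, so that per-queue offload_time stays usable between bursts.
 */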
static int
offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs,
                struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
                uint16_t queue_id, const uint16_t num_to_process,
                uint16_t burst_sz, struct test_time_stats *time_st)
{
        int i, dequeued, ret;
        struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
        uint64_t enq_start_time, deq_start_time;
        uint64_t enq_sw_last_time, deq_last_time;
        struct rte_bbdev_stats stats;

        for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
                uint16_t enq = 0, deq = 0;

                if (unlikely(num_to_process - dequeued < burst_sz))
                        burst_sz = num_to_process - dequeued;

                rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_dec_op(ops_enq, burst_sz, dequeued,
                                        bufs->inputs,
                                        bufs->hard_outputs,
                                        bufs->soft_outputs,
                                        ref_op);

                /* Start time measurement of enqueue function offload latency */
                enq_start_time = rte_rdtsc_precise();
                do {
                        enq += rte_bbdev_enqueue_dec_ops(dev_id, queue_id,
                                        &ops_enq[enq], burst_sz - enq);
                } while (unlikely(burst_sz != enq));

                ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
                TEST_ASSERT_SUCCESS(ret,
                                "Failed to get stats for queue (%u) of device (%u)",
                                queue_id, dev_id);

                enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
                                stats.offload_time;
                time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
                                enq_sw_last_time);
                time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
                                enq_sw_last_time);
                time_st->enq_sw_tot_time += enq_sw_last_time;

                time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
                                stats.offload_time);
                time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
                                stats.offload_time);
                time_st->enq_tur_tot_time += stats.offload_time;

                /* Ensure enqueue has been completed */
                rte_delay_ms(10);

                /* Start time measurement of dequeue function offload latency */
                deq_start_time = rte_rdtsc_precise();
                /* Dequeue one operation */
                do {
                        deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
                                        &ops_deq[deq], 1);
                } while (unlikely(deq != 1));

                deq_last_time = rte_rdtsc_precise() - deq_start_time;
                time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
                                deq_last_time);
                time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
                                deq_last_time);
                time_st->deq_tot_time += deq_last_time;

                /* Dequeue remaining operations if needed */
                while (burst_sz != deq)
                        deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
                                        &ops_deq[deq], burst_sz - deq);

                rte_bbdev_dec_op_free_bulk(ops_enq, deq);
                dequeued += deq;
        }

        return i;
}

static int
offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
                struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
                uint16_t queue_id, const uint16_t num_to_process,
                uint16_t burst_sz, struct test_time_stats *time_st)
{
        int i, dequeued, ret;
        struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
        uint64_t enq_start_time, deq_start_time;
        uint64_t enq_sw_last_time, deq_last_time;
        struct rte_bbdev_stats stats;

        for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
                uint16_t enq = 0, deq = 0;

                if (unlikely(num_to_process - dequeued < burst_sz))
                        burst_sz = num_to_process - dequeued;

                rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_enc_op(ops_enq, burst_sz, dequeued,
                                        bufs->inputs,
                                        bufs->hard_outputs,
                                        ref_op);

                /* Start time measurement of enqueue function offload latency */
                enq_start_time = rte_rdtsc_precise();
                do {
                        enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
                                        &ops_enq[enq], burst_sz - enq);
                } while (unlikely(burst_sz != enq));

                ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
                TEST_ASSERT_SUCCESS(ret,
                                "Failed to get stats for queue (%u) of device (%u)",
                                queue_id, dev_id);

                enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
                                stats.offload_time;
                time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
                                enq_sw_last_time);
                time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
                                enq_sw_last_time);
                time_st->enq_sw_tot_time += enq_sw_last_time;

                time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
                                stats.offload_time);
                time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
                                stats.offload_time);
                time_st->enq_tur_tot_time += stats.offload_time;

                /* Ensure enqueue has been completed */
                rte_delay_ms(10);

                /* Start time measurement of dequeue function offload latency */
                deq_start_time = rte_rdtsc_precise();
                /* Dequeue one operation */
                do {
                        deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
                                        &ops_deq[deq], 1);
                } while (unlikely(deq != 1));

                deq_last_time = rte_rdtsc_precise() - deq_start_time;
                time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
                                deq_last_time);
                time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
                                deq_last_time);
                time_st->deq_tot_time += deq_last_time;

                /* Dequeue remaining operations if needed */
                while (burst_sz != deq)
                        deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
                                        &ops_deq[deq], burst_sz - deq);

                rte_bbdev_enc_op_free_bulk(ops_enq, deq);
                dequeued += deq;
        }

        return i;
}
#endif

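/*
 * The offload tests are compiled out unless RTE_BBDEV_OFFLOAD_COST is
 * defined. With the 18.11 make-based build that would typically mean
 * setting the config knob before building (location assumed, not taken
 * from this file):
 *
 *   CONFIG_RTE_BBDEV_OFFLOAD_COST=y     in config/common_base
 *
 * Without it, the test cases below return TEST_SKIPPED at run time.
 */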
static int
offload_cost_test(struct active_device *ad,
                struct test_op_params *op_params)
{
#ifndef RTE_BBDEV_OFFLOAD_COST
        RTE_SET_USED(ad);
        RTE_SET_USED(op_params);
        printf("Offload latency test is disabled.\n");
        printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
        return TEST_SKIPPED;
#else
        int iter;
        uint16_t burst_sz = op_params->burst_sz;
        const uint16_t num_to_process = op_params->num_to_process;
        const enum rte_bbdev_op_type op_type = test_vector.op_type;
        const uint16_t queue_id = ad->queue_ids[0];
        struct test_buffers *bufs = NULL;
        struct rte_bbdev_info info;
        const char *op_type_str;
        struct test_time_stats time_st;

        memset(&time_st, 0, sizeof(struct test_time_stats));
        time_st.enq_sw_min_time = UINT64_MAX;
        time_st.enq_tur_min_time = UINT64_MAX;
        time_st.deq_min_time = UINT64_MAX;

        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);

        rte_bbdev_info_get(ad->dev_id, &info);
        bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

        op_type_str = rte_bbdev_op_type_str(op_type);
        TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);

        printf(
                "Offload latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
                        info.dev_name, burst_sz, num_to_process, op_type_str);

        if (op_type == RTE_BBDEV_OP_TURBO_DEC)
                iter = offload_latency_test_dec(op_params->mp, bufs,
                                op_params->ref_dec_op, ad->dev_id, queue_id,
                                num_to_process, burst_sz, &time_st);
        else
                iter = offload_latency_test_enc(op_params->mp, bufs,
                                op_params->ref_enc_op, ad->dev_id, queue_id,
                                num_to_process, burst_sz, &time_st);

        if (iter <= 0)
                return TEST_FAILED;

        printf("\tenq offload cost latency:\n"
                        "\t\tsoftware avg %lg cycles, %lg us\n"
                        "\t\tsoftware min %lg cycles, %lg us\n"
                        "\t\tsoftware max %lg cycles, %lg us\n"
                        "\t\tturbo avg %lg cycles, %lg us\n"
                        "\t\tturbo min %lg cycles, %lg us\n"
                        "\t\tturbo max %lg cycles, %lg us\n",
                        (double)time_st.enq_sw_tot_time / (double)iter,
                        (double)(time_st.enq_sw_tot_time * 1000000) /
                        (double)iter / (double)rte_get_tsc_hz(),
                        (double)time_st.enq_sw_min_time,
                        (double)(time_st.enq_sw_min_time * 1000000) /
                        rte_get_tsc_hz(), (double)time_st.enq_sw_max_time,
                        (double)(time_st.enq_sw_max_time * 1000000) /
                        rte_get_tsc_hz(), (double)time_st.enq_tur_tot_time /
                        (double)iter,
                        (double)(time_st.enq_tur_tot_time * 1000000) /
                        (double)iter / (double)rte_get_tsc_hz(),
                        (double)time_st.enq_tur_min_time,
                        (double)(time_st.enq_tur_min_time * 1000000) /
                        rte_get_tsc_hz(), (double)time_st.enq_tur_max_time,
                        (double)(time_st.enq_tur_max_time * 1000000) /
                        rte_get_tsc_hz());

        printf("\tdeq offload cost latency - one op:\n"
                        "\t\tavg %lg cycles, %lg us\n"
                        "\t\tmin %lg cycles, %lg us\n"
                        "\t\tmax %lg cycles, %lg us\n",
                        (double)time_st.deq_tot_time / (double)iter,
                        (double)(time_st.deq_tot_time * 1000000) /
                        (double)iter / (double)rte_get_tsc_hz(),
                        (double)time_st.deq_min_time,
                        (double)(time_st.deq_min_time * 1000000) /
                        rte_get_tsc_hz(), (double)time_st.deq_max_time,
                        (double)(time_st.deq_max_time * 1000000) /
                        rte_get_tsc_hz());

        return TEST_SUCCESS;
#endif
}

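/*
 * The empty-queue helpers below measure the baseline cost of polling a
 * queue that has nothing to deliver: each rte_bbdev_dequeue_*_ops()
 * call is timed individually and min/max/total are accumulated, giving
 * the pure driver dequeue overhead with no operations in flight.
 */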
#ifdef RTE_BBDEV_OFFLOAD_COST
static int
offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
                const uint16_t num_to_process, uint16_t burst_sz,
                uint64_t *deq_tot_time, uint64_t *deq_min_time,
                uint64_t *deq_max_time)
{
        int i, deq_total;
        struct rte_bbdev_dec_op *ops[MAX_BURST];
        uint64_t deq_start_time, deq_last_time;

        /* Test deq offload latency from an empty queue */
        for (i = 0, deq_total = 0; deq_total < num_to_process;
                        ++i, deq_total += burst_sz) {
                deq_start_time = rte_rdtsc_precise();

                if (unlikely(num_to_process - deq_total < burst_sz))
                        burst_sz = num_to_process - deq_total;
                rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops, burst_sz);

                deq_last_time = rte_rdtsc_precise() - deq_start_time;
                *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
                *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
                *deq_tot_time += deq_last_time;
        }

        return i;
}

static int
offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
                const uint16_t num_to_process, uint16_t burst_sz,
                uint64_t *deq_tot_time, uint64_t *deq_min_time,
                uint64_t *deq_max_time)
{
        int i, deq_total;
        struct rte_bbdev_enc_op *ops[MAX_BURST];
        uint64_t deq_start_time, deq_last_time;

        /* Test deq offload latency from an empty queue */
        for (i = 0, deq_total = 0; deq_total < num_to_process;
                        ++i, deq_total += burst_sz) {
                deq_start_time = rte_rdtsc_precise();

                if (unlikely(num_to_process - deq_total < burst_sz))
                        burst_sz = num_to_process - deq_total;
                rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops, burst_sz);

                deq_last_time = rte_rdtsc_precise() - deq_start_time;
                *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
                *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
                *deq_tot_time += deq_last_time;
        }

        return i;
}
#endif

static int
offload_latency_empty_q_test(struct active_device *ad,
                struct test_op_params *op_params)
{
#ifndef RTE_BBDEV_OFFLOAD_COST
        RTE_SET_USED(ad);
        RTE_SET_USED(op_params);
        printf("Offload latency empty dequeue test is disabled.\n");
        printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
        return TEST_SKIPPED;
#else
        int iter;
        uint64_t deq_tot_time, deq_min_time, deq_max_time;
        uint16_t burst_sz = op_params->burst_sz;
        const uint16_t num_to_process = op_params->num_to_process;
        const enum rte_bbdev_op_type op_type = test_vector.op_type;
        const uint16_t queue_id = ad->queue_ids[0];
        struct rte_bbdev_info info;
        const char *op_type_str;

        deq_tot_time = deq_max_time = 0;
        deq_min_time = UINT64_MAX;

        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);

        rte_bbdev_info_get(ad->dev_id, &info);

        op_type_str = rte_bbdev_op_type_str(op_type);
        TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);

        printf(
                "Offload latency empty dequeue test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
                        info.dev_name, burst_sz, num_to_process, op_type_str);

        if (op_type == RTE_BBDEV_OP_TURBO_DEC)
                iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id,
                                num_to_process, burst_sz, &deq_tot_time,
                                &deq_min_time, &deq_max_time);
        else
                iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id,
                                num_to_process, burst_sz, &deq_tot_time,
                                &deq_min_time, &deq_max_time);

        if (iter <= 0)
                return TEST_FAILED;

        printf("\tempty deq offload\n"
                        "\t\tavg. latency: %lg cycles, %lg us\n"
                        "\t\tmin. latency: %lg cycles, %lg us\n"
                        "\t\tmax. latency: %lg cycles, %lg us\n",
                        (double)deq_tot_time / (double)iter,
                        (double)(deq_tot_time * 1000000) / (double)iter /
                        (double)rte_get_tsc_hz(), (double)deq_min_time,
                        (double)(deq_min_time * 1000000) / rte_get_tsc_hz(),
                        (double)deq_max_time, (double)(deq_max_time * 1000000) /
                        rte_get_tsc_hz());

        return TEST_SUCCESS;
#endif
}

static int
throughput_tc(void)
{
        return run_test_case(throughput_test);
}

static int
offload_cost_tc(void)
{
        return run_test_case(offload_cost_test);
}

static int
offload_latency_empty_q_tc(void)
{
        return run_test_case(offload_latency_empty_q_test);
}

static int
latency_tc(void)
{
        return run_test_case(latency_test);
}

static int
interrupt_tc(void)
{
        return run_test_case(throughput_test);
}

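/*
 * Each unit_test_suite below binds one of the test cases above to a
 * test-bbdev command (throughput, validation, latency, offload,
 * interrupt). A typical invocation via the wrapper script might look
 * like the following; the flags are assumptions about test-bbdev.py and
 * are not taken from this file:
 *
 *   ./test-bbdev.py -e="-l 0-3" -c latency \
 *           -v ./test_vectors/bbdev_null.data
 */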
static struct unit_test_suite bbdev_throughput_testsuite = {
        .suite_name = "BBdev Throughput Tests",
        .setup = testsuite_setup,
        .teardown = testsuite_teardown,
        .unit_test_cases = {
                TEST_CASE_ST(ut_setup, ut_teardown, throughput_tc),
                TEST_CASES_END() /**< NULL terminate unit test array */
        }
};

static struct unit_test_suite bbdev_validation_testsuite = {
        .suite_name = "BBdev Validation Tests",
        .setup = testsuite_setup,
        .teardown = testsuite_teardown,
        .unit_test_cases = {
                TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
                TEST_CASES_END() /**< NULL terminate unit test array */
        }
};

static struct unit_test_suite bbdev_latency_testsuite = {
        .suite_name = "BBdev Latency Tests",
        .setup = testsuite_setup,
        .teardown = testsuite_teardown,
        .unit_test_cases = {
                TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
                TEST_CASES_END() /**< NULL terminate unit test array */
        }
};

static struct unit_test_suite bbdev_offload_cost_testsuite = {
        .suite_name = "BBdev Offload Cost Tests",
        .setup = testsuite_setup,
        .teardown = testsuite_teardown,
        .unit_test_cases = {
                TEST_CASE_ST(ut_setup, ut_teardown, offload_cost_tc),
                TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_empty_q_tc),
                TEST_CASES_END() /**< NULL terminate unit test array */
        }
};

static struct unit_test_suite bbdev_interrupt_testsuite = {
        .suite_name = "BBdev Interrupt Tests",
        .setup = interrupt_testsuite_setup,
        .teardown = testsuite_teardown,
        .unit_test_cases = {
                TEST_CASE_ST(ut_setup, ut_teardown, interrupt_tc),
                TEST_CASES_END() /**< NULL terminate unit test array */
        }
};

REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite);
REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite);
REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite);
REGISTER_TEST_COMMAND(offload, bbdev_offload_cost_testsuite);
REGISTER_TEST_COMMAND(interrupt, bbdev_interrupt_testsuite);