/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <inttypes.h>
#include <math.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_launch.h>
#include <rte_bbdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hexdump.h>

#include "main.h"
#include "test_bbdev_vector.h"

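/* Map SOCKET_ID_ANY to socket 0 so the id can safely index per-socket arrays */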
#define GET_SOCKET(socket_id) (((socket_id) == SOCKET_ID_ANY) ? 0 : (socket_id))

#define MAX_QUEUES RTE_MAX_LCORE

#define OPS_CACHE_SIZE 256U
#define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */

#define SYNC_WAIT 0
#define SYNC_START 1

#define INVALID_QUEUE_ID -1

static struct test_bbdev_vector test_vector;

/* Switch between PMD and Interrupt for throughput TC */
static bool intr_enabled;

/* Represents tested active devices */
static struct active_device {
        const char *driver_name;
        uint8_t dev_id;
        uint16_t supported_ops;
        uint16_t queue_ids[MAX_QUEUES];
        uint16_t nb_queues;
        struct rte_mempool *ops_mempool;
        struct rte_mempool *in_mbuf_pool;
        struct rte_mempool *hard_out_mbuf_pool;
        struct rte_mempool *soft_out_mbuf_pool;
} active_devs[RTE_BBDEV_MAX_DEVS];

static uint8_t nb_active_devs;

/* Data buffers used by BBDEV ops */
struct test_buffers {
        struct rte_bbdev_op_data *inputs;
        struct rte_bbdev_op_data *hard_outputs;
        struct rte_bbdev_op_data *soft_outputs;
};

/* Operation parameters specific for given test case */
struct test_op_params {
        struct rte_mempool *mp;
        struct rte_bbdev_dec_op *ref_dec_op;
        struct rte_bbdev_enc_op *ref_enc_op;
        uint16_t burst_sz;
        uint16_t num_to_process;
        uint16_t num_lcores;
        int vector_mask;
        rte_atomic16_t sync;
        struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};

/* Contains per lcore params */
struct thread_params {
        uint8_t dev_id;
        uint16_t queue_id;
        uint64_t start_time;
        double mops;
        double mbps;
        rte_atomic16_t nb_dequeued;
        rte_atomic16_t processing_status;
        struct test_op_params *op_params;
};

#ifdef RTE_BBDEV_OFFLOAD_COST
/* Stores time statistics */
struct test_time_stats {
        /* Stores software enqueue total working time */
        uint64_t enq_sw_tot_time;
        /* Stores minimum value of software enqueue working time */
        uint64_t enq_sw_min_time;
        /* Stores maximum value of software enqueue working time */
        uint64_t enq_sw_max_time;
        /* Stores turbo enqueue total working time */
        uint64_t enq_tur_tot_time;
        /* Stores minimum value of turbo enqueue working time */
        uint64_t enq_tur_min_time;
        /* Stores maximum value of turbo enqueue working time */
        uint64_t enq_tur_max_time;
        /* Stores dequeue total working time */
        uint64_t deq_tot_time;
        /* Stores minimum value of dequeue working time */
        uint64_t deq_min_time;
        /* Stores maximum value of dequeue working time */
        uint64_t deq_max_time;
};
#endif

typedef int (test_case_function)(struct active_device *ad,
                struct test_op_params *op_params);

static inline void
set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
{
        ad->supported_ops |= (1 << op_type);
}

static inline bool
is_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
{
        return ad->supported_ops & (1 << op_type);
}

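/* Return true only if every requested flag is present in the device flags */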
static inline bool
flags_match(uint32_t flags_req, uint32_t flags_present)
{
        return (flags_req & flags_present) == flags_req;
}

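/* Strip all soft-output related flags from a decoder's op_flags */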
static void
clear_soft_out_cap(uint32_t *op_flags)
{
        *op_flags &= ~RTE_BBDEV_TURBO_SOFT_OUTPUT;
        *op_flags &= ~RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT;
        *op_flags &= ~RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT;
}

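/* Check that the device capabilities cover everything the test vector needs */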
static int
check_dev_cap(const struct rte_bbdev_info *dev_info)
{
        unsigned int i;
        unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs;
        const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;

        nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
        nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
        nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments;

        for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) {
                if (op_cap->type != test_vector.op_type)
                        continue;

                if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) {
                        const struct rte_bbdev_op_cap_turbo_dec *cap =
                                        &op_cap->cap.turbo_dec;
                        /* Ignore lack of soft output capability, just skip
                         * checking if soft output is valid.
                         */
                        if ((test_vector.turbo_dec.op_flags &
                                        RTE_BBDEV_TURBO_SOFT_OUTPUT) &&
                                        !(cap->capability_flags &
                                        RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
                                printf(
                                        "WARNING: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
                                        dev_info->dev_name);
                                clear_soft_out_cap(
                                        &test_vector.turbo_dec.op_flags);
                        }

                        if (!flags_match(test_vector.turbo_dec.op_flags,
                                        cap->capability_flags))
                                return TEST_FAILED;
                        if (nb_inputs > cap->num_buffers_src) {
                                printf("Too many inputs defined: %u, max: %u\n",
                                        nb_inputs, cap->num_buffers_src);
                                return TEST_FAILED;
                        }
                        if (nb_soft_outputs > cap->num_buffers_soft_out &&
                                        (test_vector.turbo_dec.op_flags &
                                        RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
                                printf(
                                        "Too many soft outputs defined: %u, max: %u\n",
                                                nb_soft_outputs,
                                                cap->num_buffers_soft_out);
                                return TEST_FAILED;
                        }
                        if (nb_hard_outputs > cap->num_buffers_hard_out) {
                                printf(
                                        "Too many hard outputs defined: %u, max: %u\n",
                                                nb_hard_outputs,
                                                cap->num_buffers_hard_out);
                                return TEST_FAILED;
                        }
                        if (intr_enabled && !(cap->capability_flags &
                                        RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
                                printf(
                                        "Dequeue interrupts are not supported!\n");
                                return TEST_FAILED;
                        }

                        return TEST_SUCCESS;
                } else if (op_cap->type == RTE_BBDEV_OP_TURBO_ENC) {
                        const struct rte_bbdev_op_cap_turbo_enc *cap =
                                        &op_cap->cap.turbo_enc;

                        if (!flags_match(test_vector.turbo_enc.op_flags,
                                        cap->capability_flags))
                                return TEST_FAILED;
                        if (nb_inputs > cap->num_buffers_src) {
                                printf("Too many inputs defined: %u, max: %u\n",
                                        nb_inputs, cap->num_buffers_src);
                                return TEST_FAILED;
                        }
                        if (nb_hard_outputs > cap->num_buffers_dst) {
                                printf(
                                        "Too many hard outputs defined: %u, max: %u\n",
                                        nb_hard_outputs, cap->num_buffers_dst);
                                return TEST_FAILED;
                        }
                        if (intr_enabled && !(cap->capability_flags &
                                        RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
                                printf(
                                        "Dequeue interrupts are not supported!\n");
                                return TEST_FAILED;
                        }

                        return TEST_SUCCESS;
                }
        }

        if ((i == 0) && (test_vector.op_type == RTE_BBDEV_OP_NONE))
                return TEST_SUCCESS; /* Special case for NULL device */

        return TEST_FAILED;
}

/* Calculates an optimal mempool size not smaller than val; rte_mempool is
 * most efficient when its size is a power of two minus one.
 */
static unsigned int
optimal_mempool_size(unsigned int val)
{
        return rte_align32pow2(val + 1) - 1;
}

/* allocates mbuf mempool for inputs and outputs */
static struct rte_mempool *
create_mbuf_pool(struct op_data_entries *entries, uint8_t dev_id,
                int socket_id, unsigned int mbuf_pool_size,
                const char *op_type_str)
{
        unsigned int i;
        uint32_t max_seg_sz = 0;
        char pool_name[RTE_MEMPOOL_NAMESIZE];

        /* find max input segment size */
        for (i = 0; i < entries->nb_segments; ++i)
                if (entries->segments[i].length > max_seg_sz)
                        max_seg_sz = entries->segments[i].length;

        snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
                        dev_id);
        return rte_pktmbuf_pool_create(pool_name, mbuf_pool_size, 0, 0,
                        RTE_MAX(max_seg_sz + RTE_PKTMBUF_HEADROOM,
                        (unsigned int)RTE_MBUF_DEFAULT_BUF_SIZE), socket_id);
}

static int
create_mempools(struct active_device *ad, int socket_id,
                enum rte_bbdev_op_type op_type, uint16_t num_ops)
{
        struct rte_mempool *mp;
        unsigned int ops_pool_size, mbuf_pool_size = 0;
        char pool_name[RTE_MEMPOOL_NAMESIZE];
        const char *op_type_str;

        struct op_data_entries *in = &test_vector.entries[DATA_INPUT];
        struct op_data_entries *hard_out =
                        &test_vector.entries[DATA_HARD_OUTPUT];
        struct op_data_entries *soft_out =
                        &test_vector.entries[DATA_SOFT_OUTPUT];

        /* allocate ops mempool */
        ops_pool_size = optimal_mempool_size(RTE_MAX(
                        /* Ops used plus 1 reference op */
                        RTE_MAX((unsigned int)(ad->nb_queues * num_ops + 1),
                        /* Minimal cache size plus 1 reference op */
                        (unsigned int)(1.5 * rte_lcore_count() *
                                        OPS_CACHE_SIZE + 1)),
                        OPS_POOL_SIZE_MIN));

        op_type_str = rte_bbdev_op_type_str(op_type);
        TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);

        snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
                        ad->dev_id);
        mp = rte_bbdev_op_pool_create(pool_name, op_type,
                        ops_pool_size, OPS_CACHE_SIZE, socket_id);
        TEST_ASSERT_NOT_NULL(mp,
                        "ERROR Failed to create %u items ops pool for dev %u on socket %u.",
                        ops_pool_size,
                        ad->dev_id,
                        socket_id);
        ad->ops_mempool = mp;

        /* Inputs */
        mbuf_pool_size = optimal_mempool_size(ops_pool_size * in->nb_segments);
        mp = create_mbuf_pool(in, ad->dev_id, socket_id, mbuf_pool_size, "in");
        TEST_ASSERT_NOT_NULL(mp,
                        "ERROR Failed to create %u items input pktmbuf pool for dev %u on socket %u.",
                        mbuf_pool_size,
                        ad->dev_id,
                        socket_id);
        ad->in_mbuf_pool = mp;

        /* Hard outputs */
        mbuf_pool_size = optimal_mempool_size(ops_pool_size *
                        hard_out->nb_segments);
        mp = create_mbuf_pool(hard_out, ad->dev_id, socket_id, mbuf_pool_size,
                        "hard_out");
        TEST_ASSERT_NOT_NULL(mp,
                        "ERROR Failed to create %u items hard output pktmbuf pool for dev %u on socket %u.",
                        mbuf_pool_size,
                        ad->dev_id,
                        socket_id);
        ad->hard_out_mbuf_pool = mp;

        if (soft_out->nb_segments == 0)
                return TEST_SUCCESS;

        /* Soft outputs */
        mbuf_pool_size = optimal_mempool_size(ops_pool_size *
                        soft_out->nb_segments);
        mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id, mbuf_pool_size,
                        "soft_out");
        TEST_ASSERT_NOT_NULL(mp,
                        "ERROR Failed to create %u items soft output pktmbuf pool for dev %u on socket %u.",
                        mbuf_pool_size,
                        ad->dev_id,
                        socket_id);
        ad->soft_out_mbuf_pool = mp;

        return 0;
}

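/* Set up a device's queues and record them in the active_device entry */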
static int
add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
                struct test_bbdev_vector *vector)
{
        int ret;
        unsigned int queue_id;
        struct rte_bbdev_queue_conf qconf;
        struct active_device *ad = &active_devs[nb_active_devs];
        unsigned int nb_queues;
        enum rte_bbdev_op_type op_type = vector->op_type;

        nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
        /* setup device */
        ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id);
        if (ret < 0) {
                printf("rte_bbdev_setup_queues(%u, %u, %d) ret %i\n",
                                dev_id, nb_queues, info->socket_id, ret);
                return TEST_FAILED;
        }

        /* configure interrupts if needed */
        if (intr_enabled) {
                ret = rte_bbdev_intr_enable(dev_id);
                if (ret < 0) {
                        printf("rte_bbdev_intr_enable(%u) ret %i\n", dev_id,
                                        ret);
                        return TEST_FAILED;
                }
        }

        /* setup device queues */
        qconf.socket = info->socket_id;
        qconf.queue_size = info->drv.default_queue_conf.queue_size;
        qconf.priority = 0;
        qconf.deferred_start = 0;
        qconf.op_type = op_type;

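        /* Configure each queue; when the current priority level is exhausted,
         * bump the priority once and retry before giving up.
         */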
        for (queue_id = 0; queue_id < nb_queues; ++queue_id) {
                ret = rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
                if (ret != 0) {
                        printf(
                                        "Allocated all queues (id=%u) at prio %u on dev %u\n",
                                        queue_id, qconf.priority, dev_id);
                        qconf.priority++;
                        ret = rte_bbdev_queue_configure(ad->dev_id, queue_id,
                                        &qconf);
                }
                if (ret != 0) {
                        printf("All queues on dev %u allocated: %u\n",
                                        dev_id, queue_id);
                        break;
                }
                ad->queue_ids[queue_id] = queue_id;
        }
        TEST_ASSERT(queue_id != 0,
                        "ERROR Failed to configure any queues on dev %u",
                        dev_id);
        ad->nb_queues = queue_id;

        set_avail_op(ad, op_type);

        return TEST_SUCCESS;
}

static int
add_active_device(uint8_t dev_id, struct rte_bbdev_info *info,
                struct test_bbdev_vector *vector)
{
        int ret;

        active_devs[nb_active_devs].driver_name = info->drv.driver_name;
        active_devs[nb_active_devs].dev_id = dev_id;

        ret = add_bbdev_dev(dev_id, info, vector);
        if (ret == TEST_SUCCESS)
                ++nb_active_devs;
        return ret;
}

static uint8_t
populate_active_devices(void)
{
        int ret;
        uint8_t dev_id;
        uint8_t nb_devs_added = 0;
        struct rte_bbdev_info info;

        RTE_BBDEV_FOREACH(dev_id) {
                rte_bbdev_info_get(dev_id, &info);

                if (check_dev_cap(&info)) {
                        printf(
                                "Device %d (%s) does not support specified capabilities\n",
                                        dev_id, info.dev_name);
                        continue;
                }

                ret = add_active_device(dev_id, &info, &test_vector);
                if (ret != 0) {
                        printf("Adding active bbdev %s skipped\n",
                                        info.dev_name);
                        continue;
                }
                nb_devs_added++;
        }

        return nb_devs_added;
}

static int
read_test_vector(void)
{
        int ret;

        memset(&test_vector, 0, sizeof(test_vector));
        printf("Test vector file = %s\n", get_vector_filename());
        ret = test_bbdev_vector_read(get_vector_filename(), &test_vector);
        TEST_ASSERT_SUCCESS(ret, "Failed to parse file %s\n",
                        get_vector_filename());

        return TEST_SUCCESS;
}

static int
testsuite_setup(void)
{
        TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");

        if (populate_active_devices() == 0) {
                printf("No suitable devices found!\n");
                return TEST_SKIPPED;
        }

        return TEST_SUCCESS;
}

static int
interrupt_testsuite_setup(void)
{
        TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");

        /* Enable interrupts */
        intr_enabled = true;

        /* Special case for NULL device (RTE_BBDEV_OP_NONE) */
        if (populate_active_devices() == 0 ||
                        test_vector.op_type == RTE_BBDEV_OP_NONE) {
                intr_enabled = false;
                printf("No suitable devices found!\n");
                return TEST_SKIPPED;
        }

        return TEST_SUCCESS;
}

static void
testsuite_teardown(void)
{
        uint8_t dev_id;

        /* Unconfigure devices */
        RTE_BBDEV_FOREACH(dev_id)
                rte_bbdev_close(dev_id);

        /* Clear active devices structs. */
        memset(active_devs, 0, sizeof(active_devs));
        nb_active_devs = 0;
}

static int
ut_setup(void)
{
        uint8_t i, dev_id;

        for (i = 0; i < nb_active_devs; i++) {
                dev_id = active_devs[i].dev_id;
                /* reset bbdev stats */
                TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id),
                                "Failed to reset stats of bbdev %u", dev_id);
                /* start the device */
                TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id),
                                "Failed to start bbdev %u", dev_id);
        }

        return TEST_SUCCESS;
}

static void
ut_teardown(void)
{
        uint8_t i, dev_id;
        struct rte_bbdev_stats stats;

        for (i = 0; i < nb_active_devs; i++) {
                dev_id = active_devs[i].dev_id;
                /* read stats and print */
                rte_bbdev_stats_get(dev_id, &stats);
                /* Stop the device */
                rte_bbdev_stop(dev_id);
        }
}

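/* Allocate an mbuf chain per op and copy the reference input segments in */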
static int
init_op_data_objs(struct rte_bbdev_op_data *bufs,
                struct op_data_entries *ref_entries,
                struct rte_mempool *mbuf_pool, const uint16_t n,
                enum op_data_type op_type, uint16_t min_alignment)
{
        int ret;
        unsigned int i, j;

        for (i = 0; i < n; ++i) {
                char *data;
                struct op_data_buf *seg = &ref_entries->segments[0];
                struct rte_mbuf *m_head = rte_pktmbuf_alloc(mbuf_pool);
                TEST_ASSERT_NOT_NULL(m_head,
                                "Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
                                op_type, n * ref_entries->nb_segments,
                                mbuf_pool->size);

                bufs[i].data = m_head;
                bufs[i].offset = 0;
                bufs[i].length = 0;

                if (op_type == DATA_INPUT) {
                        data = rte_pktmbuf_append(m_head, seg->length);
                        TEST_ASSERT_NOT_NULL(data,
                                        "Couldn't append %u bytes to mbuf from %d data type mbuf pool",
                                        seg->length, op_type);

                        TEST_ASSERT(data == RTE_PTR_ALIGN(data, min_alignment),
                                        "Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
                                        data, min_alignment);
                        rte_memcpy(data, seg->addr, seg->length);
                        bufs[i].length += seg->length;

                        for (j = 1; j < ref_entries->nb_segments; ++j) {
                                struct rte_mbuf *m_tail =
                                                rte_pktmbuf_alloc(mbuf_pool);
                                TEST_ASSERT_NOT_NULL(m_tail,
                                                "Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
                                                op_type,
                                                n * ref_entries->nb_segments,
                                                mbuf_pool->size);
                                seg += 1;

                                data = rte_pktmbuf_append(m_tail, seg->length);
                                TEST_ASSERT_NOT_NULL(data,
                                                "Couldn't append %u bytes to mbuf from %d data type mbuf pool",
                                                seg->length, op_type);

                                TEST_ASSERT(data == RTE_PTR_ALIGN(data,
                                                min_alignment),
                                                "Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
                                                data, min_alignment);
                                rte_memcpy(data, seg->addr, seg->length);
                                bufs[i].length += seg->length;

                                ret = rte_pktmbuf_chain(m_head, m_tail);
                                TEST_ASSERT_SUCCESS(ret,
                                                "Couldn't chain mbufs from %d data type mbuf pool",
                                                op_type);
                        }
                }
        }

        return 0;
}

static int
allocate_buffers_on_socket(struct rte_bbdev_op_data **buffers, const int len,
                const int socket)
{
        int i;

        *buffers = rte_zmalloc_socket(NULL, len, 0, socket);
        if (*buffers == NULL) {
                printf("WARNING: Failed to allocate op_data on socket %d\n",
                                socket);
                /* try to allocate memory on other detected sockets */
                for (i = 0; i < socket; i++) {
                        *buffers = rte_zmalloc_socket(NULL, len, 0, i);
                        if (*buffers != NULL)
                                break;
                }
        }

        return (*buffers == NULL) ? TEST_FAILED : TEST_SUCCESS;
}

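/* Rescale the int8_t input LLRs so they fit the device's max_llr_modulus */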
static void
limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
                uint16_t n, int8_t max_llr_modulus)
{
        uint16_t i, byte_idx;

        for (i = 0; i < n; ++i) {
                struct rte_mbuf *m = input_ops[i].data;
                while (m != NULL) {
                        int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
                                        input_ops[i].offset);
                        for (byte_idx = 0; byte_idx < input_ops[i].length;
                                        ++byte_idx)
                                llr[byte_idx] = round((double)max_llr_modulus *
                                                llr[byte_idx] / INT8_MAX);

                        m = m->next;
                }
        }
}

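/* Allocate and initialize the per-queue op_data buffers for each data type */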
static int
fill_queue_buffers(struct test_op_params *op_params,
                struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
                struct rte_mempool *soft_out_mp, uint16_t queue_id,
                const struct rte_bbdev_op_cap *capabilities,
                uint16_t min_alignment, const int socket_id)
{
        int ret;
        enum op_data_type type;
        const uint16_t n = op_params->num_to_process;

        struct rte_mempool *mbuf_pools[DATA_NUM_TYPES] = {
                in_mp,
                soft_out_mp,
                hard_out_mp,
        };

        struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
                &op_params->q_bufs[socket_id][queue_id].inputs,
                &op_params->q_bufs[socket_id][queue_id].soft_outputs,
                &op_params->q_bufs[socket_id][queue_id].hard_outputs,
        };

        for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
                struct op_data_entries *ref_entries =
                                &test_vector.entries[type];
                if (ref_entries->nb_segments == 0)
                        continue;

                ret = allocate_buffers_on_socket(queue_ops[type],
                                n * sizeof(struct rte_bbdev_op_data),
                                socket_id);
                TEST_ASSERT_SUCCESS(ret,
                                "Couldn't allocate memory for rte_bbdev_op_data structs");

                ret = init_op_data_objs(*queue_ops[type], ref_entries,
                                mbuf_pools[type], n, type, min_alignment);
                TEST_ASSERT_SUCCESS(ret,
                                "Couldn't init rte_bbdev_op_data structs");
        }

        if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
                        capabilities->cap.turbo_dec.max_llr_modulus);

        return 0;
}

static void
free_buffers(struct active_device *ad, struct test_op_params *op_params)
{
        unsigned int i, j;

        rte_mempool_free(ad->ops_mempool);
        rte_mempool_free(ad->in_mbuf_pool);
        rte_mempool_free(ad->hard_out_mbuf_pool);
        rte_mempool_free(ad->soft_out_mbuf_pool);

        for (i = 0; i < rte_lcore_count(); ++i) {
                for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
                        rte_free(op_params->q_bufs[j][i].inputs);
                        rte_free(op_params->q_bufs[j][i].hard_outputs);
                        rte_free(op_params->q_bufs[j][i].soft_outputs);
                }
        }
}

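/* Copy the reference decode op's parameters into each op to be enqueued */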
static void
copy_reference_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
                unsigned int start_idx,
                struct rte_bbdev_op_data *inputs,
                struct rte_bbdev_op_data *hard_outputs,
                struct rte_bbdev_op_data *soft_outputs,
                struct rte_bbdev_dec_op *ref_op)
{
        unsigned int i;
        struct rte_bbdev_op_turbo_dec *turbo_dec = &ref_op->turbo_dec;

        for (i = 0; i < n; ++i) {
                if (turbo_dec->code_block_mode == 0) {
                        ops[i]->turbo_dec.tb_params.ea =
                                        turbo_dec->tb_params.ea;
                        ops[i]->turbo_dec.tb_params.eb =
                                        turbo_dec->tb_params.eb;
                        ops[i]->turbo_dec.tb_params.k_pos =
                                        turbo_dec->tb_params.k_pos;
                        ops[i]->turbo_dec.tb_params.k_neg =
                                        turbo_dec->tb_params.k_neg;
                        ops[i]->turbo_dec.tb_params.c =
                                        turbo_dec->tb_params.c;
                        ops[i]->turbo_dec.tb_params.c_neg =
                                        turbo_dec->tb_params.c_neg;
                        ops[i]->turbo_dec.tb_params.cab =
                                        turbo_dec->tb_params.cab;
                } else {
                        ops[i]->turbo_dec.cb_params.e = turbo_dec->cb_params.e;
                        ops[i]->turbo_dec.cb_params.k = turbo_dec->cb_params.k;
                }

                ops[i]->turbo_dec.ext_scale = turbo_dec->ext_scale;
                ops[i]->turbo_dec.iter_max = turbo_dec->iter_max;
                ops[i]->turbo_dec.iter_min = turbo_dec->iter_min;
                ops[i]->turbo_dec.op_flags = turbo_dec->op_flags;
                ops[i]->turbo_dec.rv_index = turbo_dec->rv_index;
                ops[i]->turbo_dec.num_maps = turbo_dec->num_maps;
                ops[i]->turbo_dec.code_block_mode = turbo_dec->code_block_mode;

                ops[i]->turbo_dec.hard_output = hard_outputs[start_idx + i];
                ops[i]->turbo_dec.input = inputs[start_idx + i];
                if (soft_outputs != NULL)
                        ops[i]->turbo_dec.soft_output =
                                soft_outputs[start_idx + i];
        }
}

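/* Copy the reference encode op's parameters into each op to be enqueued */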
static void
copy_reference_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
                unsigned int start_idx,
                struct rte_bbdev_op_data *inputs,
                struct rte_bbdev_op_data *outputs,
                struct rte_bbdev_enc_op *ref_op)
{
        unsigned int i;
        struct rte_bbdev_op_turbo_enc *turbo_enc = &ref_op->turbo_enc;

        for (i = 0; i < n; ++i) {
                if (turbo_enc->code_block_mode == 0) {
                        ops[i]->turbo_enc.tb_params.ea =
                                        turbo_enc->tb_params.ea;
                        ops[i]->turbo_enc.tb_params.eb =
                                        turbo_enc->tb_params.eb;
                        ops[i]->turbo_enc.tb_params.k_pos =
                                        turbo_enc->tb_params.k_pos;
                        ops[i]->turbo_enc.tb_params.k_neg =
                                        turbo_enc->tb_params.k_neg;
                        ops[i]->turbo_enc.tb_params.c =
                                        turbo_enc->tb_params.c;
                        ops[i]->turbo_enc.tb_params.c_neg =
                                        turbo_enc->tb_params.c_neg;
                        ops[i]->turbo_enc.tb_params.cab =
                                        turbo_enc->tb_params.cab;
                        ops[i]->turbo_enc.tb_params.ncb_pos =
                                        turbo_enc->tb_params.ncb_pos;
                        ops[i]->turbo_enc.tb_params.ncb_neg =
                                        turbo_enc->tb_params.ncb_neg;
                        ops[i]->turbo_enc.tb_params.r = turbo_enc->tb_params.r;
                } else {
                        ops[i]->turbo_enc.cb_params.e = turbo_enc->cb_params.e;
                        ops[i]->turbo_enc.cb_params.k = turbo_enc->cb_params.k;
                        ops[i]->turbo_enc.cb_params.ncb =
                                        turbo_enc->cb_params.ncb;
                }
                ops[i]->turbo_enc.rv_index = turbo_enc->rv_index;
                ops[i]->turbo_enc.op_flags = turbo_enc->op_flags;
                ops[i]->turbo_enc.code_block_mode = turbo_enc->code_block_mode;

                ops[i]->turbo_enc.output = outputs[start_idx + i];
                ops[i]->turbo_enc.input = inputs[start_idx + i];
        }
}

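/* Check an op's completion status and its enqueue-order index (opaque_data) */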
static int
check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
                unsigned int order_idx, const int expected_status)
{
        TEST_ASSERT(op->status == expected_status,
                        "op_status (%d) != expected_status (%d)",
                        op->status, expected_status);

        TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
                        "Ordering error, expected %p, got %p",
                        (void *)(uintptr_t)order_idx, op->opaque_data);

        return TEST_SUCCESS;
}

static int
check_enc_status_and_ordering(struct rte_bbdev_enc_op *op,
                unsigned int order_idx, const int expected_status)
{
        TEST_ASSERT(op->status == expected_status,
                        "op_status (%d) != expected_status (%d)",
                        op->status, expected_status);

        TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
                        "Ordering error, expected %p, got %p",
                        (void *)(uintptr_t)order_idx, op->opaque_data);

        return TEST_SUCCESS;
}

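/* Compare a dequeued mbuf chain segment by segment against reference data */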
static inline int
validate_op_chain(struct rte_bbdev_op_data *op,
                struct op_data_entries *orig_op)
{
        uint8_t i;
        struct rte_mbuf *m = op->data;
        uint8_t nb_dst_segments = orig_op->nb_segments;

        TEST_ASSERT(nb_dst_segments == m->nb_segs,
                        "Number of segments differs in original (%u) and filled (%u) op",
                        nb_dst_segments, m->nb_segs);

        for (i = 0; i < nb_dst_segments; ++i) {
                /* Apply offset to the first mbuf segment */
                uint16_t offset = (i == 0) ? op->offset : 0;
                uint16_t data_len = m->data_len - offset;

                TEST_ASSERT(orig_op->segments[i].length == data_len,
                                "Length of segment differs in original (%u) and filled (%u) op",
                                orig_op->segments[i].length, data_len);
                TEST_ASSERT_BUFFERS_ARE_EQUAL(orig_op->segments[i].addr,
                                rte_pktmbuf_mtod_offset(m, uint32_t *, offset),
                                data_len,
                                "Output buffers (CB=%u) are not equal", i);
                m = m->next;
        }

        return TEST_SUCCESS;
}

static int
validate_dec_buffers(struct rte_bbdev_dec_op *ref_op, struct test_buffers *bufs,
                const uint16_t num_to_process)
{
        int i;

        struct op_data_entries *hard_data_orig =
                        &test_vector.entries[DATA_HARD_OUTPUT];
        struct op_data_entries *soft_data_orig =
                        &test_vector.entries[DATA_SOFT_OUTPUT];

        for (i = 0; i < num_to_process; i++) {
                TEST_ASSERT_SUCCESS(validate_op_chain(&bufs->hard_outputs[i],
                                hard_data_orig),
                                "Hard output buffers are not equal");
                if (ref_op->turbo_dec.op_flags &
                                RTE_BBDEV_TURBO_SOFT_OUTPUT)
                        TEST_ASSERT_SUCCESS(validate_op_chain(
                                        &bufs->soft_outputs[i],
                                        soft_data_orig),
                                        "Soft output buffers are not equal");
        }

        return TEST_SUCCESS;
}

static int
validate_enc_buffers(struct test_buffers *bufs, const uint16_t num_to_process)
{
        int i;

        struct op_data_entries *hard_data_orig =
                        &test_vector.entries[DATA_HARD_OUTPUT];

        for (i = 0; i < num_to_process; i++)
                TEST_ASSERT_SUCCESS(validate_op_chain(&bufs->hard_outputs[i],
                                hard_data_orig),
                                "Hard output buffers are not equal");

        return TEST_SUCCESS;
}

static int
validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
                struct rte_bbdev_dec_op *ref_op, const int vector_mask)
{
        unsigned int i;
        int ret;
        struct op_data_entries *hard_data_orig =
                        &test_vector.entries[DATA_HARD_OUTPUT];
        struct op_data_entries *soft_data_orig =
                        &test_vector.entries[DATA_SOFT_OUTPUT];
        struct rte_bbdev_op_turbo_dec *ops_td;
        struct rte_bbdev_op_data *hard_output;
        struct rte_bbdev_op_data *soft_output;
        struct rte_bbdev_op_turbo_dec *ref_td = &ref_op->turbo_dec;

        for (i = 0; i < n; ++i) {
                ops_td = &ops[i]->turbo_dec;
                hard_output = &ops_td->hard_output;
                soft_output = &ops_td->soft_output;

                if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
                        TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
                                        "Returned iter_count (%d) > expected iter_count (%d)",
                                        ops_td->iter_count, ref_td->iter_count);
                ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
                TEST_ASSERT_SUCCESS(ret,
                                "Checking status and ordering for decoder failed");

                TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
                                hard_data_orig),
                                "Hard output buffers (CB=%u) are not equal",
                                i);

                if (ref_op->turbo_dec.op_flags & RTE_BBDEV_TURBO_SOFT_OUTPUT)
                        TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
                                        soft_data_orig),
                                        "Soft output buffers (CB=%u) are not equal",
                                        i);
        }

        return TEST_SUCCESS;
}

static int
validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
                struct rte_bbdev_enc_op *ref_op)
{
        unsigned int i;
        int ret;
        struct op_data_entries *hard_data_orig =
                        &test_vector.entries[DATA_HARD_OUTPUT];

        for (i = 0; i < n; ++i) {
                ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
                TEST_ASSERT_SUCCESS(ret,
                                "Checking status and ordering for encoder failed");
                TEST_ASSERT_SUCCESS(validate_op_chain(
                                &ops[i]->turbo_enc.output,
                                hard_data_orig),
                                "Output buffers (CB=%u) are not equal",
                                i);
        }

        return TEST_SUCCESS;
}

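/* Build reference ops from the test vector; the input length is the sum of
 * all input segment lengths.
 */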
static void
create_reference_dec_op(struct rte_bbdev_dec_op *op)
{
        unsigned int i;
        struct op_data_entries *entry;

        op->turbo_dec = test_vector.turbo_dec;
        entry = &test_vector.entries[DATA_INPUT];
        for (i = 0; i < entry->nb_segments; ++i)
                op->turbo_dec.input.length +=
                                entry->segments[i].length;
}

static void
create_reference_enc_op(struct rte_bbdev_enc_op *op)
{
        unsigned int i;
        struct op_data_entries *entry;

        op->turbo_enc = test_vector.turbo_enc;
        entry = &test_vector.entries[DATA_INPUT];
        for (i = 0; i < entry->nb_segments; ++i)
                op->turbo_enc.input.length +=
                                entry->segments[i].length;
}

static int
init_test_op_params(struct test_op_params *op_params,
                enum rte_bbdev_op_type op_type, const int expected_status,
                const int vector_mask, struct rte_mempool *ops_mp,
                uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
{
        int ret = 0;

        if (op_type == RTE_BBDEV_OP_TURBO_DEC)
                ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
                                &op_params->ref_dec_op, 1);
        else
                ret = rte_bbdev_enc_op_alloc_bulk(ops_mp,
                                &op_params->ref_enc_op, 1);

        TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");

        op_params->mp = ops_mp;
        op_params->burst_sz = burst_sz;
        op_params->num_to_process = num_to_process;
        op_params->num_lcores = num_lcores;
        op_params->vector_mask = vector_mask;
        if (op_type == RTE_BBDEV_OP_TURBO_DEC)
                op_params->ref_dec_op->status = expected_status;
        else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
                op_params->ref_enc_op->status = expected_status;

        return 0;
}

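/* Create mempools, reference ops and queue buffers, then run the test case
 * on a single device.
 */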
static int
run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
                struct test_op_params *op_params)
{
        int t_ret, f_ret, socket_id = SOCKET_ID_ANY;
        unsigned int i;
        struct active_device *ad;
        unsigned int burst_sz = get_burst_sz();
        enum rte_bbdev_op_type op_type = test_vector.op_type;
        const struct rte_bbdev_op_cap *capabilities = NULL;

        ad = &active_devs[dev_id];

        /* Check if device supports op_type */
        if (!is_avail_op(ad, test_vector.op_type))
                return TEST_SUCCESS;

        struct rte_bbdev_info info;
        rte_bbdev_info_get(ad->dev_id, &info);
        socket_id = GET_SOCKET(info.socket_id);

        if (op_type == RTE_BBDEV_OP_NONE)
                op_type = RTE_BBDEV_OP_TURBO_ENC;
        f_ret = create_mempools(ad, socket_id, op_type,
                        get_num_ops());
        if (f_ret != TEST_SUCCESS) {
                printf("Couldn't create mempools\n");
                goto fail;
        }

        f_ret = init_test_op_params(op_params, test_vector.op_type,
                        test_vector.expected_status,
                        test_vector.mask,
                        ad->ops_mempool,
                        burst_sz,
                        get_num_ops(),
                        get_num_lcores());
        if (f_ret != TEST_SUCCESS) {
                printf("Couldn't init test op params\n");
                goto fail;
        }

        if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
                /* Find Decoder capabilities */
                const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
                while (cap->type != RTE_BBDEV_OP_NONE) {
                        if (cap->type == RTE_BBDEV_OP_TURBO_DEC) {
                                capabilities = cap;
                                break;
                        }
                        cap++;
                }
                TEST_ASSERT_NOT_NULL(capabilities,
                                "Couldn't find Decoder capabilities");

                create_reference_dec_op(op_params->ref_dec_op);
        } else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
                create_reference_enc_op(op_params->ref_enc_op);

        for (i = 0; i < ad->nb_queues; ++i) {
                f_ret = fill_queue_buffers(op_params,
                                ad->in_mbuf_pool,
                                ad->hard_out_mbuf_pool,
                                ad->soft_out_mbuf_pool,
                                ad->queue_ids[i],
                                capabilities,
                                info.drv.min_alignment,
                                socket_id);
                if (f_ret != TEST_SUCCESS) {
                        printf("Couldn't init queue buffers\n");
                        goto fail;
                }
        }

        /* Run test case function */
        t_ret = test_case_func(ad, op_params);

        /* Free active device resources and return */
        free_buffers(ad, op_params);
        return t_ret;

fail:
        free_buffers(ad, op_params);
        return TEST_FAILED;
}

/* Run given test function per active device per supported op type
 * per burst size.
 */
static int
run_test_case(test_case_function *test_case_func)
{
        int ret = 0;
        uint8_t dev;

        /* Alloc op_params */
        struct test_op_params *op_params = rte_zmalloc(NULL,
                        sizeof(struct test_op_params), RTE_CACHE_LINE_SIZE);
        TEST_ASSERT_NOT_NULL(op_params, "Failed to alloc %zuB for op_params",
                        RTE_ALIGN(sizeof(struct test_op_params),
                                RTE_CACHE_LINE_SIZE));

        /* For each device run test case function */
        for (dev = 0; dev < nb_active_devs; ++dev)
                ret |= run_test_case_on_device(test_case_func, dev, op_params);

        rte_free(op_params);

        return ret;
}

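/* Dequeue interrupt callback: drains the queue and, once all ops have been
 * dequeued, validates the buffers and computes throughput.
 */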
static void
dequeue_event_callback(uint16_t dev_id,
                enum rte_bbdev_event_type event, void *cb_arg,
                void *ret_param)
{
        int ret;
        uint16_t i;
        uint64_t total_time;
        uint16_t deq, burst_sz, num_to_process;
        uint16_t queue_id = INVALID_QUEUE_ID;
        struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
        struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
        struct test_buffers *bufs;
        struct rte_bbdev_info info;

        /* Input length in bytes, million operations per second,
         * million bits per second.
         */
        double in_len;

        struct thread_params *tp = cb_arg;
        RTE_SET_USED(ret_param);
        queue_id = tp->queue_id;

        /* Find matching thread params using queue_id */
        for (i = 0; i < MAX_QUEUES; ++i, ++tp)
                if (tp->queue_id == queue_id)
                        break;

        if (i == MAX_QUEUES) {
                printf("%s: Queue_id from interrupt details was not found!\n",
                                __func__);
                return;
        }

        if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
                rte_atomic16_set(&tp->processing_status, TEST_FAILED);
                printf(
                        "Dequeue interrupt handler called for incorrect event!\n");
                return;
        }

        burst_sz = tp->op_params->burst_sz;
        num_to_process = tp->op_params->num_to_process;

        if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id, dec_ops,
                                burst_sz);
        else
                deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id, enc_ops,
                                burst_sz);

        if (deq < burst_sz) {
                printf(
                        "After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
                        burst_sz, deq);
                rte_atomic16_set(&tp->processing_status, TEST_FAILED);
                return;
        }

        if (rte_atomic16_read(&tp->nb_dequeued) + deq < num_to_process) {
                rte_atomic16_add(&tp->nb_dequeued, deq);
                return;
        }

        total_time = rte_rdtsc_precise() - tp->start_time;

        rte_bbdev_info_get(dev_id, &info);

        bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

        ret = TEST_SUCCESS;
        if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                ret = validate_dec_buffers(tp->op_params->ref_dec_op, bufs,
                                num_to_process);
        else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
                ret = validate_enc_buffers(bufs, num_to_process);

        if (ret) {
                printf("Buffers validation failed\n");
                rte_atomic16_set(&tp->processing_status, TEST_FAILED);
        }

        switch (test_vector.op_type) {
        case RTE_BBDEV_OP_TURBO_DEC:
                in_len = tp->op_params->ref_dec_op->turbo_dec.input.length;
                break;
        case RTE_BBDEV_OP_TURBO_ENC:
                in_len = tp->op_params->ref_enc_op->turbo_enc.input.length;
                break;
        case RTE_BBDEV_OP_NONE:
                in_len = 0.0;
                break;
        default:
                printf("Unknown op type: %d\n", test_vector.op_type);
                rte_atomic16_set(&tp->processing_status, TEST_FAILED);
                return;
        }

        tp->mops = ((double)num_to_process / 1000000.0) /
                        ((double)total_time / (double)rte_get_tsc_hz());
        tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) /
                        ((double)total_time / (double)rte_get_tsc_hz());

        rte_atomic16_add(&tp->nb_dequeued, deq);
}

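/* Enqueue loop of the interrupt-driven throughput test (decoder) */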
static int
throughput_intr_lcore_dec(void *arg)
{
        struct thread_params *tp = arg;
        unsigned int enqueued;
        struct rte_bbdev_dec_op *ops[MAX_BURST];
        const uint16_t queue_id = tp->queue_id;
        const uint16_t burst_sz = tp->op_params->burst_sz;
        const uint16_t num_to_process = tp->op_params->num_to_process;
        struct test_buffers *bufs = NULL;
        unsigned int allocs_failed = 0;
        struct rte_bbdev_info info;
        int ret;

        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);

        TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
                        "Failed to enable interrupts for dev: %u, queue_id: %u",
                        tp->dev_id, queue_id);

        rte_bbdev_info_get(tp->dev_id, &info);
        bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

        rte_atomic16_clear(&tp->processing_status);
        rte_atomic16_clear(&tp->nb_dequeued);

        while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
                rte_pause();

        tp->start_time = rte_rdtsc_precise();
        for (enqueued = 0; enqueued < num_to_process;) {

                uint16_t num_to_enq = burst_sz;

                if (unlikely(num_to_process - enqueued < num_to_enq))
                        num_to_enq = num_to_process - enqueued;

                ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
                                num_to_enq);
                if (ret != 0) {
                        allocs_failed++;
                        continue;
                }

                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_dec_op(ops, num_to_enq, enqueued,
                                        bufs->inputs,
                                        bufs->hard_outputs,
                                        bufs->soft_outputs,
                                        tp->op_params->ref_dec_op);

                enqueued += rte_bbdev_enqueue_dec_ops(tp->dev_id, queue_id, ops,
                                num_to_enq);

                rte_bbdev_dec_op_free_bulk(ops, num_to_enq);
        }

        if (allocs_failed > 0)
                printf("WARNING: op allocations failed: %u times\n",
                                allocs_failed);

        return TEST_SUCCESS;
}

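/* Enqueue loop of the interrupt-driven throughput test (encoder) */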
1322 static int
1323 throughput_intr_lcore_enc(void *arg)
1324 {
1325         struct thread_params *tp = arg;
1326         unsigned int enqueued;
1327         struct rte_bbdev_enc_op *ops[MAX_BURST];
1328         const uint16_t queue_id = tp->queue_id;
1329         const uint16_t burst_sz = tp->op_params->burst_sz;
1330         const uint16_t num_to_process = tp->op_params->num_to_process;
1331         struct test_buffers *bufs = NULL;
1332         unsigned int allocs_failed = 0;
1333         struct rte_bbdev_info info;
1334         int ret;
1335
1336         TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
1337                         "BURST_SIZE should be <= %u", MAX_BURST);
1338
1339         TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
1340                         "Failed to enable interrupts for dev: %u, queue_id: %u",
1341                         tp->dev_id, queue_id);
1342
1343         rte_bbdev_info_get(tp->dev_id, &info);
1344         bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1345
1346         rte_atomic16_clear(&tp->processing_status);
1347         rte_atomic16_clear(&tp->nb_dequeued);
1348
1349         while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
1350                 rte_pause();
1351
1352         tp->start_time = rte_rdtsc_precise();
1353         for (enqueued = 0; enqueued < num_to_process;) {
1354
1355                 uint16_t num_to_enq = burst_sz;
1356
1357                 if (unlikely(num_to_process - enqueued < num_to_enq))
1358                         num_to_enq = num_to_process - enqueued;
1359
1360                 ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
1361                                 num_to_enq);
1362                 if (ret != 0) {
1363                         allocs_failed++;
1364                         continue;
1365                 }
1366
1367                 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1368                         copy_reference_enc_op(ops, num_to_enq, enqueued,
1369                                         bufs->inputs,
1370                                         bufs->hard_outputs,
1371                                         tp->op_params->ref_enc_op);
1372
1373                 enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id, queue_id, ops,
1374                                 num_to_enq);
1375
1376                 rte_bbdev_enc_op_free_bulk(ops, num_to_enq);
1377         }
1378
1379         if (allocs_failed > 0)
1380                 printf("WARNING: op allocations failed: %u times\n",
1381                                 allocs_failed);
1382
1383         return TEST_SUCCESS;
1384 }
1385
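/* PMD-mode (polling) throughput worker for decode operations. Enqueues and
 * dequeues bursts until num_to_process operations complete, then derives
 * per-lcore MOPS and Mbps from the measured TSC time.
 */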
1386 static int
1387 throughput_pmd_lcore_dec(void *arg)
1388 {
1389         struct thread_params *tp = arg;
1390         unsigned int enqueued, dequeued;
1391         struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1392         uint64_t total_time, start_time;
1393         const uint16_t queue_id = tp->queue_id;
1394         const uint16_t burst_sz = tp->op_params->burst_sz;
1395         const uint16_t num_to_process = tp->op_params->num_to_process;
1396         struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
1397         struct test_buffers *bufs = NULL;
1398         unsigned int allocs_failed = 0;
1399         int ret;
1400         struct rte_bbdev_info info;
1401
1402         /* Input length in bytes; used to derive million operations per
1403          * second and million bits per second.
1404          */
1405         double in_len;
1406
1407         TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
1408                         "BURST_SIZE should be <= %u", MAX_BURST);
1409
1410         rte_bbdev_info_get(tp->dev_id, &info);
1411         bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1412
1413         while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
1414                 rte_pause();
1415
1416         start_time = rte_rdtsc_precise();
1417         for (enqueued = 0, dequeued = 0; dequeued < num_to_process;) {
1418                 uint16_t deq;
1419
1420                 if (likely(enqueued < num_to_process)) {
1421
1422                         uint16_t num_to_enq = burst_sz;
1423
1424                         if (unlikely(num_to_process - enqueued < num_to_enq))
1425                                 num_to_enq = num_to_process - enqueued;
1426
1427                         ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp,
1428                                         ops_enq, num_to_enq);
1429                         if (ret != 0) {
1430                                 allocs_failed++;
1431                                 goto do_dequeue;
1432                         }
1433
1434                         if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1435                                 copy_reference_dec_op(ops_enq, num_to_enq,
1436                                                 enqueued,
1437                                                 bufs->inputs,
1438                                                 bufs->hard_outputs,
1439                                                 bufs->soft_outputs,
1440                                                 ref_op);
1441
1442                         enqueued += rte_bbdev_enqueue_dec_ops(tp->dev_id,
1443                                         queue_id, ops_enq, num_to_enq);
1444                 }
1445 do_dequeue:
1446                 deq = rte_bbdev_dequeue_dec_ops(tp->dev_id, queue_id, ops_deq,
1447                                 burst_sz);
1448                 dequeued += deq;
1449                 rte_bbdev_dec_op_free_bulk(ops_enq, deq);
1450         }
1451         total_time = rte_rdtsc_precise() - start_time;
1452
1453         if (allocs_failed > 0)
1454                 printf("WARNING: op allocations failed: %u times\n",
1455                                 allocs_failed);
1456
1457         TEST_ASSERT(enqueued == dequeued, "enqueued (%u) != dequeued (%u)",
1458                         enqueued, dequeued);
1459
1460         if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
1461                 ret = validate_dec_buffers(ref_op, bufs, num_to_process);
1462                 TEST_ASSERT_SUCCESS(ret, "Buffers validation failed");
1463         }
1464
1465         in_len = ref_op->turbo_dec.input.length;
1466         tp->mops = ((double)num_to_process / 1000000.0) /
1467                         ((double)total_time / (double)rte_get_tsc_hz());
1468         tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) /
1469                         ((double)total_time / (double)rte_get_tsc_hz());
1470
1471         return TEST_SUCCESS;
1472 }
1473
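/* PMD-mode (polling) throughput worker for encode operations; mirrors
 * throughput_pmd_lcore_dec but drives the encoder queue.
 */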
1474 static int
1475 throughput_pmd_lcore_enc(void *arg)
1476 {
1477         struct thread_params *tp = arg;
1478         unsigned int enqueued, dequeued;
1479         struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1480         uint64_t total_time, start_time;
1481         const uint16_t queue_id = tp->queue_id;
1482         const uint16_t burst_sz = tp->op_params->burst_sz;
1483         const uint16_t num_to_process = tp->op_params->num_to_process;
1484         struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
1485         struct test_buffers *bufs = NULL;
1486         unsigned int allocs_failed = 0;
1487         int ret;
1488         struct rte_bbdev_info info;
1489
1490         /* Input length in bytes; used to derive million operations per
1491          * second and million bits per second.
1492          */
1493         double in_len;
1494
1495         TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
1496                         "BURST_SIZE should be <= %u", MAX_BURST);
1497
1498         rte_bbdev_info_get(tp->dev_id, &info);
1499         bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1500
1501         while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
1502                 rte_pause();
1503
1504         start_time = rte_rdtsc_precise();
1505         for (enqueued = 0, dequeued = 0; dequeued < num_to_process;) {
1506                 uint16_t deq;
1507
1508                 if (likely(enqueued < num_to_process)) {
1509
1510                         uint16_t num_to_enq = burst_sz;
1511
1512                         if (unlikely(num_to_process - enqueued < num_to_enq))
1513                                 num_to_enq = num_to_process - enqueued;
1514
1515                         ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp,
1516                                         ops_enq, num_to_enq);
1517                         if (ret != 0) {
1518                                 allocs_failed++;
1519                                 goto do_dequeue;
1520                         }
1521
1522                         if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1523                                 copy_reference_enc_op(ops_enq, num_to_enq,
1524                                                 enqueued,
1525                                                 bufs->inputs,
1526                                                 bufs->hard_outputs,
1527                                                 ref_op);
1528
1529                         enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id,
1530                                         queue_id, ops_enq, num_to_enq);
1531                 }
1532 do_dequeue:
1533                 deq = rte_bbdev_dequeue_enc_ops(tp->dev_id, queue_id, ops_deq,
1534                                 burst_sz);
1535                 dequeued += deq;
1536                 rte_bbdev_enc_op_free_bulk(ops_enq, deq);
1537         }
1538         total_time = rte_rdtsc_precise() - start_time;
1539
1540         if (allocs_failed > 0)
1541                 printf("WARNING: op allocations failed: %u times\n",
1542                                 allocs_failed);
1543
1544         TEST_ASSERT(enqueued == dequeued, "enqueued (%u) != dequeued (%u)",
1545                         enqueued, dequeued);
1546
1547         if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
1548                 ret = validate_enc_buffers(bufs, num_to_process);
1549                 TEST_ASSERT_SUCCESS(ret, "Buffers validation failed");
1550         }
1551
1552         in_len = ref_op->turbo_enc.input.length;
1553
1554         tp->mops = ((double)num_to_process / 1000000.0) /
1555                         ((double)total_time / (double)rte_get_tsc_hz());
1556         tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) /
1557                         ((double)total_time / (double)rte_get_tsc_hz());
1558
1559         return TEST_SUCCESS;
1560 }
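
/* Print per-lcore and aggregated MOPS/Mbps for the first used_cores lcores */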
1561 static void
1562 print_throughput(struct thread_params *t_params, unsigned int used_cores)
1563 {
1564         unsigned int lcore_id, iter = 0;
1565         double total_mops = 0, total_mbps = 0;
1566
1567         RTE_LCORE_FOREACH(lcore_id) {
1568                 if (iter++ >= used_cores)
1569                         break;
1570                 printf("\tlcore_id: %u, throughput: %.8lg MOPS, %.8lg Mbps\n",
1571                                 lcore_id, t_params[lcore_id].mops, t_params[lcore_id].mbps);
1572                 total_mops += t_params[lcore_id].mops;
1573                 total_mbps += t_params[lcore_id].mbps;
1574         }
1575         printf(
1576                 "\n\tTotal stats for %u cores: throughput: %.8lg MOPS, %.8lg Mbps\n",
1577                 used_cores, total_mops, total_mbps);
1578 }
1579
1580 /*
1581  * Test function that determines how long an enqueue + dequeue of a burst
1582  * takes on available lcores.
1583  */
1584 static int
1585 throughput_test(struct active_device *ad,
1586                 struct test_op_params *op_params)
1587 {
1588         int ret;
1589         unsigned int lcore_id, used_cores = 0;
1590         struct thread_params t_params[MAX_QUEUES];
1591         struct rte_bbdev_info info;
1592         lcore_function_t *throughput_function;
1593         struct thread_params *tp;
1594         uint16_t num_lcores;
1595         const char *op_type_str;
1596
1597         rte_bbdev_info_get(ad->dev_id, &info);
1598
1599         op_type_str = rte_bbdev_op_type_str(test_vector.op_type);
1600         TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
1601                         test_vector.op_type);
1602
1603         printf(
1604                 "Throughput test: dev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, int mode: %s, GHz: %lg\n",
1605                         info.dev_name, ad->nb_queues, op_params->burst_sz,
1606                         op_params->num_to_process, op_params->num_lcores,
1607                         op_type_str,
1608                         intr_enabled ? "Interrupt mode" : "PMD mode",
1609                         (double)rte_get_tsc_hz() / 1000000000.0);
1610
1611         /* Set number of lcores */
1612         num_lcores = (ad->nb_queues < (op_params->num_lcores))
1613                         ? ad->nb_queues
1614                         : op_params->num_lcores;
1615
1616         if (intr_enabled) {
1617                 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
1618                         throughput_function = throughput_intr_lcore_dec;
1619                 else
1620                         throughput_function = throughput_intr_lcore_enc;
1621
1622                 /* Dequeue interrupt callback registration */
1623                 ret = rte_bbdev_callback_register(ad->dev_id,
1624                                 RTE_BBDEV_EVENT_DEQUEUE, dequeue_event_callback,
1625                                 &t_params);
1626                 if (ret < 0)
1627                         return ret;
1628         } else {
1629                 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
1630                         throughput_function = throughput_pmd_lcore_dec;
1631                 else
1632                         throughput_function = throughput_pmd_lcore_enc;
1633         }
1634
1635         rte_atomic16_set(&op_params->sync, SYNC_WAIT);
1636
1637         t_params[rte_lcore_id()].dev_id = ad->dev_id;
1638         t_params[rte_lcore_id()].op_params = op_params;
1639         t_params[rte_lcore_id()].queue_id =
1640                         ad->queue_ids[used_cores++];
1641
1642         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1643                 if (used_cores >= num_lcores)
1644                         break;
1645
1646                 t_params[lcore_id].dev_id = ad->dev_id;
1647                 t_params[lcore_id].op_params = op_params;
1648                 t_params[lcore_id].queue_id = ad->queue_ids[used_cores++];
1649
1650                 rte_eal_remote_launch(throughput_function, &t_params[lcore_id],
1651                                 lcore_id);
1652         }
1653
1654         rte_atomic16_set(&op_params->sync, SYNC_START);
1655         ret = throughput_function(&t_params[rte_lcore_id()]);
1656
1657         /* Master core is always used */
1658         used_cores = 1;
1659         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1660                 if (used_cores++ >= num_lcores)
1661                         break;
1662
1663                 ret |= rte_eal_wait_lcore(lcore_id);
1664         }
1665
1666         /* Return if test failed */
1667         if (ret)
1668                 return ret;
1669
1670         /* Print throughput if interrupts are disabled and test passed */
1671         if (!intr_enabled) {
1672                 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1673                         print_throughput(t_params, num_lcores);
1674                 return ret;
1675         }
1676
1677         /* In interrupt TC we need to wait for the interrupt callback to dequeue
1678          * all pending operations. Skip waiting for queues which reported an
1679          * error using processing_status variable.
1680          * Wait for master lcore operations.
1681          */
1682         tp = &t_params[rte_lcore_id()];
1683         while ((rte_atomic16_read(&tp->nb_dequeued) <
1684                         op_params->num_to_process) &&
1685                         (rte_atomic16_read(&tp->processing_status) !=
1686                         TEST_FAILED))
1687                 rte_pause();
1688
1689         ret |= rte_atomic16_read(&tp->processing_status);
1690
1691         /* Wait for slave lcores operations */
1692         used_cores = 1;
1693         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1694                 tp = &t_params[lcore_id];
1695                 if (used_cores++ >= num_lcores)
1696                         break;
1697
1698                 while ((rte_atomic16_read(&tp->nb_dequeued) <
1699                                 op_params->num_to_process) &&
1700                                 (rte_atomic16_read(&tp->processing_status) !=
1701                                 TEST_FAILED))
1702                         rte_pause();
1703
1704                 ret |= rte_atomic16_read(&tp->processing_status);
1705         }
1706
1707         /* Print throughput if test passed */
1708         if (!ret && test_vector.op_type != RTE_BBDEV_OP_NONE)
1709                 print_throughput(t_params, num_lcores);
1710
1711         return ret;
1712 }
1713
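/* Measure decode latency: for each burst, the clock runs from just before
 * enqueue until the first successful dequeue, so the per-burst latency covers
 * the enqueue call, device processing and the first dequeue call. Returns the
 * number of bursts measured.
 */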
1714 static int
1715 latency_test_dec(struct rte_mempool *mempool,
1716                 struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
1717                 int vector_mask, uint16_t dev_id, uint16_t queue_id,
1718                 const uint16_t num_to_process, uint16_t burst_sz,
1719                 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
1720 {
1721         int ret = TEST_SUCCESS;
1722         uint16_t i, j, dequeued;
1723         struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1724         uint64_t start_time = 0, last_time = 0;
1725
1726         for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
1727                 uint16_t enq = 0, deq = 0;
1728                 bool first_time = true;
1729                 last_time = 0;
1730
1731                 if (unlikely(num_to_process - dequeued < burst_sz))
1732                         burst_sz = num_to_process - dequeued;
1733
1734                 ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
1735                 TEST_ASSERT_SUCCESS(ret,
1736                                 "rte_bbdev_dec_op_alloc_bulk() failed");
1737                 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1738                         copy_reference_dec_op(ops_enq, burst_sz, dequeued,
1739                                         bufs->inputs,
1740                                         bufs->hard_outputs,
1741                                         bufs->soft_outputs,
1742                                         ref_op);
1743
1744                 /* Set counter to validate the ordering */
1745                 for (j = 0; j < burst_sz; ++j)
1746                         ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
1747
1748                 start_time = rte_rdtsc_precise();
1749
1750                 enq = rte_bbdev_enqueue_dec_ops(dev_id, queue_id, &ops_enq[enq],
1751                                 burst_sz);
1752                 TEST_ASSERT(enq == burst_sz,
1753                                 "Error enqueueing burst, expected %u, got %u",
1754                                 burst_sz, enq);
1755
1756                 /* Dequeue */
1757                 do {
1758                         deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
1759                                         &ops_deq[deq], burst_sz - deq);
1760                         if (likely(first_time && (deq > 0))) {
1761                                 last_time = rte_rdtsc_precise() - start_time;
1762                                 first_time = false;
1763                         }
1764                 } while (unlikely(burst_sz != deq));
1765
1766                 *max_time = RTE_MAX(*max_time, last_time);
1767                 *min_time = RTE_MIN(*min_time, last_time);
1768                 *total_time += last_time;
1769
1770                 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
1771                         ret = validate_dec_op(ops_deq, burst_sz, ref_op,
1772                                         vector_mask);
1773                         TEST_ASSERT_SUCCESS(ret, "Validation failed!");
1774                 }
1775
1776                 rte_bbdev_dec_op_free_bulk(ops_enq, deq);
1777                 dequeued += deq;
1778         }
1779
1780         return i;
1781 }
1782
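/* Encode counterpart of latency_test_dec; identical measurement method */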
1783 static int
1784 latency_test_enc(struct rte_mempool *mempool,
1785                 struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
1786                 uint16_t dev_id, uint16_t queue_id,
1787                 const uint16_t num_to_process, uint16_t burst_sz,
1788                 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
1789 {
1790         int ret = TEST_SUCCESS;
1791         uint16_t i, j, dequeued;
1792         struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1793         uint64_t start_time = 0, last_time = 0;
1794
1795         for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
1796                 uint16_t enq = 0, deq = 0;
1797                 bool first_time = true;
1798                 last_time = 0;
1799
1800                 if (unlikely(num_to_process - dequeued < burst_sz))
1801                         burst_sz = num_to_process - dequeued;
1802
1803                 ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
1804                 TEST_ASSERT_SUCCESS(ret,
1805                                 "rte_bbdev_enc_op_alloc_bulk() failed");
1806                 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1807                         copy_reference_enc_op(ops_enq, burst_sz, dequeued,
1808                                         bufs->inputs,
1809                                         bufs->hard_outputs,
1810                                         ref_op);
1811
1812                 /* Set counter to validate the ordering */
1813                 for (j = 0; j < burst_sz; ++j)
1814                         ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
1815
1816                 start_time = rte_rdtsc_precise();
1817
1818                 enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq],
1819                                 burst_sz);
1820                 TEST_ASSERT(enq == burst_sz,
1821                                 "Error enqueueing burst, expected %u, got %u",
1822                                 burst_sz, enq);
1823
1824                 /* Dequeue */
1825                 do {
1826                         deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
1827                                         &ops_deq[deq], burst_sz - deq);
1828                         if (likely(first_time && (deq > 0))) {
1829                                 last_time = rte_rdtsc_precise() - start_time;
1830                                 first_time = false;
1831                         }
1832                 } while (unlikely(burst_sz != deq));
1833
1834                 *max_time = RTE_MAX(*max_time, last_time);
1835                 *min_time = RTE_MIN(*min_time, last_time);
1836                 *total_time += last_time;
1837
1838                 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
1839                         ret = validate_enc_op(ops_deq, burst_sz, ref_op);
1840                         TEST_ASSERT_SUCCESS(ret, "Validation failed!");
1841                 }
1842
1843                 rte_bbdev_enc_op_free_bulk(ops_enq, deq);
1844                 dequeued += deq;
1845         }
1846
1847         return i;
1848 }
1849
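/* Validation/latency test case: runs the decode or encode latency loop on the
 * first device queue and reports average, minimum and maximum latency in
 * cycles and microseconds.
 */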
1850 static int
1851 latency_test(struct active_device *ad,
1852                 struct test_op_params *op_params)
1853 {
1854         int iter;
1855         uint16_t burst_sz = op_params->burst_sz;
1856         const uint16_t num_to_process = op_params->num_to_process;
1857         const enum rte_bbdev_op_type op_type = test_vector.op_type;
1858         const uint16_t queue_id = ad->queue_ids[0];
1859         struct test_buffers *bufs = NULL;
1860         struct rte_bbdev_info info;
1861         uint64_t total_time, min_time, max_time;
1862         const char *op_type_str;
1863
1864         total_time = max_time = 0;
1865         min_time = UINT64_MAX;
1866
1867         TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
1868                         "BURST_SIZE should be <= %u", MAX_BURST);
1869
1870         rte_bbdev_info_get(ad->dev_id, &info);
1871         bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1872
1873         op_type_str = rte_bbdev_op_type_str(op_type);
1874         TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
1875
1876         printf(
1877                 "Validation/Latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
1878                         info.dev_name, burst_sz, num_to_process, op_type_str);
1879
1880         if (op_type == RTE_BBDEV_OP_TURBO_DEC)
1881                 iter = latency_test_dec(op_params->mp, bufs,
1882                                 op_params->ref_dec_op, op_params->vector_mask,
1883                                 ad->dev_id, queue_id, num_to_process,
1884                                 burst_sz, &total_time, &min_time, &max_time);
1885         else
1886                 iter = latency_test_enc(op_params->mp, bufs,
1887                                 op_params->ref_enc_op, ad->dev_id, queue_id,
1888                                 num_to_process, burst_sz, &total_time,
1889                                 &min_time, &max_time);
1890
1891         if (iter <= 0)
1892                 return TEST_FAILED;
1893
1894         printf("\toperation latency:\n"
1895                         "\t\tavg latency: %lg cycles, %lg us\n"
1896                         "\t\tmin latency: %lg cycles, %lg us\n"
1897                         "\t\tmax latency: %lg cycles, %lg us\n",
1898                         (double)total_time / (double)iter,
1899                         (double)(total_time * 1000000) / (double)iter /
1900                         (double)rte_get_tsc_hz(), (double)min_time,
1901                         (double)(min_time * 1000000) / (double)rte_get_tsc_hz(),
1902                         (double)max_time, (double)(max_time * 1000000) /
1903                         (double)rte_get_tsc_hz());
1904
1905         return TEST_SUCCESS;
1906 }
1907
1908 #ifdef RTE_BBDEV_OFFLOAD_COST
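/* Read per-queue statistics straight from the device's private data so that
 * the offload_time accumulated by the PMD is visible to the test.
 */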
1909 static int
1910 get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id,
1911                 struct rte_bbdev_stats *stats)
1912 {
1913         struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
1914         struct rte_bbdev_stats *q_stats;
1915
1916         if (queue_id >= dev->data->num_queues)
1917                 return -1;
1918
1919         q_stats = &dev->data->queues[queue_id].queue_stats;
1920
1921         stats->enqueued_count = q_stats->enqueued_count;
1922         stats->dequeued_count = q_stats->dequeued_count;
1923         stats->enqueue_err_count = q_stats->enqueue_err_count;
1924         stats->dequeue_err_count = q_stats->dequeue_err_count;
1925         stats->offload_time = q_stats->offload_time;
1926
1927         return 0;
1928 }
1929
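/* Measure decode enqueue/dequeue offload cost. Software enqueue time is the
 * wall-clock enqueue time minus the accelerator offload_time reported in the
 * queue stats; the turbo (device) time is taken from offload_time itself, and
 * dequeue cost is timed for a single operation after a 10 ms settle delay.
 * Returns the number of bursts measured.
 */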
1930 static int
1931 offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs,
1932                 struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
1933                 uint16_t queue_id, const uint16_t num_to_process,
1934                 uint16_t burst_sz, struct test_time_stats *time_st)
1935 {
1936         int i, dequeued, ret;
1937         struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1938         uint64_t enq_start_time, deq_start_time;
1939         uint64_t enq_sw_last_time, deq_last_time;
1940         struct rte_bbdev_stats stats;
1941
1942         for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
1943                 uint16_t enq = 0, deq = 0;
1944
1945                 if (unlikely(num_to_process - dequeued < burst_sz))
1946                         burst_sz = num_to_process - dequeued;
1947
1948                 rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
1949                 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1950                         copy_reference_dec_op(ops_enq, burst_sz, dequeued,
1951                                         bufs->inputs,
1952                                         bufs->hard_outputs,
1953                                         bufs->soft_outputs,
1954                                         ref_op);
1955
1956                 /* Start time measurement for enqueue function offload latency */
1957                 enq_start_time = rte_rdtsc_precise();
1958                 do {
1959                         enq += rte_bbdev_enqueue_dec_ops(dev_id, queue_id,
1960                                         &ops_enq[enq], burst_sz - enq);
1961                 } while (unlikely(burst_sz != enq));
1962
1963                 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
1964                 TEST_ASSERT_SUCCESS(ret,
1965                                 "Failed to get stats for queue (%u) of device (%u)",
1966                                 queue_id, dev_id);
1967
1968                 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
1969                                 stats.offload_time;
1970                 time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
1971                                 enq_sw_last_time);
1972                 time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
1973                                 enq_sw_last_time);
1974                 time_st->enq_sw_tot_time += enq_sw_last_time;
1975
1976                 time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
1977                                 stats.offload_time);
1978                 time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
1979                                 stats.offload_time);
1980                 time_st->enq_tur_tot_time += stats.offload_time;
1981
1982                 /* Ensure the enqueue has completed */
1983                 rte_delay_ms(10);
1984
1985                 /* Start time measurement for dequeue function offload latency */
1986                 deq_start_time = rte_rdtsc_precise();
1987                 /* Dequeue one operation */
1988                 do {
1989                         deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
1990                                         &ops_deq[deq], 1);
1991                 } while (unlikely(deq != 1));
1992
1993                 deq_last_time = rte_rdtsc_precise() - deq_start_time;
1994                 time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
1995                                 deq_last_time);
1996                 time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
1997                                 deq_last_time);
1998                 time_st->deq_tot_time += deq_last_time;
1999
2000                 /* Dequeue remaining operations if needed */
2001                 while (burst_sz != deq)
2002                         deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
2003                                         &ops_deq[deq], burst_sz - deq);
2004
2005                 rte_bbdev_dec_op_free_bulk(ops_enq, deq);
2006                 dequeued += deq;
2007         }
2008
2009         return i;
2010 }
2011
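/* Encode counterpart of offload_latency_test_dec; identical measurement
 * method.
 */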
2012 static int
2013 offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
2014                 struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
2015                 uint16_t queue_id, const uint16_t num_to_process,
2016                 uint16_t burst_sz, struct test_time_stats *time_st)
2017 {
2018         int i, dequeued, ret;
2019         struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
2020         uint64_t enq_start_time, deq_start_time;
2021         uint64_t enq_sw_last_time, deq_last_time;
2022         struct rte_bbdev_stats stats;
2023
2024         for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
2025                 uint16_t enq = 0, deq = 0;
2026
2027                 if (unlikely(num_to_process - dequeued < burst_sz))
2028                         burst_sz = num_to_process - dequeued;
2029
2030                 rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
2031                 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2032                         copy_reference_enc_op(ops_enq, burst_sz, dequeued,
2033                                         bufs->inputs,
2034                                         bufs->hard_outputs,
2035                                         ref_op);
2036
2037                 /* Start time measurement for enqueue function offload latency */
2038                 enq_start_time = rte_rdtsc_precise();
2039                 do {
2040                         enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
2041                                         &ops_enq[enq], burst_sz - enq);
2042                 } while (unlikely(burst_sz != enq));
2043
2044                 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
2045                 TEST_ASSERT_SUCCESS(ret,
2046                                 "Failed to get stats for queue (%u) of device (%u)",
2047                                 queue_id, dev_id);
2048
2049                 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
2050                                 stats.offload_time;
2051                 time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
2052                                 enq_sw_last_time);
2053                 time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
2054                                 enq_sw_last_time);
2055                 time_st->enq_sw_tot_time += enq_sw_last_time;
2056
2057                 time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
2058                                 stats.offload_time);
2059                 time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
2060                                 stats.offload_time);
2061                 time_st->enq_tur_tot_time += stats.offload_time;
2062
2063                 /* Ensure the enqueue has completed */
2064                 rte_delay_ms(10);
2065
2066                 /* Start time measurement for dequeue function offload latency */
2067                 deq_start_time = rte_rdtsc_precise();
2068                 /* Dequeue one operation */
2069                 do {
2070                         deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
2071                                         &ops_deq[deq], 1);
2072                 } while (unlikely(deq != 1));
2073
2074                 deq_last_time = rte_rdtsc_precise() - deq_start_time;
2075                 time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
2076                                 deq_last_time);
2077                 time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
2078                                 deq_last_time);
2079                 time_st->deq_tot_time += deq_last_time;
2080
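                /* Dequeue remaining operations if needed */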
2081                 while (burst_sz != deq)
2082                         deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
2083                                         &ops_deq[deq], burst_sz - deq);
2084
2085                 rte_bbdev_enc_op_free_bulk(ops_enq, deq);
2086                 dequeued += deq;
2087         }
2088
2089         return i;
2090 }
2091 #endif
2092
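/* Offload cost test case: prints software and turbo (device) enqueue cost
 * plus single-operation dequeue cost, averaged over all measured bursts.
 */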
2093 static int
2094 offload_cost_test(struct active_device *ad,
2095                 struct test_op_params *op_params)
2096 {
2097 #ifndef RTE_BBDEV_OFFLOAD_COST
2098         RTE_SET_USED(ad);
2099         RTE_SET_USED(op_params);
2100         printf("Offload latency test is disabled.\n");
2101         printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
2102         return TEST_SKIPPED;
2103 #else
2104         int iter;
2105         uint16_t burst_sz = op_params->burst_sz;
2106         const uint16_t num_to_process = op_params->num_to_process;
2107         const enum rte_bbdev_op_type op_type = test_vector.op_type;
2108         const uint16_t queue_id = ad->queue_ids[0];
2109         struct test_buffers *bufs = NULL;
2110         struct rte_bbdev_info info;
2111         const char *op_type_str;
2112         struct test_time_stats time_st;
2113
2114         memset(&time_st, 0, sizeof(struct test_time_stats));
2115         time_st.enq_sw_min_time = UINT64_MAX;
2116         time_st.enq_tur_min_time = UINT64_MAX;
2117         time_st.deq_min_time = UINT64_MAX;
2118
2119         TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2120                         "BURST_SIZE should be <= %u", MAX_BURST);
2121
2122         rte_bbdev_info_get(ad->dev_id, &info);
2123         bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
2124
2125         op_type_str = rte_bbdev_op_type_str(op_type);
2126         TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
2127
2128         printf(
2129                 "Offload latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
2130                         info.dev_name, burst_sz, num_to_process, op_type_str);
2131
2132         if (op_type == RTE_BBDEV_OP_TURBO_DEC)
2133                 iter = offload_latency_test_dec(op_params->mp, bufs,
2134                                 op_params->ref_dec_op, ad->dev_id, queue_id,
2135                                 num_to_process, burst_sz, &time_st);
2136         else
2137                 iter = offload_latency_test_enc(op_params->mp, bufs,
2138                                 op_params->ref_enc_op, ad->dev_id, queue_id,
2139                                 num_to_process, burst_sz, &time_st);
2140
2141         if (iter <= 0)
2142                 return TEST_FAILED;
2143
2144         printf("\tenq offload cost latency:\n"
2145                         "\t\tsoftware avg %lg cycles, %lg us\n"
2146                         "\t\tsoftware min %lg cycles, %lg us\n"
2147                         "\t\tsoftware max %lg cycles, %lg us\n"
2148                         "\t\tturbo avg %lg cycles, %lg us\n"
2149                         "\t\tturbo min %lg cycles, %lg us\n"
2150                         "\t\tturbo max %lg cycles, %lg us\n",
2151                         (double)time_st.enq_sw_tot_time / (double)iter,
2152                         (double)(time_st.enq_sw_tot_time * 1000000) /
2153                         (double)iter / (double)rte_get_tsc_hz(),
2154                         (double)time_st.enq_sw_min_time,
2155                         (double)(time_st.enq_sw_min_time * 1000000) /
2156                         rte_get_tsc_hz(), (double)time_st.enq_sw_max_time,
2157                         (double)(time_st.enq_sw_max_time * 1000000) /
2158                         rte_get_tsc_hz(), (double)time_st.enq_tur_tot_time /
2159                         (double)iter,
2160                         (double)(time_st.enq_tur_tot_time * 1000000) /
2161                         (double)iter / (double)rte_get_tsc_hz(),
2162                         (double)time_st.enq_tur_min_time,
2163                         (double)(time_st.enq_tur_min_time * 1000000) /
2164                         rte_get_tsc_hz(), (double)time_st.enq_tur_max_time,
2165                         (double)(time_st.enq_tur_max_time * 1000000) /
2166                         rte_get_tsc_hz());
2167
2168         printf("\tdeq offload cost latency - one op:\n"
2169                         "\t\tavg %lg cycles, %lg us\n"
2170                         "\t\tmin %lg cycles, %lg us\n"
2171                         "\t\tmax %lg cycles, %lg us\n",
2172                         (double)time_st.deq_tot_time / (double)iter,
2173                         (double)(time_st.deq_tot_time * 1000000) /
2174                         (double)iter / (double)rte_get_tsc_hz(),
2175                         (double)time_st.deq_min_time,
2176                         (double)(time_st.deq_min_time * 1000000) /
2177                         rte_get_tsc_hz(), (double)time_st.deq_max_time,
2178                         (double)(time_st.deq_max_time * 1000000) /
2179                         rte_get_tsc_hz());
2180
2181         return TEST_SUCCESS;
2182 #endif
2183 }
2184
2185 #ifdef RTE_BBDEV_OFFLOAD_COST
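/* Measure the cost of dequeuing from an empty decoder queue; nothing is
 * enqueued, so the loop times raw dequeue calls that return no operations.
 */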
2186 static int
2187 offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
2188                 const uint16_t num_to_process, uint16_t burst_sz,
2189                 uint64_t *deq_tot_time, uint64_t *deq_min_time,
2190                 uint64_t *deq_max_time)
2191 {
2192         int i, deq_total;
2193         struct rte_bbdev_dec_op *ops[MAX_BURST];
2194         uint64_t deq_start_time, deq_last_time;
2195
2196         /* Test deq offload latency from an empty queue */
2197
2198         for (i = 0, deq_total = 0; deq_total < num_to_process;
2199                         ++i, deq_total += burst_sz) {
2200                 deq_start_time = rte_rdtsc_precise();
2201
2202                 if (unlikely(num_to_process - deq_total < burst_sz))
2203                         burst_sz = num_to_process - deq_total;
2204                 rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops, burst_sz);
2205
2206                 deq_last_time = rte_rdtsc_precise() - deq_start_time;
2207                 *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
2208                 *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
2209                 *deq_tot_time += deq_last_time;
2210         }
2211
2212         return i;
2213 }
2214
2215 static int
2216 offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
2217                 const uint16_t num_to_process, uint16_t burst_sz,
2218                 uint64_t *deq_tot_time, uint64_t *deq_min_time,
2219                 uint64_t *deq_max_time)
2220 {
2221         int i, deq_total;
2222         struct rte_bbdev_enc_op *ops[MAX_BURST];
2223         uint64_t deq_start_time, deq_last_time;
2224
2225         /* Test deq offload latency from an empty queue */
2226         for (i = 0, deq_total = 0; deq_total < num_to_process;
2227                         ++i, deq_total += burst_sz) {
2228                 deq_start_time = rte_rdtsc_precise();
2229
2230                 if (unlikely(num_to_process - deq_total < burst_sz))
2231                         burst_sz = num_to_process - deq_total;
2232                 rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops, burst_sz);
2233
2234                 deq_last_time = rte_rdtsc_precise() - deq_start_time;
2235                 *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
2236                 *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
2237                 *deq_tot_time += deq_last_time;
2238         }
2239
2240         return i;
2241 }
2242 #endif
2243
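/* Empty-queue dequeue test case: reports how expensive a dequeue call is
 * when no operations are pending.
 */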
2244 static int
2245 offload_latency_empty_q_test(struct active_device *ad,
2246                 struct test_op_params *op_params)
2247 {
2248 #ifndef RTE_BBDEV_OFFLOAD_COST
2249         RTE_SET_USED(ad);
2250         RTE_SET_USED(op_params);
2251         printf("Offload latency empty dequeue test is disabled.\n");
2252         printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
2253         return TEST_SKIPPED;
2254 #else
2255         int iter;
2256         uint64_t deq_tot_time, deq_min_time, deq_max_time;
2257         uint16_t burst_sz = op_params->burst_sz;
2258         const uint16_t num_to_process = op_params->num_to_process;
2259         const enum rte_bbdev_op_type op_type = test_vector.op_type;
2260         const uint16_t queue_id = ad->queue_ids[0];
2261         struct rte_bbdev_info info;
2262         const char *op_type_str;
2263
2264         deq_tot_time = deq_max_time = 0;
2265         deq_min_time = UINT64_MAX;
2266
2267         TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2268                         "BURST_SIZE should be <= %u", MAX_BURST);
2269
2270         rte_bbdev_info_get(ad->dev_id, &info);
2271
2272         op_type_str = rte_bbdev_op_type_str(op_type);
2273         TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
2274
2275         printf(
2276                 "Offload latency empty dequeue test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
2277                         info.dev_name, burst_sz, num_to_process, op_type_str);
2278
2279         if (op_type == RTE_BBDEV_OP_TURBO_DEC)
2280                 iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id,
2281                                 num_to_process, burst_sz, &deq_tot_time,
2282                                 &deq_min_time, &deq_max_time);
2283         else
2284                 iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id,
2285                                 num_to_process, burst_sz, &deq_tot_time,
2286                                 &deq_min_time, &deq_max_time);
2287
2288         if (iter <= 0)
2289                 return TEST_FAILED;
2290
2291         printf("\tempty deq offload\n"
2292                         "\t\tavg. latency: %lg cycles, %lg us\n"
2293                         "\t\tmin. latency: %lg cycles, %lg us\n"
2294                         "\t\tmax. latency: %lg cycles, %lg us\n",
2295                         (double)deq_tot_time / (double)iter,
2296                         (double)(deq_tot_time * 1000000) / (double)iter /
2297                         (double)rte_get_tsc_hz(), (double)deq_min_time,
2298                         (double)(deq_min_time * 1000000) / rte_get_tsc_hz(),
2299                         (double)deq_max_time, (double)(deq_max_time * 1000000) /
2300                         rte_get_tsc_hz());
2301
2302         return TEST_SUCCESS;
2303 #endif
2304 }
2305
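/* Test-case wrappers handed to the test framework via run_test_case() */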
2306 static int
2307 throughput_tc(void)
2308 {
2309         return run_test_case(throughput_test);
2310 }
2311
2312 static int
2313 offload_cost_tc(void)
2314 {
2315         return run_test_case(offload_cost_test);
2316 }
2317
2318 static int
2319 offload_latency_empty_q_tc(void)
2320 {
2321         return run_test_case(offload_latency_empty_q_test);
2322 }
2323
2324 static int
2325 latency_tc(void)
2326 {
2327         return run_test_case(latency_test);
2328 }
2329
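/* Reuses throughput_test; interrupt vs PMD mode is chosen by the intr_enabled
 * flag, which the interrupt testsuite setup is expected to enable.
 */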
2330 static int
2331 interrupt_tc(void)
2332 {
2333         return run_test_case(throughput_test);
2334 }
2335
2336 static struct unit_test_suite bbdev_throughput_testsuite = {
2337         .suite_name = "BBdev Throughput Tests",
2338         .setup = testsuite_setup,
2339         .teardown = testsuite_teardown,
2340         .unit_test_cases = {
2341                 TEST_CASE_ST(ut_setup, ut_teardown, throughput_tc),
2342                 TEST_CASES_END() /**< NULL terminate unit test array */
2343         }
2344 };
2345
2346 static struct unit_test_suite bbdev_validation_testsuite = {
2347         .suite_name = "BBdev Validation Tests",
2348         .setup = testsuite_setup,
2349         .teardown = testsuite_teardown,
2350         .unit_test_cases = {
2351                 TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
2352                 TEST_CASES_END() /**< NULL terminate unit test array */
2353         }
2354 };
2355
2356 static struct unit_test_suite bbdev_latency_testsuite = {
2357         .suite_name = "BBdev Latency Tests",
2358         .setup = testsuite_setup,
2359         .teardown = testsuite_teardown,
2360         .unit_test_cases = {
2361                 TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
2362                 TEST_CASES_END() /**< NULL terminate unit test array */
2363         }
2364 };
2365
2366 static struct unit_test_suite bbdev_offload_cost_testsuite = {
2367         .suite_name = "BBdev Offload Cost Tests",
2368         .setup = testsuite_setup,
2369         .teardown = testsuite_teardown,
2370         .unit_test_cases = {
2371                 TEST_CASE_ST(ut_setup, ut_teardown, offload_cost_tc),
2372                 TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_empty_q_tc),
2373                 TEST_CASES_END() /**< NULL terminate unit test array */
2374         }
2375 };
2376
2377 static struct unit_test_suite bbdev_interrupt_testsuite = {
2378         .suite_name = "BBdev Interrupt Tests",
2379         .setup = interrupt_testsuite_setup,
2380         .teardown = testsuite_teardown,
2381         .unit_test_cases = {
2382                 TEST_CASE_ST(ut_setup, ut_teardown, interrupt_tc),
2383                 TEST_CASES_END() /**< NULL terminate unit test array */
2384         }
2385 };
2386
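/* Expose the test suites as test-bbdev commands */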
2387 REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite);
2388 REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite);
2389 REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite);
2390 REGISTER_TEST_COMMAND(offload, bbdev_offload_cost_testsuite);
2391 REGISTER_TEST_COMMAND(interrupt, bbdev_interrupt_testsuite);