1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4
5 #include <rte_atomic.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_debug.h>
9 #include <rte_eal.h>
10 #include <rte_ethdev.h>
11 #include <rte_eventdev.h>
12 #include <rte_hexdump.h>
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <rte_memcpy.h>
16 #include <rte_launch.h>
17 #include <rte_lcore.h>
18 #include <rte_per_lcore.h>
19 #include <rte_random.h>
20 #include <rte_bus_vdev.h>
21 #include <rte_test.h>
22
23 #include "ssovf_evdev.h"
24
25 #define NUM_PACKETS (1 << 18)
26 #define MAX_EVENTS  (16 * 1024)
27
28 #define OCTEONTX_TEST_RUN(setup, teardown, test) \
29         octeontx_test_run(setup, teardown, test, #test)
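/* #test stringifies the test function name so results can be logged by name. */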
30
31 static int total;
32 static int passed;
33 static int failed;
34 static int unsupported;
35
36 static int evdev;
37 static struct rte_mempool *eventdev_test_mempool;
38
39 struct event_attr {
40         uint32_t flow_id;
41         uint8_t event_type;
42         uint8_t sub_event_type;
43         uint8_t sched_type;
44         uint8_t queue;
45         uint8_t port;
46 };
47
48 static uint32_t seqn_list_index;
49 static int seqn_list[NUM_PACKETS];
50
51 static inline void
52 seqn_list_init(void)
53 {
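        /* Compile-time guard: seqn_list must be able to hold at least
         * MAX_EVENTS entries, one per in-flight event.
         */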
54         RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
55         memset(seqn_list, 0, sizeof(seqn_list));
56         seqn_list_index = 0;
57 }
58
59 static inline int
60 seqn_list_update(int val)
61 {
62         if (seqn_list_index >= NUM_PACKETS)
63                 return -1;
64
65         seqn_list[seqn_list_index++] = val;
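        /* Write barrier: make the seqn_list update visible to other
         * lcores before any subsequent stores.
         */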
66         rte_smp_wmb();
67         return 0;
68 }
69
70 static inline int
71 seqn_list_check(int limit)
72 {
73         int i;
74
75         for (i = 0; i < limit; i++) {
76                 if (seqn_list[i] != i) {
77                         ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
78                         return -1;
79                 }
80         }
81         return 0;
82 }
83
84 struct test_core_param {
85         rte_atomic32_t *total_events;
86         uint64_t dequeue_tmo_ticks;
87         uint8_t port;
88         uint8_t sched_type;
89 };
90
91 static int
92 testsuite_setup(void)
93 {
94         const char *eventdev_name = "event_octeontx";
95
96         evdev = rte_event_dev_get_dev_id(eventdev_name);
97         if (evdev < 0) {
98                 ssovf_log_dbg("%d: Eventdev %s not found - creating.",
99                                 __LINE__, eventdev_name);
100                 if (rte_vdev_init(eventdev_name, NULL) < 0) {
101                         ssovf_log_dbg("Error creating eventdev %s",
102                                         eventdev_name);
103                         return -1;
104                 }
105                 evdev = rte_event_dev_get_dev_id(eventdev_name);
106                 if (evdev < 0) {
107                         ssovf_log_dbg("Error finding newly created eventdev");
108                         return -1;
109                 }
110         }
111
112         return 0;
113 }
114
115 static void
116 testsuite_teardown(void)
117 {
118         rte_event_dev_close(evdev);
119 }
120
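/*
 * Populate dev_conf from the device's advertised limits so the tests run
 * with the maximum number of queues, ports and inflight events.
 */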
121 static inline void
122 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
123                         struct rte_event_dev_info *info)
124 {
125         memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
126         dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
127         dev_conf->nb_event_ports = info->max_event_ports;
128         dev_conf->nb_event_queues = info->max_event_queues;
129         dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
130         dev_conf->nb_event_port_dequeue_depth =
131                         info->max_event_port_dequeue_depth;
132         dev_conf->nb_event_port_enqueue_depth =
133                         info->max_event_port_enqueue_depth;
136         dev_conf->nb_events_limit =
137                         info->max_num_events;
138 }
139
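/* Device setup variants exercised by the individual test cases. */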
140 enum {
141         TEST_EVENTDEV_SETUP_DEFAULT,
142         TEST_EVENTDEV_SETUP_PRIORITY,
143         TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
144 };
145
146 static inline int
147 _eventdev_setup(int mode)
148 {
149         int i, ret;
150         struct rte_event_dev_config dev_conf;
151         struct rte_event_dev_info info;
152         const char *pool_name = "evdev_octeontx_test_pool";
153
        /* Create and destroy the pool for each test case to make it standalone */
        eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
                                        MAX_EVENTS,
                                        0 /* MBUF_CACHE_SIZE */,
                                        0,
                                        512, /* Use very small mbufs */
                                        rte_socket_id());
161         if (!eventdev_test_mempool) {
162                 ssovf_log_dbg("ERROR creating mempool");
163                 return -1;
164         }
165
166         ret = rte_event_dev_info_get(evdev, &info);
167         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
168         RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
169                         "ERROR max_num_events=%d < max_events=%d",
170                                 info.max_num_events, MAX_EVENTS);
171
172         devconf_set_default_sane_values(&dev_conf, &info);
173         if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
174                 dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
175
176         ret = rte_event_dev_configure(evdev, &dev_conf);
177         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
178
179         uint32_t queue_count;
180         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
181                             RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
182                             &queue_count), "Queue count get failed");
183
184         if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
185                 if (queue_count > 8) {
186                         ssovf_log_dbg(
                                "test expects a unique priority per queue");
188                         return -ENOTSUP;
189                 }
190
                /* Configure event queues (0 to n) with
192                  * RTE_EVENT_DEV_PRIORITY_HIGHEST to
193                  * RTE_EVENT_DEV_PRIORITY_LOWEST
194                  */
195                 uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
196                                 queue_count;
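                /* RTE_EVENT_DEV_PRIORITY_LOWEST is 255, so with 8 queues
                 * step = 256 / 8 = 32, giving priorities 0, 32, ..., 224
                 * (lower value means higher priority).
                 */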
197                 for (i = 0; i < (int)queue_count; i++) {
198                         struct rte_event_queue_conf queue_conf;
199
200                         ret = rte_event_queue_default_conf_get(evdev, i,
201                                                 &queue_conf);
202                         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
203                                         i);
204                         queue_conf.priority = i * step;
205                         ret = rte_event_queue_setup(evdev, i, &queue_conf);
206                         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
207                                         i);
208                 }
209
210         } else {
211                 /* Configure event queues with default priority */
212                 for (i = 0; i < (int)queue_count; i++) {
213                         ret = rte_event_queue_setup(evdev, i, NULL);
214                         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
215                                         i);
216                 }
217         }
218         /* Configure event ports */
219         uint32_t port_count;
220         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
221                                 RTE_EVENT_DEV_ATTR_PORT_COUNT,
222                                 &port_count), "Port count get failed");
223         for (i = 0; i < (int)port_count; i++) {
224                 ret = rte_event_port_setup(evdev, i, NULL);
225                 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
226                 ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
227                 RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
228                                 i);
229         }
230
231         ret = rte_event_dev_start(evdev);
232         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
233
234         return 0;
235 }
236
237 static inline int
238 eventdev_setup(void)
239 {
240         return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
241 }
242
243 static inline int
244 eventdev_setup_priority(void)
245 {
246         return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
247 }
248
249 static inline int
250 eventdev_setup_dequeue_timeout(void)
251 {
252         return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
253 }
254
255 static inline void
256 eventdev_teardown(void)
257 {
258         rte_event_dev_stop(evdev);
259         rte_mempool_free(eventdev_test_mempool);
260 }
261
262 static inline void
263 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
264                         uint32_t flow_id, uint8_t event_type,
265                         uint8_t sub_event_type, uint8_t sched_type,
266                         uint8_t queue, uint8_t port)
267 {
268         struct event_attr *attr;
269
270         /* Store the event attributes in mbuf for future reference */
271         attr = rte_pktmbuf_mtod(m, struct event_attr *);
272         attr->flow_id = flow_id;
273         attr->event_type = event_type;
274         attr->sub_event_type = sub_event_type;
275         attr->sched_type = sched_type;
276         attr->queue = queue;
277         attr->port = port;
278
279         ev->flow_id = flow_id;
280         ev->sub_event_type = sub_event_type;
281         ev->event_type = event_type;
282         /* Inject the new event */
283         ev->op = RTE_EVENT_OP_NEW;
284         ev->sched_type = sched_type;
285         ev->queue_id = queue;
286         ev->mbuf = m;
287 }
288
289 static inline int
290 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
291                 uint8_t sched_type, uint8_t queue, uint8_t port,
292                 unsigned int events)
293 {
294         struct rte_mbuf *m;
295         unsigned int i;
296
297         for (i = 0; i < events; i++) {
298                 struct rte_event ev = {.event = 0, .u64 = 0};
299
300                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
301                 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
302
303                 m->seqn = i;
304                 update_event_and_validation_attr(m, &ev, flow_id, event_type,
305                         sub_event_type, sched_type, queue, port);
306                 rte_event_enqueue_burst(evdev, port, &ev, 1);
307         }
308         return 0;
309 }
310
311 static inline int
312 check_excess_events(uint8_t port)
313 {
314         int i;
315         uint16_t valid_event;
316         struct rte_event ev;
317
        /* Check for excess events; poll a few times, then exit */
319         for (i = 0; i < 32; i++) {
320                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
321
322                 RTE_TEST_ASSERT_SUCCESS(valid_event,
323                                 "Unexpected valid event=%d", ev.mbuf->seqn);
324         }
325         return 0;
326 }
327
328 static inline int
329 generate_random_events(const unsigned int total_events)
330 {
331         struct rte_event_dev_info info;
332         unsigned int i;
333         int ret;
334
335         uint32_t queue_count;
336         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
337                             RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
338                             &queue_count), "Queue count get failed");
339
340         ret = rte_event_dev_info_get(evdev, &info);
341         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
342         for (i = 0; i < total_events; i++) {
343                 ret = inject_events(
344                         rte_rand() % info.max_event_queue_flows /*flow_id */,
345                         RTE_EVENT_TYPE_CPU /* event_type */,
346                         rte_rand() % 256 /* sub_event_type */,
347                         rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
348                         rte_rand() % queue_count /* queue */,
349                         0 /* port */,
350                         1 /* events */);
351                 if (ret)
352                         return -1;
353         }
354         return ret;
355 }
356
357
358 static inline int
359 validate_event(struct rte_event *ev)
360 {
361         struct event_attr *attr;
362
363         attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
        RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
                        "flow_id mismatch enq=%d deq=%d",
                        attr->flow_id, ev->flow_id);
        RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
                        "event_type mismatch enq=%d deq=%d",
                        attr->event_type, ev->event_type);
        RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
                        "sub_event_type mismatch enq=%d deq=%d",
                        attr->sub_event_type, ev->sub_event_type);
        RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
                        "sched_type mismatch enq=%d deq=%d",
                        attr->sched_type, ev->sched_type);
        RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
                        "queue mismatch enq=%d deq=%d",
                        attr->queue, ev->queue_id);
379         return 0;
380 }
381
382 typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
383                                  struct rte_event *ev);
384
385 static inline int
386 consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
387 {
388         int ret;
389         uint16_t valid_event;
390         uint32_t events = 0, forward_progress_cnt = 0, index = 0;
391         struct rte_event ev;
392
393         while (1) {
394                 if (++forward_progress_cnt > UINT16_MAX) {
395                         ssovf_log_dbg("Detected deadlock");
396                         return -1;
397                 }
398
399                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
400                 if (!valid_event)
401                         continue;
402
403                 forward_progress_cnt = 0;
404                 ret = validate_event(&ev);
405                 if (ret)
406                         return -1;
407
408                 if (fn != NULL) {
409                         ret = fn(index, port, &ev);
410                         RTE_TEST_ASSERT_SUCCESS(ret,
                                "Failed to validate test-specific event");
412                 }
413
414                 ++index;
415
416                 rte_pktmbuf_free(ev.mbuf);
417                 if (++events >= total_events)
418                         break;
419         }
420
421         return check_excess_events(port);
422 }
423
424 static int
425 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
426 {
427         RTE_SET_USED(port);
428         RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
429                         index, ev->mbuf->seqn);
430         return 0;
431 }
432
433 static inline int
434 test_simple_enqdeq(uint8_t sched_type)
435 {
436         int ret;
437
438         ret = inject_events(0 /*flow_id */,
439                                 RTE_EVENT_TYPE_CPU /* event_type */,
440                                 0 /* sub_event_type */,
441                                 sched_type,
442                                 0 /* queue */,
443                                 0 /* port */,
444                                 MAX_EVENTS);
445         if (ret)
446                 return -1;
447
448         return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
449 }
450
451 static int
452 test_simple_enqdeq_ordered(void)
453 {
454         return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
455 }
456
457 static int
458 test_simple_enqdeq_atomic(void)
459 {
460         return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
461 }
462
463 static int
464 test_simple_enqdeq_parallel(void)
465 {
466         return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
467 }
468
469 /*
470  * Generate a prescribed number of events and spread them across available
 * queues. On dequeue, verify the enqueued event attributes using a
 * single event port (port 0).
473  */
474 static int
475 test_multi_queue_enq_single_port_deq(void)
476 {
477         int ret;
478
479         ret = generate_random_events(MAX_EVENTS);
480         if (ret)
481                 return -1;
482
483         return consume_events(0 /* port */, MAX_EVENTS, NULL);
484 }
485
486 /*
 * Inject 0..MAX_EVENTS events across queues 0..queue_count-1 using a
 * modulo operation (queue = seqn % queue_count).
 *
 * For example, inject 32 events over queues 0..7:
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events come in the order
 * 0,8,16,24,1,9,17,25..,7,15,23,31 from queue 0 (highest priority)
 * to queue 7 (lowest priority).
 */
500 static int
501 validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
502 {
503         uint32_t queue_count;
504         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
505                             RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
506                             &queue_count), "Queue count get failed");
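        /*
         * Events were enqueued round-robin across the queues, and higher
         * priority queues drain first, so at dequeue index i the expected
         * seqn is (i % range) * queue_count + queue_id.
         */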
507         uint32_t range = MAX_EVENTS / queue_count;
508         uint32_t expected_val = (index % range) * queue_count;
509
510         expected_val += ev->queue_id;
511         RTE_SET_USED(port);
512         RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
513         "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
514                         ev->mbuf->seqn, index, expected_val, range,
515                         queue_count, MAX_EVENTS);
516         return 0;
517 }
518
519 static int
520 test_multi_queue_priority(void)
521 {
522         uint8_t queue;
523         struct rte_mbuf *m;
524         int i, max_evts_roundoff;
525
        /* See validate_queue_priority() comments for the priority validation logic */
527         uint32_t queue_count;
528         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
529                             RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
530                             &queue_count), "Queue count get failed");
531         max_evts_roundoff  = MAX_EVENTS / queue_count;
532         max_evts_roundoff *= queue_count;
533
534         for (i = 0; i < max_evts_roundoff; i++) {
535                 struct rte_event ev = {.event = 0, .u64 = 0};
536
537                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
538                 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
539
540                 m->seqn = i;
541                 queue = i % queue_count;
542                 update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
543                         0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
544                 rte_event_enqueue_burst(evdev, 0, &ev, 1);
545         }
546
547         return consume_events(0, max_evts_roundoff, validate_queue_priority);
548 }
549
550 static int
551 worker_multi_port_fn(void *arg)
552 {
553         struct test_core_param *param = arg;
554         struct rte_event ev;
555         uint16_t valid_event;
556         uint8_t port = param->port;
557         rte_atomic32_t *total_events = param->total_events;
558         int ret;
559
560         while (rte_atomic32_read(total_events) > 0) {
561                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
562                 if (!valid_event)
563                         continue;
564
565                 ret = validate_event(&ev);
566                 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
567                 rte_pktmbuf_free(ev.mbuf);
568                 rte_atomic32_sub(total_events, 1);
569         }
570         return 0;
571 }
572
573 static inline int
574 wait_workers_to_join(int lcore, const rte_atomic32_t *count)
575 {
576         uint64_t cycles, print_cycles;
577         RTE_SET_USED(count);
578
579         print_cycles = cycles = rte_get_timer_cycles();
580         while (rte_eal_get_lcore_state(lcore) != FINISHED) {
581                 uint64_t new_cycles = rte_get_timer_cycles();
582
583                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
584                         ssovf_log_dbg("\r%s: events %d", __func__,
585                                 rte_atomic32_read(count));
586                         print_cycles = new_cycles;
587                 }
588                 if (new_cycles - cycles > rte_get_timer_hz() * 10) {
                        ssovf_log_dbg(
                                "%s: No schedules for 10 seconds, deadlock (%d)",
591                                 __func__,
592                                 rte_atomic32_read(count));
593                         rte_event_dev_dump(evdev, stdout);
594                         cycles = new_cycles;
595                         return -1;
596                 }
597         }
598         rte_eal_mp_wait_lcore();
599         return 0;
600 }
601
602
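/*
 * Launch the first worker (master_worker) on the next available lcore and
 * the remaining nb_workers - 1 workers (slave_workers) on subsequent
 * lcores; each worker polls its own event port until *total_events
 * reaches zero.
 */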
603 static inline int
604 launch_workers_and_wait(int (*master_worker)(void *),
605                         int (*slave_workers)(void *), uint32_t total_events,
606                         uint8_t nb_workers, uint8_t sched_type)
607 {
608         uint8_t port = 0;
609         int w_lcore;
610         int ret;
611         struct test_core_param *param;
612         rte_atomic32_t atomic_total_events;
613         uint64_t dequeue_tmo_ticks;
614
615         if (!nb_workers)
616                 return 0;
617
618         rte_atomic32_set(&atomic_total_events, total_events);
619         seqn_list_init();
620
621         param = malloc(sizeof(struct test_core_param) * nb_workers);
622         if (!param)
623                 return -1;
624
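        /* Convert a random timeout of up to ~10 ms (in ns) into device ticks. */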
625         ret = rte_event_dequeue_timeout_ticks(evdev,
626                 rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
627         if (ret) {
628                 free(param);
629                 return -1;
630         }
631
632         param[0].total_events = &atomic_total_events;
633         param[0].sched_type = sched_type;
634         param[0].port = 0;
635         param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
636         rte_smp_wmb();
637
638         w_lcore = rte_get_next_lcore(
639                         /* start core */ -1,
640                         /* skip master */ 1,
641                         /* wrap */ 0);
642         rte_eal_remote_launch(master_worker, &param[0], w_lcore);
643
644         for (port = 1; port < nb_workers; port++) {
645                 param[port].total_events = &atomic_total_events;
646                 param[port].sched_type = sched_type;
647                 param[port].port = port;
648                 param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
649                 rte_smp_wmb();
650                 w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
651                 rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
652         }
653
654         ret = wait_workers_to_join(w_lcore, &atomic_total_events);
655         free(param);
656         return ret;
657 }
658
659 /*
660  * Generate a prescribed number of events and spread them across available
661  * queues. Dequeue the events through multiple ports and verify the enqueued
662  * event attributes
663  */
664 static int
665 test_multi_queue_enq_multi_port_deq(void)
666 {
667         const unsigned int total_events = MAX_EVENTS;
668         uint32_t nr_ports;
669         int ret;
670
671         ret = generate_random_events(total_events);
672         if (ret)
673                 return -1;
674
675         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
676                                 RTE_EVENT_DEV_ATTR_PORT_COUNT,
677                                 &nr_ports), "Port count get failed");
678         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
679
680         if (!nr_ports) {
681                 ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
682                         nr_ports, rte_lcore_count() - 1);
683                 return 0;
684         }
685
686         return launch_workers_and_wait(worker_multi_port_fn,
687                                         worker_multi_port_fn, total_events,
688                                         nr_ports, 0xff /* invalid */);
689 }
690
691 static int
692 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
693                         struct rte_event *ev)
694 {
695         RTE_SET_USED(index);
696         RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
697                                 "queue mismatch enq=%d deq =%d",
698                                 port, ev->queue_id);
699         return 0;
700 }
701
702 /*
 * Link queue x to port x and verify the link by checking that
 * queue_id == x on events dequeued from port x
705  */
706 static int
707 test_queue_to_port_single_link(void)
708 {
709         int i, nr_links, ret;
710
711         uint32_t port_count;
712         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
713                                 RTE_EVENT_DEV_ATTR_PORT_COUNT,
714                                 &port_count), "Port count get failed");
715
        /* Unlink all the connections that were created in eventdev_setup() */
717         for (i = 0; i < (int)port_count; i++) {
718                 ret = rte_event_port_unlink(evdev, i, NULL, 0);
719                 RTE_TEST_ASSERT(ret >= 0,
720                                 "Failed to unlink all queues port=%d", i);
721         }
722
723         uint32_t queue_count;
724         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
725                             RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
726                             &queue_count), "Queue count get failed");
727
728         nr_links = RTE_MIN(port_count, queue_count);
729         const unsigned int total_events = MAX_EVENTS / nr_links;
730
731         /* Link queue x to port x and inject events to queue x through port x */
732         for (i = 0; i < nr_links; i++) {
733                 uint8_t queue = (uint8_t)i;
734
735                 ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
736                 RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
737
738                 ret = inject_events(
739                         0x100 /*flow_id */,
740                         RTE_EVENT_TYPE_CPU /* event_type */,
741                         rte_rand() % 256 /* sub_event_type */,
742                         rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
743                         queue /* queue */,
744                         i /* port */,
745                         total_events /* events */);
746                 if (ret)
747                         return -1;
748         }
749
        /* Verify that the events were generated from the correct queue */
751         for (i = 0; i < nr_links; i++) {
752                 ret = consume_events(i /* port */, total_events,
753                                 validate_queue_to_port_single_link);
754                 if (ret)
755                         return -1;
756         }
757
758         return 0;
759 }
760
761 static int
762 validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
763                         struct rte_event *ev)
764 {
765         RTE_SET_USED(index);
766         RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
767                                 "queue mismatch enq=%d deq =%d",
768                                 port, ev->queue_id);
769         return 0;
770 }
771
772 /*
 * Link all even-numbered queues to port 0 and all odd-numbered queues
 * to port 1, then verify the links on dequeue
775  */
776 static int
777 test_queue_to_port_multi_link(void)
778 {
779         int ret, port0_events = 0, port1_events = 0;
780         uint8_t queue, port;
781         uint32_t nr_queues = 0;
782         uint32_t nr_ports = 0;
783
        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                                RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                                &nr_queues), "Queue count get failed");
791         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
792                                 RTE_EVENT_DEV_ATTR_PORT_COUNT,
793                                 &nr_ports), "Port count get failed");
794
795         if (nr_ports < 2) {
796                 ssovf_log_dbg("%s: Not enough ports to test ports=%d",
797                                 __func__, nr_ports);
798                 return 0;
799         }
800
        /* Unlink all the connections that were created in eventdev_setup() */
802         for (port = 0; port < nr_ports; port++) {
803                 ret = rte_event_port_unlink(evdev, port, NULL, 0);
804                 RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
805                                         port);
806         }
807
808         const unsigned int total_events = MAX_EVENTS / nr_queues;
809
        /* Link all even-numbered queues to port 0 and odd-numbered queues to port 1 */
811         for (queue = 0; queue < nr_queues; queue++) {
812                 port = queue & 0x1;
813                 ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
814                 RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
815                                         queue, port);
816
817                 ret = inject_events(
818                         0x100 /*flow_id */,
819                         RTE_EVENT_TYPE_CPU /* event_type */,
820                         rte_rand() % 256 /* sub_event_type */,
821                         rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
822                         queue /* queue */,
823                         port /* port */,
824                         total_events /* events */);
825                 if (ret)
826                         return -1;
827
828                 if (port == 0)
829                         port0_events += total_events;
830                 else
831                         port1_events += total_events;
832         }
833
834         ret = consume_events(0 /* port */, port0_events,
835                                 validate_queue_to_port_multi_link);
836         if (ret)
837                 return -1;
838         ret = consume_events(1 /* port */, port1_events,
839                                 validate_queue_to_port_multi_link);
840         if (ret)
841                 return -1;
842
843         return 0;
844 }
845
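/*
 * Two-stage pipeline keyed on ev.sub_event_type: stage 0 events are moved
 * to flow 0x2 and forwarded to stage 1 with the requested sched type;
 * stage 1 events record their seqn in seqn_list and are freed.
 */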
846 static int
847 worker_flow_based_pipeline(void *arg)
848 {
849         struct test_core_param *param = arg;
850         struct rte_event ev;
851         uint16_t valid_event;
852         uint8_t port = param->port;
853         uint8_t new_sched_type = param->sched_type;
854         rte_atomic32_t *total_events = param->total_events;
855         uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
856
857         while (rte_atomic32_read(total_events) > 0) {
858                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
859                                         dequeue_tmo_ticks);
860                 if (!valid_event)
861                         continue;
862
863                 /* Events from stage 0 */
864                 if (ev.sub_event_type == 0) {
865                         /* Move to atomic flow to maintain the ordering */
866                         ev.flow_id = 0x2;
867                         ev.event_type = RTE_EVENT_TYPE_CPU;
868                         ev.sub_event_type = 1; /* stage 1 */
869                         ev.sched_type = new_sched_type;
870                         ev.op = RTE_EVENT_OP_FORWARD;
871                         rte_event_enqueue_burst(evdev, port, &ev, 1);
                } else if (ev.sub_event_type == 1) { /* Events from stage 1 */
873                         if (seqn_list_update(ev.mbuf->seqn) == 0) {
874                                 rte_pktmbuf_free(ev.mbuf);
875                                 rte_atomic32_sub(total_events, 1);
876                         } else {
877                                 ssovf_log_dbg("Failed to update seqn_list");
878                                 return -1;
879                         }
880                 } else {
881                         ssovf_log_dbg("Invalid ev.sub_event_type = %d",
882                                         ev.sub_event_type);
883                         return -1;
884                 }
885         }
886         return 0;
887 }
888
889 static int
890 test_multiport_flow_sched_type_test(uint8_t in_sched_type,
891                         uint8_t out_sched_type)
892 {
893         const unsigned int total_events = MAX_EVENTS;
894         uint32_t nr_ports;
895         int ret;
896
897         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
898                                 RTE_EVENT_DEV_ATTR_PORT_COUNT,
899                                 &nr_ports), "Port count get failed");
900         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
901
902         if (!nr_ports) {
903                 ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
904                         nr_ports, rte_lcore_count() - 1);
905                 return 0;
906         }
907
        /* Inject events with m->seqn ranging from 0 to total_events - 1 */
909         ret = inject_events(
910                 0x1 /*flow_id */,
911                 RTE_EVENT_TYPE_CPU /* event_type */,
912                 0 /* sub_event_type (stage 0) */,
913                 in_sched_type,
914                 0 /* queue */,
915                 0 /* port */,
916                 total_events /* events */);
917         if (ret)
918                 return -1;
919
920         ret = launch_workers_and_wait(worker_flow_based_pipeline,
921                                         worker_flow_based_pipeline,
922                                         total_events, nr_ports, out_sched_type);
923         if (ret)
924                 return -1;
925
926         if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
927                         out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
                /* Check whether the event order was maintained */
929                 return seqn_list_check(total_events);
930         }
931         return 0;
932 }
933
934
935 /* Multi port ordered to atomic transaction */
936 static int
937 test_multi_port_flow_ordered_to_atomic(void)
938 {
939         /* Ingress event order test */
940         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
941                                 RTE_SCHED_TYPE_ATOMIC);
942 }
943
944 static int
945 test_multi_port_flow_ordered_to_ordered(void)
946 {
947         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
948                                 RTE_SCHED_TYPE_ORDERED);
949 }
950
951 static int
952 test_multi_port_flow_ordered_to_parallel(void)
953 {
954         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
955                                 RTE_SCHED_TYPE_PARALLEL);
956 }
957
958 static int
959 test_multi_port_flow_atomic_to_atomic(void)
960 {
961         /* Ingress event order test */
962         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
963                                 RTE_SCHED_TYPE_ATOMIC);
964 }
965
966 static int
967 test_multi_port_flow_atomic_to_ordered(void)
968 {
969         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
970                                 RTE_SCHED_TYPE_ORDERED);
971 }
972
973 static int
974 test_multi_port_flow_atomic_to_parallel(void)
975 {
976         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
977                                 RTE_SCHED_TYPE_PARALLEL);
978 }
979
980 static int
981 test_multi_port_flow_parallel_to_atomic(void)
982 {
983         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
984                                 RTE_SCHED_TYPE_ATOMIC);
985 }
986
987 static int
988 test_multi_port_flow_parallel_to_ordered(void)
989 {
990         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
991                                 RTE_SCHED_TYPE_ORDERED);
992 }
993
994 static int
995 test_multi_port_flow_parallel_to_parallel(void)
996 {
997         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
998                                 RTE_SCHED_TYPE_PARALLEL);
999 }
1000
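/*
 * Two-stage pipeline keyed on ev.queue_id (the event group): queue 0
 * events are moved to flow 0x2 and forwarded to queue 1 with the
 * requested sched type; queue 1 events record their seqn and are freed.
 */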
1001 static int
1002 worker_group_based_pipeline(void *arg)
1003 {
1004         struct test_core_param *param = arg;
1005         struct rte_event ev;
1006         uint16_t valid_event;
1007         uint8_t port = param->port;
1008         uint8_t new_sched_type = param->sched_type;
1009         rte_atomic32_t *total_events = param->total_events;
1010         uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
1011
1012         while (rte_atomic32_read(total_events) > 0) {
1013                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
1014                                         dequeue_tmo_ticks);
1015                 if (!valid_event)
1016                         continue;
1017
                /* Events from stage 0 (group 0) */
1019                 if (ev.queue_id == 0) {
1020                         /* Move to atomic flow to maintain the ordering */
1021                         ev.flow_id = 0x2;
1022                         ev.event_type = RTE_EVENT_TYPE_CPU;
1023                         ev.sched_type = new_sched_type;
1024                         ev.queue_id = 1; /* Stage 1*/
1025                         ev.op = RTE_EVENT_OP_FORWARD;
1026                         rte_event_enqueue_burst(evdev, port, &ev, 1);
                } else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
1028                         if (seqn_list_update(ev.mbuf->seqn) == 0) {
1029                                 rte_pktmbuf_free(ev.mbuf);
1030                                 rte_atomic32_sub(total_events, 1);
1031                         } else {
1032                                 ssovf_log_dbg("Failed to update seqn_list");
1033                                 return -1;
1034                         }
1035                 } else {
1036                         ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
1037                         return -1;
1038                 }
1039         }
1040
1041
1042         return 0;
1043 }
1044
1045 static int
1046 test_multiport_queue_sched_type_test(uint8_t in_sched_type,
1047                         uint8_t out_sched_type)
1048 {
1049         const unsigned int total_events = MAX_EVENTS;
1050         uint32_t nr_ports;
1051         int ret;
1052
1053         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1054                                 RTE_EVENT_DEV_ATTR_PORT_COUNT,
1055                                 &nr_ports), "Port count get failed");
1056
1057         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1058
1059         uint32_t queue_count;
1060         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1061                             RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1062                             &queue_count), "Queue count get failed");
        if (queue_count < 2 || !nr_ports) {
1064                 ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
1065                          __func__, queue_count, nr_ports,
1066                          rte_lcore_count() - 1);
1067                 return 0;
1068         }
1069
        /* Inject events with m->seqn ranging from 0 to total_events - 1 */
1071         ret = inject_events(
1072                 0x1 /*flow_id */,
1073                 RTE_EVENT_TYPE_CPU /* event_type */,
1074                 0 /* sub_event_type (stage 0) */,
1075                 in_sched_type,
1076                 0 /* queue */,
1077                 0 /* port */,
1078                 total_events /* events */);
1079         if (ret)
1080                 return -1;
1081
1082         ret = launch_workers_and_wait(worker_group_based_pipeline,
1083                                         worker_group_based_pipeline,
1084                                         total_events, nr_ports, out_sched_type);
1085         if (ret)
1086                 return -1;
1087
1088         if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
1089                         out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
                /* Check whether the event order was maintained */
1091                 return seqn_list_check(total_events);
1092         }
1093         return 0;
1094 }
1095
1096 static int
1097 test_multi_port_queue_ordered_to_atomic(void)
1098 {
1099         /* Ingress event order test */
1100         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1101                                 RTE_SCHED_TYPE_ATOMIC);
1102 }
1103
1104 static int
1105 test_multi_port_queue_ordered_to_ordered(void)
1106 {
1107         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1108                                 RTE_SCHED_TYPE_ORDERED);
1109 }
1110
1111 static int
1112 test_multi_port_queue_ordered_to_parallel(void)
1113 {
1114         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1115                                 RTE_SCHED_TYPE_PARALLEL);
1116 }
1117
1118 static int
1119 test_multi_port_queue_atomic_to_atomic(void)
1120 {
1121         /* Ingress event order test */
1122         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1123                                 RTE_SCHED_TYPE_ATOMIC);
1124 }
1125
1126 static int
1127 test_multi_port_queue_atomic_to_ordered(void)
1128 {
1129         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1130                                 RTE_SCHED_TYPE_ORDERED);
1131 }
1132
1133 static int
1134 test_multi_port_queue_atomic_to_parallel(void)
1135 {
1136         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1137                                 RTE_SCHED_TYPE_PARALLEL);
1138 }
1139
1140 static int
1141 test_multi_port_queue_parallel_to_atomic(void)
1142 {
1143         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1144                                 RTE_SCHED_TYPE_ATOMIC);
1145 }
1146
1147 static int
1148 test_multi_port_queue_parallel_to_ordered(void)
1149 {
1150         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1151                                 RTE_SCHED_TYPE_ORDERED);
1152 }
1153
1154 static int
1155 test_multi_port_queue_parallel_to_parallel(void)
1156 {
1157         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1158                                 RTE_SCHED_TYPE_PARALLEL);
1159 }
1160
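/*
 * 256-stage pipeline: each event is forwarded with an incremented
 * sub_event_type and a random sched type until it reaches stage 255,
 * where it is freed and counted.
 */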
1161 static int
1162 worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
1163 {
1164         struct test_core_param *param = arg;
1165         struct rte_event ev;
1166         uint16_t valid_event;
1167         uint8_t port = param->port;
1168         rte_atomic32_t *total_events = param->total_events;
1169
1170         while (rte_atomic32_read(total_events) > 0) {
1171                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1172                 if (!valid_event)
1173                         continue;
1174
1175                 if (ev.sub_event_type == 255) { /* last stage */
1176                         rte_pktmbuf_free(ev.mbuf);
1177                         rte_atomic32_sub(total_events, 1);
1178                 } else {
1179                         ev.event_type = RTE_EVENT_TYPE_CPU;
1180                         ev.sub_event_type++;
1181                         ev.sched_type =
1182                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1183                         ev.op = RTE_EVENT_OP_FORWARD;
1184                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1185                 }
1186         }
1187         return 0;
1188 }
1189
1190 static int
1191 launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
1192 {
1193         uint32_t nr_ports;
1194         int ret;
1195
1196         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1197                                 RTE_EVENT_DEV_ATTR_PORT_COUNT,
1198                                 &nr_ports), "Port count get failed");
1199         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1200
1201         if (!nr_ports) {
1202                 ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
1203                         nr_ports, rte_lcore_count() - 1);
1204                 return 0;
1205         }
1206
        /* Inject events with m->seqn ranging from 0 to MAX_EVENTS - 1 */
1208         ret = inject_events(
1209                 0x1 /*flow_id */,
1210                 RTE_EVENT_TYPE_CPU /* event_type */,
1211                 0 /* sub_event_type (stage 0) */,
1212                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
1213                 0 /* queue */,
1214                 0 /* port */,
1215                 MAX_EVENTS /* events */);
1216         if (ret)
1217                 return -1;
1218
1219         return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
1220                                          0xff /* invalid */);
1221 }
1222
/* Flow based pipeline with maximum stages and random sched type */
1224 static int
1225 test_multi_port_flow_max_stages_random_sched_type(void)
1226 {
1227         return launch_multi_port_max_stages_random_sched_type(
1228                 worker_flow_based_pipeline_max_stages_rand_sched_type);
1229 }
1230
1231 static int
1232 worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
1233 {
1234         struct test_core_param *param = arg;
1235         struct rte_event ev;
1236         uint16_t valid_event;
1237         uint8_t port = param->port;
1238         uint32_t queue_count;
1239         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1240                             RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1241                             &queue_count), "Queue count get failed");
1242         uint8_t nr_queues = queue_count;
1243         rte_atomic32_t *total_events = param->total_events;
1244
1245         while (rte_atomic32_read(total_events) > 0) {
1246                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1247                 if (!valid_event)
1248                         continue;
1249
1250                 if (ev.queue_id == nr_queues - 1) { /* last stage */
1251                         rte_pktmbuf_free(ev.mbuf);
1252                         rte_atomic32_sub(total_events, 1);
1253                 } else {
1254                         ev.event_type = RTE_EVENT_TYPE_CPU;
1255                         ev.queue_id++;
1256                         ev.sched_type =
1257                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1258                         ev.op = RTE_EVENT_OP_FORWARD;
1259                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1260                 }
1261         }
1262         return 0;
1263 }
1264
/* Queue based pipeline with maximum stages and random sched type */
1266 static int
1267 test_multi_port_queue_max_stages_random_sched_type(void)
1268 {
1269         return launch_multi_port_max_stages_random_sched_type(
1270                 worker_queue_based_pipeline_max_stages_rand_sched_type);
1271 }
1272
1273 static int
1274 worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
1275 {
1276         struct test_core_param *param = arg;
1277         struct rte_event ev;
1278         uint16_t valid_event;
1279         uint8_t port = param->port;
1280         uint32_t queue_count;
1281         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1282                             RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1283                             &queue_count), "Queue count get failed");
1284         uint8_t nr_queues = queue_count;
1285         rte_atomic32_t *total_events = param->total_events;
1286
1287         while (rte_atomic32_read(total_events) > 0) {
1288                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1289                 if (!valid_event)
1290                         continue;
1291
1292                 if (ev.queue_id == nr_queues - 1) { /* Last stage */
1293                         rte_pktmbuf_free(ev.mbuf);
1294                         rte_atomic32_sub(total_events, 1);
1295                 } else {
1296                         ev.event_type = RTE_EVENT_TYPE_CPU;
1297                         ev.queue_id++;
1298                         ev.sub_event_type = rte_rand() % 256;
1299                         ev.sched_type =
1300                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1301                         ev.op = RTE_EVENT_OP_FORWARD;
1302                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1303                 }
1304         }
1305         return 0;
1306 }
1307
/* Queue and flow based pipeline with maximum stages and random sched type */
1309 static int
1310 test_multi_port_mixed_max_stages_random_sched_type(void)
1311 {
1312         return launch_multi_port_max_stages_random_sched_type(
1313                 worker_mixed_pipeline_max_stages_rand_sched_type);
1314 }
1315
1316 static int
1317 worker_ordered_flow_producer(void *arg)
1318 {
1319         struct test_core_param *param = arg;
1320         uint8_t port = param->port;
1321         struct rte_mbuf *m;
1322         int counter = 0;
1323
1324         while (counter < NUM_PACKETS) {
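                /* The pool may be transiently empty while consumers
                 * still hold mbufs; retry until one becomes available.
                 */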
1325                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
1326                 if (m == NULL)
1327                         continue;
1328
1329                 m->seqn = counter++;
1330
1331                 struct rte_event ev = {.event = 0, .u64 = 0};
1332
1333                 ev.flow_id = 0x1; /* Generate a fat flow */
1334                 ev.sub_event_type = 0;
1335                 /* Inject the new event */
1336                 ev.op = RTE_EVENT_OP_NEW;
1337                 ev.event_type = RTE_EVENT_TYPE_CPU;
1338                 ev.sched_type = RTE_SCHED_TYPE_ORDERED;
1339                 ev.queue_id = 0;
1340                 ev.mbuf = m;
1341                 rte_event_enqueue_burst(evdev, port, &ev, 1);
1342         }
1343
1344         return 0;
1345 }
1346
1347 static inline int
1348 test_producer_consumer_ingress_order_test(int (*fn)(void *))
1349 {
1350         uint32_t nr_ports;
1351
1352         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1353                                 RTE_EVENT_DEV_ATTR_PORT_COUNT,
1354                                 &nr_ports), "Port count get failed");
1355         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1356
1357         if (rte_lcore_count() < 3 || nr_ports < 2) {
1358                 ssovf_log_dbg("### Not enough cores for %s test.", __func__);
1359                 return 0;
1360         }
1361
1362         launch_workers_and_wait(worker_ordered_flow_producer, fn,
1363                                 NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
        /* Check whether the event order was maintained */
1365         return seqn_list_check(NUM_PACKETS);
1366 }
1367
1368 /* Flow based producer consumer ingress order test */
1369 static int
1370 test_flow_producer_consumer_ingress_order_test(void)
1371 {
1372         return test_producer_consumer_ingress_order_test(
1373                                 worker_flow_based_pipeline);
1374 }
1375
1376 /* Queue based producer consumer ingress order test */
1377 static int
1378 test_queue_producer_consumer_ingress_order_test(void)
1379 {
1380         return test_producer_consumer_ingress_order_test(
1381                                 worker_group_based_pipeline);
1382 }
1383
1384 static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
1385                 int (*test)(void), const char *name)
1386 {
1387         if (setup() < 0) {
1388                 ssovf_log_selftest("Error setting up test %s", name);
1389                 unsupported++;
1390         } else {
1391                 if (test() < 0) {
1392                         failed++;
1393                         ssovf_log_selftest("%s Failed", name);
1394                 } else {
1395                         passed++;
1396                         ssovf_log_selftest("%s Passed", name);
1397                 }
1398         }
1399
1400         total++;
1401         tdown();
1402 }
1403
1404 int
1405 test_eventdev_octeontx(void)
1406 {
1407         testsuite_setup();
1408
1409         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1410                         test_simple_enqdeq_ordered);
1411         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1412                         test_simple_enqdeq_atomic);
1413         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1414                         test_simple_enqdeq_parallel);
1415         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1416                         test_multi_queue_enq_single_port_deq);
1417         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1418                         test_multi_queue_enq_multi_port_deq);
1419         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1420                         test_queue_to_port_single_link);
1421         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1422                         test_queue_to_port_multi_link);
1423         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1424                         test_multi_port_flow_ordered_to_atomic);
1425         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1426                         test_multi_port_flow_ordered_to_ordered);
1427         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1428                         test_multi_port_flow_ordered_to_parallel);
1429         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1430                         test_multi_port_flow_atomic_to_atomic);
1431         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1432                         test_multi_port_flow_atomic_to_ordered);
1433         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1434                         test_multi_port_flow_atomic_to_parallel);
1435         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1436                         test_multi_port_flow_parallel_to_atomic);
1437         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1438                         test_multi_port_flow_parallel_to_ordered);
1439         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1440                         test_multi_port_flow_parallel_to_parallel);
1441         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1442                         test_multi_port_queue_ordered_to_atomic);
1443         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1444                         test_multi_port_queue_ordered_to_ordered);
1445         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1446                         test_multi_port_queue_ordered_to_parallel);
1447         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1448                         test_multi_port_queue_atomic_to_atomic);
1449         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1450                         test_multi_port_queue_atomic_to_ordered);
1451         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1452                         test_multi_port_queue_atomic_to_parallel);
1453         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1454                         test_multi_port_queue_parallel_to_atomic);
1455         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1456                         test_multi_port_queue_parallel_to_ordered);
1457         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1458                         test_multi_port_queue_parallel_to_parallel);
1459         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1460                         test_multi_port_flow_max_stages_random_sched_type);
1461         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1462                         test_multi_port_queue_max_stages_random_sched_type);
1463         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1464                         test_multi_port_mixed_max_stages_random_sched_type);
1465         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1466                         test_flow_producer_consumer_ingress_order_test);
1467         OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1468                         test_queue_producer_consumer_ingress_order_test);
1469         OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
1470                         test_multi_queue_priority);
1471         OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1472                         test_multi_port_flow_ordered_to_atomic);
1473         OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1474                         test_multi_port_queue_ordered_to_atomic);
1475
1476         ssovf_log_selftest("Total tests   : %d", total);
1477         ssovf_log_selftest("Passed        : %d", passed);
1478         ssovf_log_selftest("Failed        : %d", failed);
1479         ssovf_log_selftest("Not supported : %d", unsupported);
1480
1481         testsuite_teardown();
1482
1483         if (failed)
1484                 return -1;
1485
1486         return 0;
1487 }