[deb_dpdk.git] drivers/event/sw/sw_evdev_selftest.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <string.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/queue.h>
11
12 #include <rte_memory.h>
13 #include <rte_launch.h>
14 #include <rte_eal.h>
15 #include <rte_per_lcore.h>
16 #include <rte_lcore.h>
17 #include <rte_debug.h>
18 #include <rte_ethdev.h>
19 #include <rte_cycles.h>
20 #include <rte_eventdev.h>
21 #include <rte_pause.h>
22 #include <rte_service.h>
23 #include <rte_service_component.h>
24 #include <rte_bus_vdev.h>
25
26 #include "sw_evdev.h"
27
28 #define MAX_PORTS 16
29 #define MAX_QIDS 16
30 #define NUM_PACKETS (1<<18)
31 #define DEQUEUE_DEPTH 128
32
33 static int evdev;
34
35 struct test {
36         struct rte_mempool *mbuf_pool;
37         uint8_t port[MAX_PORTS];
38         uint8_t qid[MAX_QIDS];
39         int nb_qids;
40         uint32_t service_id;
41 };
42
43 static struct rte_event release_ev;
44
45 static inline struct rte_mbuf *
46 rte_gen_arp(int portid, struct rte_mempool *mp)
47 {
48         /*
49          * len = 14 + 46
50          * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
51          */
52         static const uint8_t arp_request[] = {
53                 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
54                 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
55                 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
56                 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
57                 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
58                 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
59                 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
60                 0x00, 0x00, 0x00, 0x00
61         };
62         struct rte_mbuf *m;
63         int pkt_len = sizeof(arp_request) - 1;
64
65         m = rte_pktmbuf_alloc(mp);
66         if (!m)
67                 return 0;
68
69         memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
70                 arp_request, pkt_len);
71         rte_pktmbuf_pkt_len(m) = pkt_len;
72         rte_pktmbuf_data_len(m) = pkt_len;
73
74         RTE_SET_USED(portid);
75
76         return m;
77 }
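
/*
 * A minimal sketch (not called by any test below) of how the generated ARP
 * mbuf is typically wrapped in an rte_event and injected as a NEW event on a
 * port; the helper name and the port/queue arguments are placeholders.
 */
static inline int
enqueue_one_arp_event(struct test *t, uint8_t port, uint8_t queue)
{
        struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
        struct rte_event ev;

        if (!arp)
                return -1;

        ev = (struct rte_event) {
                .op = RTE_EVENT_OP_NEW,
                .queue_id = queue,
                .mbuf = arp,
        };
        /* enqueue_burst() returns the number of events actually accepted */
        return rte_event_enqueue_burst(evdev, port, &ev, 1) == 1 ? 0 : -1;
}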
78
79 static void
80 xstats_print(void)
81 {
82         const uint32_t XSTATS_MAX = 1024;
83         uint32_t i;
84         uint32_t ids[XSTATS_MAX];
85         uint64_t values[XSTATS_MAX];
86         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
87
88         for (i = 0; i < XSTATS_MAX; i++)
89                 ids[i] = i;
90
91         /* Device names / values */
92         int ret = rte_event_dev_xstats_names_get(evdev,
93                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
94                                         xstats_names, ids, XSTATS_MAX);
95         if (ret < 0) {
96                 printf("%d: xstats names get() returned error\n",
97                         __LINE__);
98                 return;
99         }
100         ret = rte_event_dev_xstats_get(evdev,
101                                         RTE_EVENT_DEV_XSTATS_DEVICE,
102                                         0, ids, values, ret);
103         if (ret > (signed int)XSTATS_MAX)
104                 printf("%s %d: more xstats available than space\n",
105                                 __func__, __LINE__);
106         for (i = 0; (signed int)i < ret; i++) {
107                 printf("%d : %s : %"PRIu64"\n",
108                                 i, xstats_names[i].name, values[i]);
109         }
110
111         /* Port names / values */
112         ret = rte_event_dev_xstats_names_get(evdev,
113                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
114                                         xstats_names, ids, XSTATS_MAX);
115         ret = rte_event_dev_xstats_get(evdev,
116                                         RTE_EVENT_DEV_XSTATS_PORT, 1,
117                                         ids, values, ret);
118         if (ret > (signed int)XSTATS_MAX)
119                 printf("%s %d: more xstats available than space\n",
120                                 __func__, __LINE__);
121         for (i = 0; (signed int)i < ret; i++) {
122                 printf("%d : %s : %"PRIu64"\n",
123                                 i, xstats_names[i].name, values[i]);
124         }
125
126         /* Queue names / values */
127         ret = rte_event_dev_xstats_names_get(evdev,
128                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
129                                         xstats_names, ids, XSTATS_MAX);
130         ret = rte_event_dev_xstats_get(evdev,
131                                         RTE_EVENT_DEV_XSTATS_QUEUE,
132                                         1, ids, values, ret);
133         if (ret > (signed int)XSTATS_MAX)
134                 printf("%s %d: more xstats available than space\n",
135                                 __func__, __LINE__);
136         for (i = 0; (signed int)i < ret; i++) {
137                 printf("%d : %s : %"PRIu64"\n",
138                                 i, xstats_names[i].name, values[i]);
139         }
140 }
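
/*
 * Individual counters can also be looked up by name rather than by id; a
 * short sketch (illustrative only) using the same API the stats helper below
 * is built on. "dev_rx" is one of the device-level xstats queried later in
 * this file.
 */
static inline uint64_t
xstat_by_name_example(void)
{
        unsigned int id;

        /* returns the current value and fills in the stat's numeric id */
        return rte_event_dev_xstats_by_name_get(evdev, "dev_rx", &id);
}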
141
142 /* initialization and config */
143 static inline int
144 init(struct test *t, int nb_queues, int nb_ports)
145 {
146         struct rte_event_dev_config config = {
147                         .nb_event_queues = nb_queues,
148                         .nb_event_ports = nb_ports,
149                         .nb_event_queue_flows = 1024,
150                         .nb_events_limit = 4096,
151                         .nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
152                         .nb_event_port_enqueue_depth = 128,
153         };
154         int ret;
155
156         void *temp = t->mbuf_pool; /* save and restore mbuf pool */
157
158         memset(t, 0, sizeof(*t));
159         t->mbuf_pool = temp;
160
161         ret = rte_event_dev_configure(evdev, &config);
162         if (ret < 0)
163                 printf("%d: Error configuring device\n", __LINE__);
164         return ret;
165 };
166
167 static inline int
168 create_ports(struct test *t, int num_ports)
169 {
170         int i;
171         static const struct rte_event_port_conf conf = {
172                         .new_event_threshold = 1024,
173                         .dequeue_depth = 32,
174                         .enqueue_depth = 64,
175                         .disable_implicit_release = 0,
176         };
177         if (num_ports > MAX_PORTS)
178                 return -1;
179
180         for (i = 0; i < num_ports; i++) {
181                 if (rte_event_port_setup(evdev, i, &conf) < 0) {
182                         printf("Error setting up port %d\n", i);
183                         return -1;
184                 }
185                 t->port[i] = i;
186         }
187
188         return 0;
189 }
190
191 static inline int
192 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
193 {
194         int i;
195
196         /* Q creation */
197         const struct rte_event_queue_conf conf = {
198                         .schedule_type = flags,
199                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
200                         .nb_atomic_flows = 1024,
201                         .nb_atomic_order_sequences = 1024,
202         };
203
204         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
205                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
206                         printf("%d: error creating qid %d\n", __LINE__, i);
207                         return -1;
208                 }
209                 t->qid[i] = i;
210         }
211         t->nb_qids += num_qids;
212         if (t->nb_qids > MAX_QIDS)
213                 return -1;
214
215         return 0;
216 }
217
218 static inline int
219 create_atomic_qids(struct test *t, int num_qids)
220 {
221         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
222 }
223
224 static inline int
225 create_ordered_qids(struct test *t, int num_qids)
226 {
227         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
228 }
229
230
231 static inline int
232 create_unordered_qids(struct test *t, int num_qids)
233 {
234         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
235 }
236
237 static inline int
238 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
239 {
240         int i;
241
242         /* Q creation */
243         static const struct rte_event_queue_conf conf = {
244                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
245                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
246         };
247
248         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
249                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
250                         printf("%d: error creating qid %d\n", __LINE__, i);
251                         return -1;
252                 }
253                 t->qid[i] = i;
254
255                 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
256                                 &t->qid[i], NULL, 1) != 1) {
257                         printf("%d: error creating link for qid %d\n",
258                                         __LINE__, i);
259                         return -1;
260                 }
261         }
262         t->nb_qids += num_qids;
263         if (t->nb_qids > MAX_QIDS)
264                 return -1;
265
266         return 0;
267 }
268
269 /* destruction */
270 static inline int
271 cleanup(struct test *t __rte_unused)
272 {
273         rte_event_dev_stop(evdev);
274         rte_event_dev_close(evdev);
275         return 0;
276 };
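
/*
 * Sketch of the setup/teardown sequence the tests below follow: configure
 * the device, create ports and queues, link, start, then stop and close via
 * cleanup(). Shown here for one port and one atomic queue only; the real
 * tests vary the counts, schedule types and traffic.
 */
static inline int
example_test_skeleton(struct test *t)
{
        if (init(t, 1, 1) < 0 ||
                        create_ports(t, 1) < 0 ||
                        create_atomic_qids(t, 1) < 0)
                return -1;

        /* load-balanced queues must be linked to a port before starting */
        if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1)
                return -1;

        if (rte_event_dev_start(evdev) < 0)
                return -1;

        /*
         * enqueue/dequeue work goes here, with the scheduler driven by
         * rte_service_run_iter_on_app_lcore(t->service_id, 1) in between
         */

        cleanup(t);
        return 0;
}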
277
278 struct test_event_dev_stats {
279         uint64_t rx_pkts;       /**< Total packets received */
280         uint64_t rx_dropped;    /**< Total packets dropped (e.g. invalid QID) */
281         uint64_t tx_pkts;       /**< Total packets transmitted */
282
283         /** Packets received on this port */
284         uint64_t port_rx_pkts[MAX_PORTS];
285         /** Packets dropped on this port */
286         uint64_t port_rx_dropped[MAX_PORTS];
287         /** Packets inflight on this port */
288         uint64_t port_inflight[MAX_PORTS];
289         /** Packets transmitted on this port */
290         uint64_t port_tx_pkts[MAX_PORTS];
291         /** Packets received on this qid */
292         uint64_t qid_rx_pkts[MAX_QIDS];
293         /** Packets dropped on this qid */
294         uint64_t qid_rx_dropped[MAX_QIDS];
295         /** Packets transmitted on this qid */
296         uint64_t qid_tx_pkts[MAX_QIDS];
297 };
298
299 static inline int
300 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
301 {
302         static uint32_t i;
303         static uint32_t total_ids[3]; /* rx, tx and drop */
304         static uint32_t port_rx_pkts_ids[MAX_PORTS];
305         static uint32_t port_rx_dropped_ids[MAX_PORTS];
306         static uint32_t port_inflight_ids[MAX_PORTS];
307         static uint32_t port_tx_pkts_ids[MAX_PORTS];
308         static uint32_t qid_rx_pkts_ids[MAX_QIDS];
309         static uint32_t qid_rx_dropped_ids[MAX_QIDS];
310         static uint32_t qid_tx_pkts_ids[MAX_QIDS];
311
312
313         stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
314                         "dev_rx", &total_ids[0]);
315         stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
316                         "dev_drop", &total_ids[1]);
317         stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
318                         "dev_tx", &total_ids[2]);
319         for (i = 0; i < MAX_PORTS; i++) {
320                 char name[32];
321                 snprintf(name, sizeof(name), "port_%u_rx", i);
322                 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
323                                 dev_id, name, &port_rx_pkts_ids[i]);
324                 snprintf(name, sizeof(name), "port_%u_drop", i);
325                 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
326                                 dev_id, name, &port_rx_dropped_ids[i]);
327                 snprintf(name, sizeof(name), "port_%u_inflight", i);
328                 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
329                                 dev_id, name, &port_inflight_ids[i]);
330                 snprintf(name, sizeof(name), "port_%u_tx", i);
331                 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
332                                 dev_id, name, &port_tx_pkts_ids[i]);
333         }
334         for (i = 0; i < MAX_QIDS; i++) {
335                 char name[32];
336                 snprintf(name, sizeof(name), "qid_%u_rx", i);
337                 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
338                                 dev_id, name, &qid_rx_pkts_ids[i]);
339                 snprintf(name, sizeof(name), "qid_%u_drop", i);
340                 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
341                                 dev_id, name, &qid_rx_dropped_ids[i]);
342                 snprintf(name, sizeof(name), "qid_%u_tx", i);
343                 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
344                                 dev_id, name, &qid_tx_pkts_ids[i]);
345         }
346
347         return 0;
348 }
349
350 /* run_prio_packet_test
351  * This performs a basic packet priority check on the test instance passed in.
352  * It is factored out of the main priority tests as the same tests must be
353  * performed to ensure prioritization of each type of QID.
354  *
355  * Requirements:
356  *  - An initialized test structure, including mempool
357  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
358  *  - t->qid[0] is the QID to be tested
359  *  - if LB QID, the CQ must be mapped to the QID.
360  */
361 static int
362 run_prio_packet_test(struct test *t)
363 {
364         int err;
365         const uint32_t MAGIC_SEQN[] = {4711, 1234};
366         const uint32_t PRIORITY[] = {
367                 RTE_EVENT_DEV_PRIORITY_NORMAL,
368                 RTE_EVENT_DEV_PRIORITY_HIGHEST
369         };
370         unsigned int i;
371         for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
372                 /* generate pkt and enqueue */
373                 struct rte_event ev;
374                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
375                 if (!arp) {
376                         printf("%d: gen of pkt failed\n", __LINE__);
377                         return -1;
378                 }
379                 arp->seqn = MAGIC_SEQN[i];
380
381                 ev = (struct rte_event){
382                         .priority = PRIORITY[i],
383                         .op = RTE_EVENT_OP_NEW,
384                         .queue_id = t->qid[0],
385                         .mbuf = arp
386                 };
387                 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
388                 if (err < 0) {
389                         printf("%d: error failed to enqueue\n", __LINE__);
390                         return -1;
391                 }
392         }
393
394         rte_service_run_iter_on_app_lcore(t->service_id, 1);
395
396         struct test_event_dev_stats stats;
397         err = test_event_dev_stats_get(evdev, &stats);
398         if (err) {
399                 printf("%d: error failed to get stats\n", __LINE__);
400                 return -1;
401         }
402
403         if (stats.port_rx_pkts[t->port[0]] != 2) {
404                 printf("%d: error stats incorrect for directed port\n",
405                                 __LINE__);
406                 rte_event_dev_dump(evdev, stdout);
407                 return -1;
408         }
409
410         struct rte_event ev, ev2;
411         uint32_t deq_pkts;
412         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
413         if (deq_pkts != 1) {
414                 printf("%d: error failed to deq\n", __LINE__);
415                 rte_event_dev_dump(evdev, stdout);
416                 return -1;
417         }
418         if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
419                 printf("%d: first packet out not highest priority\n",
420                                 __LINE__);
421                 rte_event_dev_dump(evdev, stdout);
422                 return -1;
423         }
424         rte_pktmbuf_free(ev.mbuf);
425
426         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
427         if (deq_pkts != 1) {
428                 printf("%d: error failed to deq\n", __LINE__);
429                 rte_event_dev_dump(evdev, stdout);
430                 return -1;
431         }
432         if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
433                 printf("%d: second packet out not lower priority\n",
434                                 __LINE__);
435                 rte_event_dev_dump(evdev, stdout);
436                 return -1;
437         }
438         rte_pktmbuf_free(ev2.mbuf);
439
440         cleanup(t);
441         return 0;
442 }
443
444 static int
445 test_single_directed_packet(struct test *t)
446 {
447         const int rx_enq = 0;
448         const int wrk_enq = 2;
449         int err;
450
451         /* Create instance with 3 directed QIDs going to 3 ports */
452         if (init(t, 3, 3) < 0 ||
453                         create_ports(t, 3) < 0 ||
454                         create_directed_qids(t, 3, t->port) < 0)
455                 return -1;
456
457         if (rte_event_dev_start(evdev) < 0) {
458                 printf("%d: Error with start call\n", __LINE__);
459                 return -1;
460         }
461
462         /************** FORWARD ****************/
463         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
464         struct rte_event ev = {
465                         .op = RTE_EVENT_OP_NEW,
466                         .queue_id = wrk_enq,
467                         .mbuf = arp,
468         };
469
470         if (!arp) {
471                 printf("%d: gen of pkt failed\n", __LINE__);
472                 return -1;
473         }
474
475         const uint32_t MAGIC_SEQN = 4711;
476         arp->seqn = MAGIC_SEQN;
477
478         /* generate pkt and enqueue */
479         err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
480         if (err < 0) {
481                 printf("%d: error failed to enqueue\n", __LINE__);
482                 return -1;
483         }
484
485         /* Run schedule() as dir packets may need to be re-ordered */
486         rte_service_run_iter_on_app_lcore(t->service_id, 1);
487
488         struct test_event_dev_stats stats;
489         err = test_event_dev_stats_get(evdev, &stats);
490         if (err) {
491                 printf("%d: error failed to get stats\n", __LINE__);
492                 return -1;
493         }
494
495         if (stats.port_rx_pkts[rx_enq] != 1) {
496                 printf("%d: error stats incorrect for directed port\n",
497                                 __LINE__);
498                 return -1;
499         }
500
501         uint32_t deq_pkts;
502         deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
503         if (deq_pkts != 1) {
504                 printf("%d: error failed to deq\n", __LINE__);
505                 return -1;
506         }
507
508         err = test_event_dev_stats_get(evdev, &stats);
509         if (stats.port_rx_pkts[wrk_enq] != 0 &&
510                         stats.port_rx_pkts[wrk_enq] != 1) {
511                 printf("%d: error directed stats post-dequeue\n", __LINE__);
512                 return -1;
513         }
514
515         if (ev.mbuf->seqn != MAGIC_SEQN) {
516                 printf("%d: error magic sequence number not dequeued\n",
517                                 __LINE__);
518                 return -1;
519         }
520
521         rte_pktmbuf_free(ev.mbuf);
522         cleanup(t);
523         return 0;
524 }
525
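/*
 * Enqueue a NEW event on a directed queue, dequeue it, and re-enqueue it as
 * a FORWARD repeatedly; exercises credit accounting for forwarded events on
 * a directed port.
 */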
526 static int
527 test_directed_forward_credits(struct test *t)
528 {
529         uint32_t i;
530         int32_t err;
531
532         if (init(t, 1, 1) < 0 ||
533                         create_ports(t, 1) < 0 ||
534                         create_directed_qids(t, 1, t->port) < 0)
535                 return -1;
536
537         if (rte_event_dev_start(evdev) < 0) {
538                 printf("%d: Error with start call\n", __LINE__);
539                 return -1;
540         }
541
542         struct rte_event ev = {
543                         .op = RTE_EVENT_OP_NEW,
544                         .queue_id = 0,
545         };
546
547         for (i = 0; i < 1000; i++) {
548                 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
549                 if (err < 0) {
550                         printf("%d: error failed to enqueue\n", __LINE__);
551                         return -1;
552                 }
553                 rte_service_run_iter_on_app_lcore(t->service_id, 1);
554
555                 uint32_t deq_pkts;
556                 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
557                 if (deq_pkts != 1) {
558                         printf("%d: error failed to deq\n", __LINE__);
559                         return -1;
560                 }
561
562                 /* re-write event to be a forward, and continue looping it */
563                 ev.op = RTE_EVENT_OP_FORWARD;
564         }
565
566         cleanup(t);
567         return 0;
568 }
569
570
571 static int
572 test_priority_directed(struct test *t)
573 {
574         if (init(t, 1, 1) < 0 ||
575                         create_ports(t, 1) < 0 ||
576                         create_directed_qids(t, 1, t->port) < 0) {
577                 printf("%d: Error initializing device\n", __LINE__);
578                 return -1;
579         }
580
581         if (rte_event_dev_start(evdev) < 0) {
582                 printf("%d: Error with start call\n", __LINE__);
583                 return -1;
584         }
585
586         return run_prio_packet_test(t);
587 }
588
589 static int
590 test_priority_atomic(struct test *t)
591 {
592         if (init(t, 1, 1) < 0 ||
593                         create_ports(t, 1) < 0 ||
594                         create_atomic_qids(t, 1) < 0) {
595                 printf("%d: Error initializing device\n", __LINE__);
596                 return -1;
597         }
598
599         /* map the QID */
600         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
601                 printf("%d: error mapping qid to port\n", __LINE__);
602                 return -1;
603         }
604         if (rte_event_dev_start(evdev) < 0) {
605                 printf("%d: Error with start call\n", __LINE__);
606                 return -1;
607         }
608
609         return run_prio_packet_test(t);
610 }
611
612 static int
613 test_priority_ordered(struct test *t)
614 {
615         if (init(t, 1, 1) < 0 ||
616                         create_ports(t, 1) < 0 ||
617                         create_ordered_qids(t, 1) < 0) {
618                 printf("%d: Error initializing device\n", __LINE__);
619                 return -1;
620         }
621
622         /* map the QID */
623         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
624                 printf("%d: error mapping qid to port\n", __LINE__);
625                 return -1;
626         }
627         if (rte_event_dev_start(evdev) < 0) {
628                 printf("%d: Error with start call\n", __LINE__);
629                 return -1;
630         }
631
632         return run_prio_packet_test(t);
633 }
634
635 static int
636 test_priority_unordered(struct test *t)
637 {
638         if (init(t, 1, 1) < 0 ||
639                         create_ports(t, 1) < 0 ||
640                         create_unordered_qids(t, 1) < 0) {
641                 printf("%d: Error initializing device\n", __LINE__);
642                 return -1;
643         }
644
645         /* map the QID */
646         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
647                 printf("%d: error mapping qid to port\n", __LINE__);
648                 return -1;
649         }
650         if (rte_event_dev_start(evdev) < 0) {
651                 printf("%d: Error with start call\n", __LINE__);
652                 return -1;
653         }
654
655         return run_prio_packet_test(t);
656 }
657
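/*
 * Enqueue two NEW events, one to each of two atomic queues, from a single
 * port and check that each event is delivered on the port linked to its
 * queue.
 */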
658 static int
659 burst_packets(struct test *t)
660 {
661         /************** CONFIG ****************/
662         uint32_t i;
663         int err;
664         int ret;
665
666         /* Create instance with 2 ports and 2 queues */
667         if (init(t, 2, 2) < 0 ||
668                         create_ports(t, 2) < 0 ||
669                         create_atomic_qids(t, 2) < 0) {
670                 printf("%d: Error initializing device\n", __LINE__);
671                 return -1;
672         }
673
674         /* CQ mapping to QID */
675         ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
676         if (ret != 1) {
677                 printf("%d: error mapping lb qid0\n", __LINE__);
678                 return -1;
679         }
680         ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
681         if (ret != 1) {
682                 printf("%d: error mapping lb qid1\n", __LINE__);
683                 return -1;
684         }
685
686         if (rte_event_dev_start(evdev) < 0) {
687                 printf("%d: Error with start call\n", __LINE__);
688                 return -1;
689         }
690
691         /************** FORWARD ****************/
692         const uint32_t rx_port = 0;
693         const uint32_t NUM_PKTS = 2;
694
695         for (i = 0; i < NUM_PKTS; i++) {
696                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
697                 if (!arp) {
698                         printf("%d: error generating pkt\n", __LINE__);
699                         return -1;
700                 }
701
702                 struct rte_event ev = {
703                                 .op = RTE_EVENT_OP_NEW,
704                                 .queue_id = i % 2,
705                                 .flow_id = i % 3,
706                                 .mbuf = arp,
707                 };
708                 /* generate pkt and enqueue */
709                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
710                 if (err < 0) {
711                         printf("%d: Failed to enqueue\n", __LINE__);
712                         return -1;
713                 }
714         }
715         rte_service_run_iter_on_app_lcore(t->service_id, 1);
716
717         /* Check stats for all NUM_PKTS arrived to sched core */
718         struct test_event_dev_stats stats;
719
720         err = test_event_dev_stats_get(evdev, &stats);
721         if (err) {
722                 printf("%d: failed to get stats\n", __LINE__);
723                 return -1;
724         }
725         if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
726                 printf("%d: Sched core didn't receive all %d pkts\n",
727                                 __LINE__, NUM_PKTS);
728                 rte_event_dev_dump(evdev, stdout);
729                 return -1;
730         }
731
732         uint32_t deq_pkts;
733         int p;
734
735         deq_pkts = 0;
736         /******** DEQ QID 1 *******/
737         do {
738                 struct rte_event ev;
739                 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
740                 deq_pkts += p;
741                 rte_pktmbuf_free(ev.mbuf);
742         } while (p);
743
744         if (deq_pkts != NUM_PKTS/2) {
745                 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
746                                 __LINE__);
747                 return -1;
748         }
749
750         /******** DEQ QID 2 *******/
751         deq_pkts = 0;
752         do {
753                 struct rte_event ev;
754                 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
755                 deq_pkts += p;
756                 rte_pktmbuf_free(ev.mbuf);
757         } while (p);
758         if (deq_pkts != NUM_PKTS/2) {
759                 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
760                                 __LINE__);
761                 return -1;
762         }
763
764         cleanup(t);
765         return 0;
766 }
767
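/*
 * Enqueue an op-only event with nothing actually in flight and run the
 * scheduler; the rx, tx and inflight counters must all stay at zero.
 */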
768 static int
769 abuse_inflights(struct test *t)
770 {
771         const int rx_enq = 0;
772         const int wrk_enq = 2;
773         int err;
774
775         /* Create instance with 4 ports */
776         if (init(t, 1, 4) < 0 ||
777                         create_ports(t, 4) < 0 ||
778                         create_atomic_qids(t, 1) < 0) {
779                 printf("%d: Error initializing device\n", __LINE__);
780                 return -1;
781         }
782
783         /* CQ mapping to QID */
784         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
785         if (err != 1) {
786                 printf("%d: error mapping lb qid\n", __LINE__);
787                 cleanup(t);
788                 return -1;
789         }
790
791         if (rte_event_dev_start(evdev) < 0) {
792                 printf("%d: Error with start call\n", __LINE__);
793                 return -1;
794         }
795
796         /* Enqueue op only */
797         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
798         if (err < 0) {
799                 printf("%d: Failed to enqueue\n", __LINE__);
800                 return -1;
801         }
802
803         /* schedule */
804         rte_service_run_iter_on_app_lcore(t->service_id, 1);
805
806         struct test_event_dev_stats stats;
807
808         err = test_event_dev_stats_get(evdev, &stats);
809         if (err) {
810                 printf("%d: failed to get stats\n", __LINE__);
811                 return -1;
812         }
813
814         if (stats.rx_pkts != 0 ||
815                         stats.tx_pkts != 0 ||
816                         stats.port_inflight[wrk_enq] != 0) {
817                 printf("%d: Sched core didn't handle pkt as expected\n",
818                                 __LINE__);
819                 return -1;
820         }
821
822         cleanup(t);
823         return 0;
824 }
825
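/*
 * Check the number of device, port and queue xstats, their values after a
 * known amount of traffic, and that resetting them behaves as expected.
 */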
826 static int
827 xstats_tests(struct test *t)
828 {
829         const int wrk_enq = 2;
830         int err;
831
832         /* Create instance with 4 ports */
833         if (init(t, 1, 4) < 0 ||
834                         create_ports(t, 4) < 0 ||
835                         create_atomic_qids(t, 1) < 0) {
836                 printf("%d: Error initializing device\n", __LINE__);
837                 return -1;
838         }
839
840         /* CQ mapping to QID */
841         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
842         if (err != 1) {
843                 printf("%d: error mapping lb qid\n", __LINE__);
844                 cleanup(t);
845                 return -1;
846         }
847
848         if (rte_event_dev_start(evdev) < 0) {
849                 printf("%d: Error with start call\n", __LINE__);
850                 return -1;
851         }
852
853         const uint32_t XSTATS_MAX = 1024;
854
855         uint32_t i;
856         uint32_t ids[XSTATS_MAX];
857         uint64_t values[XSTATS_MAX];
858         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
859
860         for (i = 0; i < XSTATS_MAX; i++)
861                 ids[i] = i;
862
863         /* Device names / values */
864         int ret = rte_event_dev_xstats_names_get(evdev,
865                                         RTE_EVENT_DEV_XSTATS_DEVICE,
866                                         0, xstats_names, ids, XSTATS_MAX);
867         if (ret != 6) {
868                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
869                 return -1;
870         }
871         ret = rte_event_dev_xstats_get(evdev,
872                                         RTE_EVENT_DEV_XSTATS_DEVICE,
873                                         0, ids, values, ret);
874         if (ret != 6) {
875                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
876                 return -1;
877         }
878
879         /* Port names / values */
880         ret = rte_event_dev_xstats_names_get(evdev,
881                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
882                                         xstats_names, ids, XSTATS_MAX);
883         if (ret != 21) {
884                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
885                 return -1;
886         }
887         ret = rte_event_dev_xstats_get(evdev,
888                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
889                                         ids, values, ret);
890         if (ret != 21) {
891                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
892                 return -1;
893         }
894
895         /* Queue names / values */
896         ret = rte_event_dev_xstats_names_get(evdev,
897                                         RTE_EVENT_DEV_XSTATS_QUEUE,
898                                         0, xstats_names, ids, XSTATS_MAX);
899         if (ret != 16) {
900                 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
901                 return -1;
902         }
903
904         /* NEGATIVE TEST: a non-existent queue id must be rejected with -EINVAL */
905         ret = rte_event_dev_xstats_get(evdev,
906                                         RTE_EVENT_DEV_XSTATS_QUEUE,
907                                         1, ids, values, ret);
908         if (ret != -EINVAL) {
909                 printf("%d: expected -EINVAL, got return %d\n", __LINE__, ret);
910                 return -1;
911         }
912
913         ret = rte_event_dev_xstats_get(evdev,
914                                         RTE_EVENT_DEV_XSTATS_QUEUE,
915                                         0, ids, values, ret);
916         if (ret != 16) {
917                 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
918                 return -1;
919         }
920
921         /* enqueue packets to check values */
922         for (i = 0; i < 3; i++) {
923                 struct rte_event ev;
924                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
925                 if (!arp) {
926                         printf("%d: gen of pkt failed\n", __LINE__);
927                         return -1;
928                 }
929                 ev.queue_id = t->qid[i];
930                 ev.op = RTE_EVENT_OP_NEW;
931                 ev.mbuf = arp;
932                 ev.flow_id = 7;
933                 arp->seqn = i;
934
935                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
936                 if (err != 1) {
937                         printf("%d: Failed to enqueue\n", __LINE__);
938                         return -1;
939                 }
940         }
941
942         rte_service_run_iter_on_app_lcore(t->service_id, 1);
943
944         /* Device names / values */
945         int num_stats = rte_event_dev_xstats_names_get(evdev,
946                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
947                                         xstats_names, ids, XSTATS_MAX);
948         if (num_stats < 0)
949                 goto fail;
950         ret = rte_event_dev_xstats_get(evdev,
951                                         RTE_EVENT_DEV_XSTATS_DEVICE,
952                                         0, ids, values, num_stats);
953         static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
954         for (i = 0; (signed int)i < ret; i++) {
955                 if (expected[i] != values[i]) {
956                         printf(
957                                 "%d Error xstat %d (id %d) %s : %"PRIu64
958                                 ", expect %"PRIu64"\n",
959                                 __LINE__, i, ids[i], xstats_names[i].name,
960                                 values[i], expected[i]);
961                         goto fail;
962                 }
963         }
964
965         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
966                                         0, NULL, 0);
967
968         /* ensure reset statistics are zero-ed */
969         static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
970         ret = rte_event_dev_xstats_get(evdev,
971                                         RTE_EVENT_DEV_XSTATS_DEVICE,
972                                         0, ids, values, num_stats);
973         for (i = 0; (signed int)i < ret; i++) {
974                 if (expected_zero[i] != values[i]) {
975                         printf(
976                                 "%d Error, xstat %d (id %d) %s : %"PRIu64
977                                 ", expect %"PRIu64"\n",
978                                 __LINE__, i, ids[i], xstats_names[i].name,
979                                 values[i], expected_zero[i]);
980                         goto fail;
981                 }
982         }
983
984         /* port reset checks */
985         num_stats = rte_event_dev_xstats_names_get(evdev,
986                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
987                                         xstats_names, ids, XSTATS_MAX);
988         if (num_stats < 0)
989                 goto fail;
990         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
991                                         0, ids, values, num_stats);
992
993         static const uint64_t port_expected[] = {
994                 3 /* rx */,
995                 0 /* tx */,
996                 0 /* drop */,
997                 0 /* inflights */,
998                 0 /* avg pkt cycles */,
999                 29 /* credits */,
1000                 0 /* rx ring used */,
1001                 4096 /* rx ring free */,
1002                 0 /* cq ring used */,
1003                 32 /* cq ring free */,
1004                 0 /* dequeue calls */,
1005                 /* 10 dequeue burst buckets */
1006                 0, 0, 0, 0, 0,
1007                 0, 0, 0, 0, 0,
1008         };
1009         if (ret != RTE_DIM(port_expected)) {
1010                 printf(
1011                         "%s %d: wrong number of port stats (%d), expected %zu\n",
1012                         __func__, __LINE__, ret, RTE_DIM(port_expected));
1013         }
1014
1015         for (i = 0; (signed int)i < ret; i++) {
1016                 if (port_expected[i] != values[i]) {
1017                         printf(
1018                                 "%s : %d: Error stat %s is %"PRIu64
1019                                 ", expected %"PRIu64"\n",
1020                                 __func__, __LINE__, xstats_names[i].name,
1021                                 values[i], port_expected[i]);
1022                         goto fail;
1023                 }
1024         }
1025
1026         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1027                                         0, NULL, 0);
1028
1029         /* ensure reset statistics are zero-ed */
1030         static const uint64_t port_expected_zero[] = {
1031                 0 /* rx */,
1032                 0 /* tx */,
1033                 0 /* drop */,
1034                 0 /* inflights */,
1035                 0 /* avg pkt cycles */,
1036                 29 /* credits */,
1037                 0 /* rx ring used */,
1038                 4096 /* rx ring free */,
1039                 0 /* cq ring used */,
1040                 32 /* cq ring free */,
1041                 0 /* dequeue calls */,
1042                 /* 10 dequeue burst buckets */
1043                 0, 0, 0, 0, 0,
1044                 0, 0, 0, 0, 0,
1045         };
1046         ret = rte_event_dev_xstats_get(evdev,
1047                                         RTE_EVENT_DEV_XSTATS_PORT,
1048                                         0, ids, values, num_stats);
1049         for (i = 0; (signed int)i < ret; i++) {
1050                 if (port_expected_zero[i] != values[i]) {
1051                         printf(
1052                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1053                                 ", expect %"PRIu64"\n",
1054                                 __LINE__, i, ids[i], xstats_names[i].name,
1055                                 values[i], port_expected_zero[i]);
1056                         goto fail;
1057                 }
1058         }
1059
1060         /* QUEUE STATS TESTS */
1061         num_stats = rte_event_dev_xstats_names_get(evdev,
1062                                                 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1063                                                 xstats_names, ids, XSTATS_MAX);
1064         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1065                                         0, ids, values, num_stats);
1066         if (ret < 0) {
1067                 printf("xstats get returned %d\n", ret);
1068                 goto fail;
1069         }
1070         if ((unsigned int)ret > XSTATS_MAX)
1071                 printf("%s %d: more xstats available than space\n",
1072                                 __func__, __LINE__);
1073
1074         static const uint64_t queue_expected[] = {
1075                 3 /* rx */,
1076                 3 /* tx */,
1077                 0 /* drop */,
1078                 3 /* inflights */,
1079                 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1080                 /* QID-to-Port: pinned_flows, packets */
1081                 0, 0,
1082                 0, 0,
1083                 1, 3,
1084                 0, 0,
1085         };
1086         for (i = 0; (signed int)i < ret; i++) {
1087                 if (queue_expected[i] != values[i]) {
1088                         printf(
1089                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1090                                 ", expect %"PRIu64"\n",
1091                                 __LINE__, i, ids[i], xstats_names[i].name,
1092                                 values[i], queue_expected[i]);
1093                         goto fail;
1094                 }
1095         }
1096
1097         /* Reset the queue stats here */
1098         ret = rte_event_dev_xstats_reset(evdev,
1099                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1100                                         NULL,
1101                                         0);
1102
1103         /* Verify that the resettable stats are reset, and others are not */
1104         static const uint64_t queue_expected_zero[] = {
1105                 0 /* rx */,
1106                 0 /* tx */,
1107                 0 /* drop */,
1108                 3 /* inflight */,
1109                 0, 0, 0, 0, /* 4 iq used */
1110                 /* QID-to-Port: pinned_flows, packets */
1111                 0, 0,
1112                 0, 0,
1113                 1, 0,
1114                 0, 0,
1115         };
1116
1117         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1118                                         ids, values, num_stats);
1119         int fails = 0;
1120         for (i = 0; (signed int)i < ret; i++) {
1121                 if (queue_expected_zero[i] != values[i]) {
1122                         printf(
1123                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1124                                 ", expect %"PRIu64"\n",
1125                                 __LINE__, i, ids[i], xstats_names[i].name,
1126                                 values[i], queue_expected_zero[i]);
1127                         fails++;
1128                 }
1129         }
1130         if (fails) {
1131                 printf("%d : %d of values were not as expected above\n",
1132                                 __LINE__, fails);
1133                 goto fail;
1134         }
1135
1136         cleanup(t);
1137         return 0;
1138
1139 fail:
1140         rte_event_dev_dump(0, stdout);
1141         cleanup(t);
1142         return -1;
1143 }
1144
1145
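/*
 * Request port and queue xstats names for out-of-range ids; the driver must
 * report zero stats for non-existent ids.
 */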
1146 static int
1147 xstats_id_abuse_tests(struct test *t)
1148 {
1149         int err;
1150         const uint32_t XSTATS_MAX = 1024;
1151         const uint32_t link_port = 2;
1152
1153         uint32_t ids[XSTATS_MAX];
1154         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1155
1156         /* Create instance with 4 ports */
1157         if (init(t, 1, 4) < 0 ||
1158                         create_ports(t, 4) < 0 ||
1159                         create_atomic_qids(t, 1) < 0) {
1160                 printf("%d: Error initializing device\n", __LINE__);
1161                 goto fail;
1162         }
1163
1164         err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1165         if (err != 1) {
1166                 printf("%d: error mapping lb qid\n", __LINE__);
1167                 goto fail;
1168         }
1169
1170         if (rte_event_dev_start(evdev) < 0) {
1171                 printf("%d: Error with start call\n", __LINE__);
1172                 goto fail;
1173         }
1174
1175         /* no test for device, as it ignores the port/q number */
1176         int num_stats = rte_event_dev_xstats_names_get(evdev,
1177                                         RTE_EVENT_DEV_XSTATS_PORT,
1178                                         UINT8_MAX-1, xstats_names, ids,
1179                                         XSTATS_MAX);
1180         if (num_stats != 0) {
1181                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1182                                 0, num_stats);
1183                 goto fail;
1184         }
1185
1186         num_stats = rte_event_dev_xstats_names_get(evdev,
1187                                         RTE_EVENT_DEV_XSTATS_QUEUE,
1188                                         UINT8_MAX-1, xstats_names, ids,
1189                                         XSTATS_MAX);
1190         if (num_stats != 0) {
1191                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1192                                 0, num_stats);
1193                 goto fail;
1194         }
1195
1196         cleanup(t);
1197         return 0;
1198 fail:
1199         cleanup(t);
1200         return -1;
1201 }
1202
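/*
 * Repeatedly reconfigure the same queue and port on a stopped device, then
 * restart it and pass a packet through, checking that port credits are set
 * up correctly after each reconfiguration.
 */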
1203 static int
1204 port_reconfig_credits(struct test *t)
1205 {
1206         if (init(t, 1, 1) < 0) {
1207                 printf("%d: Error initializing device\n", __LINE__);
1208                 return -1;
1209         }
1210
1211         uint32_t i;
1212         const uint32_t NUM_ITERS = 32;
1213         for (i = 0; i < NUM_ITERS; i++) {
1214                 const struct rte_event_queue_conf conf = {
1215                         .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1216                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1217                         .nb_atomic_flows = 1024,
1218                         .nb_atomic_order_sequences = 1024,
1219                 };
1220                 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1221                         printf("%d: error creating qid\n", __LINE__);
1222                         return -1;
1223                 }
1224                 t->qid[0] = 0;
1225
1226                 static const struct rte_event_port_conf port_conf = {
1227                                 .new_event_threshold = 128,
1228                                 .dequeue_depth = 32,
1229                                 .enqueue_depth = 64,
1230                                 .disable_implicit_release = 0,
1231                 };
1232                 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1233                         printf("%d Error setting up port\n", __LINE__);
1234                         return -1;
1235                 }
1236
1237                 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1238                 if (links != 1) {
1239                         printf("%d: error mapping lb qid\n", __LINE__);
1240                         goto fail;
1241                 }
1242
1243                 if (rte_event_dev_start(evdev) < 0) {
1244                         printf("%d: Error with start call\n", __LINE__);
1245                         goto fail;
1246                 }
1247
1248                 const uint32_t NPKTS = 1;
1249                 uint32_t j;
1250                 for (j = 0; j < NPKTS; j++) {
1251                         struct rte_event ev;
1252                         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1253                         if (!arp) {
1254                                 printf("%d: gen of pkt failed\n", __LINE__);
1255                                 goto fail;
1256                         }
1257                         ev.queue_id = t->qid[0];
1258                         ev.op = RTE_EVENT_OP_NEW;
1259                         ev.mbuf = arp;
1260                         int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1261                         if (err != 1) {
1262                                 printf("%d: Failed to enqueue\n", __LINE__);
1263                                 rte_event_dev_dump(0, stdout);
1264                                 goto fail;
1265                         }
1266                 }
1267
1268                 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1269
1270                 struct rte_event ev[NPKTS];
1271                 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1272                                                         NPKTS, 0);
1273                 if (deq != 1)
1274                         printf("%d error; no packet dequeued\n", __LINE__);
1275
1276                 /* let cleanup below stop the device on last iter */
1277                 if (i != NUM_ITERS-1)
1278                         rte_event_dev_stop(evdev);
1279         }
1280
1281         cleanup(t);
1282         return 0;
1283 fail:
1284         cleanup(t);
1285         return -1;
1286 }
1287
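/*
 * Reconfigure a port's links: link it to a load-balanced queue, unlink it,
 * relink it to a single-link queue (with the LB queue mapped to another
 * port) and make sure the device still starts.
 */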
1288 static int
1289 port_single_lb_reconfig(struct test *t)
1290 {
1291         if (init(t, 2, 2) < 0) {
1292                 printf("%d: Error initializing device\n", __LINE__);
1293                 goto fail;
1294         }
1295
1296         static const struct rte_event_queue_conf conf_lb_atomic = {
1297                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1298                 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1299                 .nb_atomic_flows = 1024,
1300                 .nb_atomic_order_sequences = 1024,
1301         };
1302         if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1303                 printf("%d: error creating qid\n", __LINE__);
1304                 goto fail;
1305         }
1306
1307         static const struct rte_event_queue_conf conf_single_link = {
1308                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1309                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1310         };
1311         if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1312                 printf("%d: error creating qid\n", __LINE__);
1313                 goto fail;
1314         }
1315
1316         struct rte_event_port_conf port_conf = {
1317                 .new_event_threshold = 128,
1318                 .dequeue_depth = 32,
1319                 .enqueue_depth = 64,
1320                 .disable_implicit_release = 0,
1321         };
1322         if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1323                 printf("%d Error setting up port\n", __LINE__);
1324                 goto fail;
1325         }
1326         if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1327                 printf("%d Error setting up port\n", __LINE__);
1328                 goto fail;
1329         }
1330
1331         /* link port to lb queue */
1332         uint8_t queue_id = 0;
1333         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1334                 printf("%d: error creating link for qid\n", __LINE__);
1335                 goto fail;
1336         }
1337
1338         int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1339         if (ret != 1) {
1340                 printf("%d: Error unlinking lb port\n", __LINE__);
1341                 goto fail;
1342         }
1343
1344         queue_id = 1;
1345         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1346                 printf("%d: error creating link for qid\n", __LINE__);
1347                 goto fail;
1348         }
1349
1350         queue_id = 0;
1351         int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1352         if (err != 1) {
1353                 printf("%d: error mapping lb qid\n", __LINE__);
1354                 goto fail;
1355         }
1356
1357         if (rte_event_dev_start(evdev) < 0) {
1358                 printf("%d: Error with start call\n", __LINE__);
1359                 goto fail;
1360         }
1361
1362         cleanup(t);
1363         return 0;
1364 fail:
1365         cleanup(t);
1366         return -1;
1367 }
1368
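/*
 * Walk every xstats mode and every possible queue/port id, fetching names
 * and values each time; nothing is validated here beyond not crashing.
 */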
1369 static int
1370 xstats_brute_force(struct test *t)
1371 {
1372         uint32_t i;
1373         const uint32_t XSTATS_MAX = 1024;
1374         uint32_t ids[XSTATS_MAX];
1375         uint64_t values[XSTATS_MAX];
1376         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1377
1378
1379         /* Create instance with 4 ports */
1380         if (init(t, 1, 4) < 0 ||
1381                         create_ports(t, 4) < 0 ||
1382                         create_atomic_qids(t, 1) < 0) {
1383                 printf("%d: Error initializing device\n", __LINE__);
1384                 return -1;
1385         }
1386
1387         int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1388         if (err != 1) {
1389                 printf("%d: error mapping lb qid\n", __LINE__);
1390                 goto fail;
1391         }
1392
1393         if (rte_event_dev_start(evdev) < 0) {
1394                 printf("%d: Error with start call\n", __LINE__);
1395                 goto fail;
1396         }
1397
1398         for (i = 0; i < XSTATS_MAX; i++)
1399                 ids[i] = i;
1400
1401         for (i = 0; i < 3; i++) {
1402                 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1403                 uint32_t j;
1404                 for (j = 0; j < UINT8_MAX; j++) {
1405                         rte_event_dev_xstats_names_get(evdev, mode,
1406                                 j, xstats_names, ids, XSTATS_MAX);
1407
1408                         rte_event_dev_xstats_get(evdev, mode, j, ids,
1409                                                  values, XSTATS_MAX);
1410                 }
1411         }
1412
1413         cleanup(t);
1414         return 0;
1415 fail:
1416         cleanup(t);
1417         return -1;
1418 }
1419
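/*
 * Look up individual device xstats by name, check their values after
 * enqueuing a known number of packets, and reset them one id at a time.
 */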
1420 static int
1421 xstats_id_reset_tests(struct test *t)
1422 {
1423         const int wrk_enq = 2;
1424         int err;
1425
1426         /* Create instance with 4 ports */
1427         if (init(t, 1, 4) < 0 ||
1428                         create_ports(t, 4) < 0 ||
1429                         create_atomic_qids(t, 1) < 0) {
1430                 printf("%d: Error initializing device\n", __LINE__);
1431                 return -1;
1432         }
1433
1434         /* CQ mapping to QID */
1435         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1436         if (err != 1) {
1437                 printf("%d: error mapping lb qid\n", __LINE__);
1438                 goto fail;
1439         }
1440
1441         if (rte_event_dev_start(evdev) < 0) {
1442                 printf("%d: Error with start call\n", __LINE__);
1443                 goto fail;
1444         }
1445
1446 #define XSTATS_MAX 1024
1447         int ret;
1448         uint32_t i;
1449         uint32_t ids[XSTATS_MAX];
1450         uint64_t values[XSTATS_MAX];
1451         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1452
1453         for (i = 0; i < XSTATS_MAX; i++)
1454                 ids[i] = i;
1455
1456 #define NUM_DEV_STATS 6
1457         /* Device names / values */
1458         int num_stats = rte_event_dev_xstats_names_get(evdev,
1459                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1460                                         0, xstats_names, ids, XSTATS_MAX);
1461         if (num_stats != NUM_DEV_STATS) {
1462                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1463                                 NUM_DEV_STATS, num_stats);
1464                 goto fail;
1465         }
1466         ret = rte_event_dev_xstats_get(evdev,
1467                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1468                                         0, ids, values, num_stats);
1469         if (ret != NUM_DEV_STATS) {
1470                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1471                                 NUM_DEV_STATS, ret);
1472                 goto fail;
1473         }
1474
1475 #define NPKTS 7
1476         for (i = 0; i < NPKTS; i++) {
1477                 struct rte_event ev;
1478                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1479                 if (!arp) {
1480                         printf("%d: gen of pkt failed\n", __LINE__);
1481                         goto fail;
1482                 }
1483                 ev.queue_id = t->qid[i];
1484                 ev.op = RTE_EVENT_OP_NEW;
1485                 ev.mbuf = arp;
1486                 arp->seqn = i;
1487
1488                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1489                 if (err != 1) {
1490                         printf("%d: Failed to enqueue\n", __LINE__);
1491                         goto fail;
1492                 }
1493         }
1494
1495         rte_service_run_iter_on_app_lcore(t->service_id, 1);
1496
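        /* Expected device-level xstats after enqueuing NPKTS new events and one
         * scheduler iteration: NPKTS received, NPKTS sent on to a CQ, no drops,
         * a single sched call and no starved iq/cq enqueue counts.
         */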
1497         static const char * const dev_names[] = {
1498                 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1499                 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1500         };
1501         uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1502         for (i = 0; (int)i < ret; i++) {
1503                 unsigned int id;
1504                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1505                                                                 dev_names[i],
1506                                                                 &id);
1507                 if (id != i) {
1508                         printf("%d: %s id incorrect, expected %d got %d\n",
1509                                         __LINE__, dev_names[i], i, id);
1510                         goto fail;
1511                 }
1512                 if (val != dev_expected[i]) {
1513                         printf("%d: %s value incorrect, expected %"PRIu64
1514                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1515                                 dev_expected[i], val);
1516                         goto fail;
1517                 }
1518                 /* reset to zero */
1519                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1520                                                 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1521                                                 &id,
1522                                                 1);
1523                 if (reset_ret) {
1524                         printf("%d: failed to reset successfully\n", __LINE__);
1525                         goto fail;
1526                 }
1527                 dev_expected[i] = 0;
1528                 /* check value again */
1529                 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], NULL);
1530                 if (val != dev_expected[i]) {
1531                         printf("%d: %s value incorrect, expected %"PRIu64
1532                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1533                                 dev_expected[i], val);
1534                         goto fail;
1535                 }
1536         }
1537
1538 /* 48 is the stat offset from the start of the device's whole xstats.
1539  * This WILL break every time we add a statistic to a port
1540  * or the device, but there is no other way to test it.
1541  */
1542 #define PORT_OFF 48
1543 /* num stats for the tested port. CQ size adds more stats to a port */
1544 #define NUM_PORT_STATS 21
1545 /* the port to test. */
1546 #define PORT 2
1547         num_stats = rte_event_dev_xstats_names_get(evdev,
1548                                         RTE_EVENT_DEV_XSTATS_PORT, PORT,
1549                                         xstats_names, ids, XSTATS_MAX);
1550         if (num_stats != NUM_PORT_STATS) {
1551                 printf("%d: expected %d stats, got return %d\n",
1552                         __LINE__, NUM_PORT_STATS, num_stats);
1553                 goto fail;
1554         }
1555         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1556                                         ids, values, num_stats);
1557
1558         if (ret != NUM_PORT_STATS) {
1559                 printf("%d: expected %d stats, got return %d\n",
1560                                 __LINE__, NUM_PORT_STATS, ret);
1561                 goto fail;
1562         }
1563         static const char * const port_names[] = {
1564                 "port_2_rx",
1565                 "port_2_tx",
1566                 "port_2_drop",
1567                 "port_2_inflight",
1568                 "port_2_avg_pkt_cycles",
1569                 "port_2_credits",
1570                 "port_2_rx_ring_used",
1571                 "port_2_rx_ring_free",
1572                 "port_2_cq_ring_used",
1573                 "port_2_cq_ring_free",
1574                 "port_2_dequeue_calls",
1575                 "port_2_dequeues_returning_0",
1576                 "port_2_dequeues_returning_1-4",
1577                 "port_2_dequeues_returning_5-8",
1578                 "port_2_dequeues_returning_9-12",
1579                 "port_2_dequeues_returning_13-16",
1580                 "port_2_dequeues_returning_17-20",
1581                 "port_2_dequeues_returning_21-24",
1582                 "port_2_dequeues_returning_25-28",
1583                 "port_2_dequeues_returning_29-32",
1584                 "port_2_dequeues_returning_33-36",
1585         };
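        /* Expected port 2 xstats: port 2 is the only port linked to the QID, so
         * all NPKTS events sit in its CQ (tx == inflight == cq_ring_used ==
         * NPKTS); 7 used + 25 free implies a CQ depth of 32 for the test ports.
         */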
1586         uint64_t port_expected[] = {
1587                 0, /* rx */
1588                 NPKTS, /* tx */
1589                 0, /* drop */
1590                 NPKTS, /* inflight */
1591                 0, /* avg pkt cycles */
1592                 0, /* credits */
1593                 0, /* rx ring used */
1594                 4096, /* rx ring free */
1595                 NPKTS,  /* cq ring used */
1596                 25, /* cq ring free */
1597                 0, /* dequeue zero calls */
1598                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1599                 0, 0, 0, 0, 0,
1600         };
1601         uint64_t port_expected_zero[] = {
1602                 0, /* rx */
1603                 0, /* tx */
1604                 0, /* drop */
1605                 NPKTS, /* inflight */
1606                 0, /* avg pkt cycles */
1607                 0, /* credits */
1608                 0, /* rx ring used */
1609                 4096, /* rx ring free */
1610                 NPKTS,  /* cq ring used */
1611                 25, /* cq ring free */
1612                 0, /* dequeue zero calls */
1613                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1614                 0, 0, 0, 0, 0,
1615         };
1616         if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1617                         RTE_DIM(port_names) != NUM_PORT_STATS) {
1618                 printf("%d: port array of wrong size\n", __LINE__);
1619                 goto fail;
1620         }
1621
1622         int failed = 0;
1623         for (i = 0; (int)i < ret; i++) {
1624                 unsigned int id;
1625                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1626                                                                 port_names[i],
1627                                                                 &id);
1628                 if (id != i + PORT_OFF) {
1629                         printf("%d: %s id incorrect, expected %d got %d\n",
1630                                         __LINE__, port_names[i], i+PORT_OFF,
1631                                         id);
1632                         failed = 1;
1633                 }
1634                 if (val != port_expected[i]) {
1635                         printf("%d: %s value incorrect, expected %"PRIu64
1636                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1637                                 port_expected[i], val);
1638                         failed = 1;
1639                 }
1640                 /* reset to zero */
1641                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1642                                                 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1643                                                 &id,
1644                                                 1);
1645                 if (reset_ret) {
1646                         printf("%d: failed to reset successfully\n", __LINE__);
1647                         failed = 1;
1648                 }
1649                 /* check value again */
1650                 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], NULL);
1651                 if (val != port_expected_zero[i]) {
1652                         printf("%d: %s value incorrect, expected %"PRIu64
1653                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1654                                 port_expected_zero[i], val);
1655                         failed = 1;
1656                 }
1657         }
1658         if (failed)
1659                 goto fail;
1660
1661 /* num queue stats */
1662 #define NUM_Q_STATS 16
1663 /* queue offset from the start of the device's whole xstats.
1664  * This will break every time we add a statistic to a device/port/queue
1665  */
1666 #define QUEUE_OFF 90
1667         const uint32_t queue = 0;
1668         num_stats = rte_event_dev_xstats_names_get(evdev,
1669                                         RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1670                                         xstats_names, ids, XSTATS_MAX);
1671         if (num_stats != NUM_Q_STATS) {
1672                 printf("%d: expected %d stats, got return %d\n",
1673                         __LINE__, NUM_Q_STATS, num_stats);
1674                 goto fail;
1675         }
1676         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1677                                         queue, ids, values, num_stats);
1678         if (ret != NUM_Q_STATS) {
1679                 printf("%d: expected %d stats, got return %d\n", __LINE__, NUM_Q_STATS, ret);
1680                 goto fail;
1681         }
1682         static const char * const queue_names[] = {
1683                 "qid_0_rx",
1684                 "qid_0_tx",
1685                 "qid_0_drop",
1686                 "qid_0_inflight",
1687                 "qid_0_iq_0_used",
1688                 "qid_0_iq_1_used",
1689                 "qid_0_iq_2_used",
1690                 "qid_0_iq_3_used",
1691                 "qid_0_port_0_pinned_flows",
1692                 "qid_0_port_0_packets",
1693                 "qid_0_port_1_pinned_flows",
1694                 "qid_0_port_1_packets",
1695                 "qid_0_port_2_pinned_flows",
1696                 "qid_0_port_2_packets",
1697                 "qid_0_port_3_pinned_flows",
1698                 "qid_0_port_3_packets",
1699         };
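        /* Expected qid 0 xstats: all 7 events were received and scheduled out
         * and are still inflight; only port 2 is linked, so the pinned-flow (1)
         * and packet (7) counts appear against the qid_0_port_2_* entries.
         */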
1700         uint64_t queue_expected[] = {
1701                 7, /* rx */
1702                 7, /* tx */
1703                 0, /* drop */
1704                 7, /* inflight */
1705                 0, /* iq 0 used */
1706                 0, /* iq 1 used */
1707                 0, /* iq 2 used */
1708                 0, /* iq 3 used */
1709                 /* QID-to-Port: pinned_flows, packets */
1710                 0, 0,
1711                 0, 0,
1712                 1, 7,
1713                 0, 0,
1714         };
1715         uint64_t queue_expected_zero[] = {
1716                 0, /* rx */
1717                 0, /* tx */
1718                 0, /* drop */
1719                 7, /* inflight */
1720                 0, /* iq 0 used */
1721                 0, /* iq 1 used */
1722                 0, /* iq 2 used */
1723                 0, /* iq 3 used */
1724                 /* QID-to-Port: pinned_flows, packets */
1725                 0, 0,
1726                 0, 0,
1727                 1, 0,
1728                 0, 0,
1729         };
1730         if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1731                         RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1732                         RTE_DIM(queue_names) != NUM_Q_STATS) {
1733                 printf("%d : queue array of wrong size\n", __LINE__);
1734                 goto fail;
1735         }
1736
1737         failed = 0;
1738         for (i = 0; (int)i < ret; i++) {
1739                 unsigned int id;
1740                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1741                                                                 queue_names[i],
1742                                                                 &id);
1743                 if (id != i + QUEUE_OFF) {
1744                         printf("%d: %s id incorrect, expected %d got %d\n",
1745                                         __LINE__, queue_names[i], i+QUEUE_OFF,
1746                                         id);
1747                         failed = 1;
1748                 }
1749                 if (val != queue_expected[i]) {
1750                         printf("%d: stat %d: %s value incorrect, expected %"PRIu64
1751                                 " got %"PRIu64"\n", __LINE__, i,
1752                                 queue_names[i], queue_expected[i], val);
1753                         failed = 1;
1754                 }
1755                 /* reset to zero */
1756                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1757                                                 RTE_EVENT_DEV_XSTATS_QUEUE,
1758                                                 queue, &id, 1);
1759                 if (reset_ret) {
1760                         printf("%d: failed to reset successfully\n", __LINE__);
1761                         failed = 1;
1762                 }
1763                 /* check value again */
1764                 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1765                                                         NULL);
1766                 if (val != queue_expected_zero[i]) {
1767                         printf("%d: %s value incorrect, expected %"PRIu64
1768                                 " got %"PRIu64"\n", __LINE__, queue_names[i],
1769                                 queue_expected_zero[i], val);
1770                         failed = 1;
1771                 }
1772         }
1773
1774         if (failed)
1775                 goto fail;
1776
1777         cleanup(t);
1778         return 0;
1779 fail:
1780         cleanup(t);
1781         return -1;
1782 }
1783
1784 static int
1785 ordered_reconfigure(struct test *t)
1786 {
1787         if (init(t, 1, 1) < 0 ||
1788                         create_ports(t, 1) < 0) {
1789                 printf("%d: Error initializing device\n", __LINE__);
1790                 return -1;
1791         }
1792
1793         const struct rte_event_queue_conf conf = {
1794                         .schedule_type = RTE_SCHED_TYPE_ORDERED,
1795                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1796                         .nb_atomic_flows = 1024,
1797                         .nb_atomic_order_sequences = 1024,
1798         };
1799
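        /* Set up the same ordered queue twice before starting the device, to
         * check that reconfiguring an existing queue succeeds.
         */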
1800         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1801                 printf("%d: error creating qid\n", __LINE__);
1802                 goto failed;
1803         }
1804
1805         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1806                 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1807                 goto failed;
1808         }
1809
1810         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1811         if (rte_event_dev_start(evdev) < 0) {
1812                 printf("%d: Error with start call\n", __LINE__);
1813                 return -1;
1814         }
1815
1816         cleanup(t);
1817         return 0;
1818 failed:
1819         cleanup(t);
1820         return -1;
1821 }
1822
1823 static int
1824 qid_priorities(struct test *t)
1825 {
1826         /* Test works by having a CQ with enough empty space for all packets,
1827          * and enqueueing 3 packets to 3 QIDs. They must return based on the
1828          * priority of the QID, not the ingress order, to pass the test
1829          */
1830         unsigned int i;
1831         /* Create instance with 1 port and 3 qids */
1832         if (init(t, 3, 1) < 0 ||
1833                         create_ports(t, 1) < 0) {
1834                 printf("%d: Error initializing device\n", __LINE__);
1835                 return -1;
1836         }
1837
1838         for (i = 0; i < 3; i++) {
1839                 /* Create QID */
1840                 const struct rte_event_queue_conf conf = {
1841                         .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1842                         /* increase priority (0 == highest), as we go */
1843                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1844                         .nb_atomic_flows = 1024,
1845                         .nb_atomic_order_sequences = 1024,
1846                 };
1847
1848                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1849                         printf("%d: error creating qid %d\n", __LINE__, i);
1850                         return -1;
1851                 }
1852                 t->qid[i] = i;
1853         }
1854         t->nb_qids = i;
1855         /* map all QIDs to port */
1856         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1857
1858         if (rte_event_dev_start(evdev) < 0) {
1859                 printf("%d: Error with start call\n", __LINE__);
1860                 return -1;
1861         }
1862
1863         /* enqueue 3 packets, setting seqn and QID to check priority */
1864         for (i = 0; i < 3; i++) {
1865                 struct rte_event ev;
1866                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1867                 if (!arp) {
1868                         printf("%d: gen of pkt failed\n", __LINE__);
1869                         return -1;
1870                 }
1871                 ev.queue_id = t->qid[i];
1872                 ev.op = RTE_EVENT_OP_NEW;
1873                 ev.mbuf = arp;
1874                 arp->seqn = i;
1875
1876                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1877                 if (err != 1) {
1878                         printf("%d: Failed to enqueue\n", __LINE__);
1879                         return -1;
1880                 }
1881         }
1882
1883         rte_service_run_iter_on_app_lcore(t->service_id, 1);
1884
1885         /* dequeue packets, verify priority was upheld */
1886         struct rte_event ev[32];
1887         uint32_t deq_pkts =
1888                 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1889         if (deq_pkts != 3) {
1890                 printf("%d: failed to deq packets\n", __LINE__);
1891                 rte_event_dev_dump(evdev, stdout);
1892                 return -1;
1893         }
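        /* QID 2 was created with the highest priority (lowest value), so its
         * packet (seqn 2) must come out first, then QID 1, then QID 0.
         */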
1894         for (i = 0; i < 3; i++) {
1895                 if (ev[i].mbuf->seqn != 2-i) {
1896                         printf(
1897                                 "%d: qid priority test: seqn %d incorrectly prioritized\n",
1898                                         __LINE__, i);
1899                 }
1900         }
1901
1902         cleanup(t);
1903         return 0;
1904 }
1905
1906 static int
1907 load_balancing(struct test *t)
1908 {
1909         const int rx_enq = 0;
1910         int err;
1911         uint32_t i;
1912
1913         if (init(t, 1, 4) < 0 ||
1914                         create_ports(t, 4) < 0 ||
1915                         create_atomic_qids(t, 1) < 0) {
1916                 printf("%d: Error initializing device\n", __LINE__);
1917                 return -1;
1918         }
1919
1920         for (i = 0; i < 3; i++) {
1921                 /* map port 1 - 3 inclusive */
1922                 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1923                                 NULL, 1) != 1) {
1924                         printf("%d: error mapping qid to port %d\n",
1925                                         __LINE__, i);
1926                         return -1;
1927                 }
1928         }
1929
1930         if (rte_event_dev_start(evdev) < 0) {
1931                 printf("%d: Error with start call\n", __LINE__);
1932                 return -1;
1933         }
1934
1935         /************** FORWARD ****************/
1936         /*
1937          * Create a set of flows that test the load-balancing operation of the
1938          * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1939          * with a new flow, which should be sent to the 3rd mapped CQ
1940          */
1941         static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1942
1943         for (i = 0; i < RTE_DIM(flows); i++) {
1944                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1945                 if (!arp) {
1946                         printf("%d: gen of pkt failed\n", __LINE__);
1947                         return -1;
1948                 }
1949
1950                 struct rte_event ev = {
1951                                 .op = RTE_EVENT_OP_NEW,
1952                                 .queue_id = t->qid[0],
1953                                 .flow_id = flows[i],
1954                                 .mbuf = arp,
1955                 };
1956                 /* generate pkt and enqueue */
1957                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1958                 if (err < 0) {
1959                         printf("%d: Failed to enqueue\n", __LINE__);
1960                         return -1;
1961                 }
1962         }
1963
1964         rte_service_run_iter_on_app_lcore(t->service_id, 1);
1965
1966         struct test_event_dev_stats stats;
1967         err = test_event_dev_stats_get(evdev, &stats);
1968         if (err) {
1969                 printf("%d: failed to get stats\n", __LINE__);
1970                 return -1;
1971         }
1972
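        /* With atomic scheduling each flow stays pinned to one CQ: the test
         * expects flow 0 (4 pkts) on port 1, flow 1 (2 pkts) on port 2 and
         * flow 2 (3 pkts) on port 3, giving the inflight counts checked below.
         */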
1973         if (stats.port_inflight[1] != 4) {
1974                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1975                                 __func__);
1976                 return -1;
1977         }
1978         if (stats.port_inflight[2] != 2) {
1979                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1980                                 __func__);
1981                 return -1;
1982         }
1983         if (stats.port_inflight[3] != 3) {
1984                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
1985                                 __func__);
1986                 return -1;
1987         }
1988
1989         cleanup(t);
1990         return 0;
1991 }
1992
1993 static int
1994 load_balancing_history(struct test *t)
1995 {
1996         struct test_event_dev_stats stats = {0};
1997         const int rx_enq = 0;
1998         int err;
1999         uint32_t i;
2000
2001         /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2002         if (init(t, 1, 4) < 0 ||
2003                         create_ports(t, 4) < 0 ||
2004                         create_atomic_qids(t, 1) < 0)
2005                 return -1;
2006
2007         /* CQ mapping to QID */
2008         if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2009                 printf("%d: error mapping port 1 qid\n", __LINE__);
2010                 return -1;
2011         }
2012         if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2013                 printf("%d: error mapping port 2 qid\n", __LINE__);
2014                 return -1;
2015         }
2016         if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2017                 printf("%d: error mapping port 3 qid\n", __LINE__);
2018                 return -1;
2019         }
2020         if (rte_event_dev_start(evdev) < 0) {
2021                 printf("%d: Error with start call\n", __LINE__);
2022                 return -1;
2023         }
2024
2025         /*
2026          * Create a set of flows that test the load-balancing operation of the
2027          * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2028          * the packet from CQ 0, send in a new set of flows. Ensure that:
2029          *  1. The new flow 3 gets into the empty CQ0
2030          *  2. Packets for the existing flow 1 get added to CQ1
2031          *  3. The next flow 0 pkt now goes to CQ2, since CQ0 and CQ1 contain
2032          *     more outstanding pkts
2033          *
2034          *  This test makes sure that when a flow ends (i.e. all packets
2035          *  have been completed for that flow), that the flow can be moved
2036          *  to a different CQ when new packets come in for that flow.
2037          */
2038         static uint32_t flows1[] = {0, 1, 1, 2};
2039
2040         for (i = 0; i < RTE_DIM(flows1); i++) {
2041                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2042                 struct rte_event ev = {
2043                                 .flow_id = flows1[i],
2044                                 .op = RTE_EVENT_OP_NEW,
2045                                 .queue_id = t->qid[0],
2046                                 .event_type = RTE_EVENT_TYPE_CPU,
2047                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2048                                 .mbuf = arp
2049                 };
2050
2051                 if (!arp) {
2052                         printf("%d: gen of pkt failed\n", __LINE__);
2053                         return -1;
2054                 }
2055                 arp->hash.rss = flows1[i];
2056                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2057                 if (err < 0) {
2058                         printf("%d: Failed to enqueue\n", __LINE__);
2059                         return -1;
2060                 }
2061         }
2062
2063         /* call the scheduler */
2064         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2065
2066         /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2067         struct rte_event ev;
2068         if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2069                 printf("%d: failed to dequeue\n", __LINE__);
2070                 return -1;
2071         }
2072         if (ev.mbuf->hash.rss != flows1[0]) {
2073                 printf("%d: unexpected flow received\n", __LINE__);
2074                 return -1;
2075         }
2076
2077         /* drop the flow 0 packet from port 1 */
2078         rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2079
2080         /* call the scheduler */
2081         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2082
2083         /*
2084          * Set up the next set of flows, first a new flow to fill up
2085          * CQ 0, so that the next flow 0 packet should go to CQ2
2086          */
2087         static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2088
2089         for (i = 0; i < RTE_DIM(flows2); i++) {
2090                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2091                 struct rte_event ev = {
2092                                 .flow_id = flows2[i],
2093                                 .op = RTE_EVENT_OP_NEW,
2094                                 .queue_id = t->qid[0],
2095                                 .event_type = RTE_EVENT_TYPE_CPU,
2096                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2097                                 .mbuf = arp
2098                 };
2099
2100                 if (!arp) {
2101                         printf("%d: gen of pkt failed\n", __LINE__);
2102                         return -1;
2103                 }
2104                 arp->hash.rss = flows2[i];
2105
2106                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2107                 if (err < 0) {
2108                         printf("%d: Failed to enqueue\n", __LINE__);
2109                         return -1;
2110                 }
2111         }
2112
2113         /* schedule */
2114         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2115
2116         err = test_event_dev_stats_get(evdev, &stats);
2117         if (err) {
2118                 printf("%d:failed to get stats\n", __LINE__);
2119                 return -1;
2120         }
2121
2122         /*
2123          * Now check the resulting inflights on each port.
2124          */
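        /* Expected distribution: flow 3 (3 pkts) filled the emptied CQ on
         * port 1, flow 1's two new pkts joined its two outstanding pkts on
         * port 2, and flow 0, no longer pinned, moved to port 3 alongside the
         * single flow 2 pkt.
         */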
2125         if (stats.port_inflight[1] != 3) {
2126                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2127                                 __func__);
2128                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2129                                 (unsigned int)stats.port_inflight[1],
2130                                 (unsigned int)stats.port_inflight[2],
2131                                 (unsigned int)stats.port_inflight[3]);
2132                 return -1;
2133         }
2134         if (stats.port_inflight[2] != 4) {
2135                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2136                                 __func__);
2137                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2138                                 (unsigned int)stats.port_inflight[1],
2139                                 (unsigned int)stats.port_inflight[2],
2140                                 (unsigned int)stats.port_inflight[3]);
2141                 return -1;
2142         }
2143         if (stats.port_inflight[3] != 2) {
2144                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2145                                 __func__);
2146                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2147                                 (unsigned int)stats.port_inflight[1],
2148                                 (unsigned int)stats.port_inflight[2],
2149                                 (unsigned int)stats.port_inflight[3]);
2150                 return -1;
2151         }
2152
2153         for (i = 1; i <= 3; i++) {
2154                 struct rte_event ev;
2155                 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2156                         rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2157         }
2158         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2159
2160         cleanup(t);
2161         return 0;
2162 }
2163
2164 static int
2165 invalid_qid(struct test *t)
2166 {
2167         struct test_event_dev_stats stats;
2168         const int rx_enq = 0;
2169         int err;
2170         uint32_t i;
2171
2172         if (init(t, 1, 4) < 0 ||
2173                         create_ports(t, 4) < 0 ||
2174                         create_atomic_qids(t, 1) < 0) {
2175                 printf("%d: Error initializing device\n", __LINE__);
2176                 return -1;
2177         }
2178
2179         /* CQ mapping to QID */
2180         for (i = 0; i < 4; i++) {
2181                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2182                                 NULL, 1);
2183                 if (err != 1) {
2184                         printf("%d: error mapping port %u qid\n", __LINE__, i);
2185                         return -1;
2186                 }
2187         }
2188
2189         if (rte_event_dev_start(evdev) < 0) {
2190                 printf("%d: Error with start call\n", __LINE__);
2191                 return -1;
2192         }
2193
2194         /*
2195          * Send in a packet with an invalid qid to the scheduler.
2196          * We should see the packet enqueued OK, but the inflights for
2197          * that packet should not be incremented, and the rx_dropped
2198          * should be incremented.
2199          */
2200         static uint32_t flows1[] = {20};
2201
2202         for (i = 0; i < RTE_DIM(flows1); i++) {
2203                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2204                 if (!arp) {
2205                         printf("%d: gen of pkt failed\n", __LINE__);
2206                         return -1;
2207                 }
2208
2209                 struct rte_event ev = {
2210                                 .op = RTE_EVENT_OP_NEW,
2211                                 .queue_id = t->qid[0] + flows1[i],
2212                                 .flow_id = i,
2213                                 .mbuf = arp,
2214                 };
2215                 /* generate pkt and enqueue */
2216                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2217                 if (err < 0) {
2218                         printf("%d: Failed to enqueue\n", __LINE__);
2219                         return -1;
2220                 }
2221         }
2222
2223         /* call the scheduler */
2224         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2225
2226         err = test_event_dev_stats_get(evdev, &stats);
2227         if (err) {
2228                 printf("%d: failed to get stats\n", __LINE__);
2229                 return -1;
2230         }
2231
2232         /*
2233          * Now check the resulting inflights on the port, and the rx_dropped.
2234          */
2235         if (stats.port_inflight[0] != 0) {
2236                 printf("%d:%s: port 0 inflight count not correct\n", __LINE__,
2237                                 __func__);
2238                 rte_event_dev_dump(evdev, stdout);
2239                 return -1;
2240         }
2241         if (stats.port_rx_dropped[0] != 1) {
2242                 printf("%d:%s: port 0 drops not correct\n", __LINE__, __func__);
2243                 rte_event_dev_dump(evdev, stdout);
2244                 return -1;
2245         }
2246         /* each packet drop should only be counted in one place - port or dev */
2247         if (stats.rx_dropped != 0) {
2248                 printf("%d:%s: device rx_dropped count not correct\n", __LINE__,
2249                                 __func__);
2250                 rte_event_dev_dump(evdev, stdout);
2251                 return -1;
2252         }
2253
2254         cleanup(t);
2255         return 0;
2256 }
2257
2258 static int
2259 single_packet(struct test *t)
2260 {
2261         const uint32_t MAGIC_SEQN = 7321;
2262         struct rte_event ev;
2263         struct test_event_dev_stats stats;
2264         const int rx_enq = 0;
2265         const int wrk_enq = 2;
2266         int err;
2267
2268         /* Create instance with 4 ports */
2269         if (init(t, 1, 4) < 0 ||
2270                         create_ports(t, 4) < 0 ||
2271                         create_atomic_qids(t, 1) < 0) {
2272                 printf("%d: Error initializing device\n", __LINE__);
2273                 return -1;
2274         }
2275
2276         /* CQ mapping to QID */
2277         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2278         if (err != 1) {
2279                 printf("%d: error mapping lb qid\n", __LINE__);
2280                 cleanup(t);
2281                 return -1;
2282         }
2283
2284         if (rte_event_dev_start(evdev) < 0) {
2285                 printf("%d: Error with start call\n", __LINE__);
2286                 return -1;
2287         }
2288
2289         /************** Gen pkt and enqueue ****************/
2290         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2291         if (!arp) {
2292                 printf("%d: gen of pkt failed\n", __LINE__);
2293                 return -1;
2294         }
2295
2296         ev.op = RTE_EVENT_OP_NEW;
2297         ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2298         ev.mbuf = arp;
2299         ev.queue_id = 0;
2300         ev.flow_id = 3;
2301         arp->seqn = MAGIC_SEQN;
2302
2303         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2304         if (err < 0) {
2305                 printf("%d: Failed to enqueue\n", __LINE__);
2306                 return -1;
2307         }
2308
2309         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2310
2311         err = test_event_dev_stats_get(evdev, &stats);
2312         if (err) {
2313                 printf("%d: failed to get stats\n", __LINE__);
2314                 return -1;
2315         }
2316
2317         if (stats.rx_pkts != 1 ||
2318                         stats.tx_pkts != 1 ||
2319                         stats.port_inflight[wrk_enq] != 1) {
2320                 printf("%d: Sched core didn't handle pkt as expected\n",
2321                                 __LINE__);
2322                 rte_event_dev_dump(evdev, stdout);
2323                 return -1;
2324         }
2325
2326         uint32_t deq_pkts;
2327
2328         deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2329         if (deq_pkts < 1) {
2330                 printf("%d: Failed to deq\n", __LINE__);
2331                 return -1;
2332         }
2333
2334         err = test_event_dev_stats_get(evdev, &stats);
2335         if (err) {
2336                 printf("%d: failed to get stats\n", __LINE__);
2337                 return -1;
2338         }
2339
2341         if (ev.mbuf->seqn != MAGIC_SEQN) {
2342                 printf("%d: magic sequence number not dequeued\n", __LINE__);
2343                 return -1;
2344         }
2345
2346         rte_pktmbuf_free(ev.mbuf);
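        /* Release the event to drop the atomic context; once the scheduler has
         * run again the port's inflight count should drop back to zero.
         */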
2347         err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2348         if (err < 0) {
2349                 printf("%d: Failed to enqueue\n", __LINE__);
2350                 return -1;
2351         }
2352         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2353
2354         err = test_event_dev_stats_get(evdev, &stats);
2355         if (stats.port_inflight[wrk_enq] != 0) {
2356                 printf("%d: port inflight not correct\n", __LINE__);
2357                 return -1;
2358         }
2359
2360         cleanup(t);
2361         return 0;
2362 }
2363
2364 static int
2365 inflight_counts(struct test *t)
2366 {
2367         struct rte_event ev;
2368         struct test_event_dev_stats stats;
2369         const int rx_enq = 0;
2370         const int p1 = 1;
2371         const int p2 = 2;
2372         int err;
2373         int i;
2374
2375         /* Create instance with 3 ports and 2 atomic QIDs */
2376         if (init(t, 2, 3) < 0 ||
2377                         create_ports(t, 3) < 0 ||
2378                         create_atomic_qids(t, 2) < 0) {
2379                 printf("%d: Error initializing device\n", __LINE__);
2380                 return -1;
2381         }
2382
2383         /* CQ mapping to QID */
2384         err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2385         if (err != 1) {
2386                 printf("%d: error mapping lb qid\n", __LINE__);
2387                 cleanup(t);
2388                 return -1;
2389         }
2390         err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2391         if (err != 1) {
2392                 printf("%d: error mapping lb qid\n", __LINE__);
2393                 cleanup(t);
2394                 return -1;
2395         }
2396
2397         if (rte_event_dev_start(evdev) < 0) {
2398                 printf("%d: Error with start call\n", __LINE__);
2399                 return -1;
2400         }
2401
2402         /************** FORWARD ****************/
2403 #define QID1_NUM 5
2404         for (i = 0; i < QID1_NUM; i++) {
2405                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2406
2407                 if (!arp) {
2408                         printf("%d: gen of pkt failed\n", __LINE__);
2409                         goto err;
2410                 }
2411
2412                 ev.queue_id =  t->qid[0];
2413                 ev.op = RTE_EVENT_OP_NEW;
2414                 ev.mbuf = arp;
2415                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2416                 if (err != 1) {
2417                         printf("%d: Failed to enqueue\n", __LINE__);
2418                         goto err;
2419                 }
2420         }
2421 #define QID2_NUM 3
2422         for (i = 0; i < QID2_NUM; i++) {
2423                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2424
2425                 if (!arp) {
2426                         printf("%d: gen of pkt failed\n", __LINE__);
2427                         goto err;
2428                 }
2429                 ev.queue_id =  t->qid[1];
2430                 ev.op = RTE_EVENT_OP_NEW;
2431                 ev.mbuf = arp;
2432                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2433                 if (err != 1) {
2434                         printf("%d: Failed to enqueue\n", __LINE__);
2435                         goto err;
2436                 }
2437         }
2438
2439         /* schedule */
2440         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2441
2442         err = test_event_dev_stats_get(evdev, &stats);
2443         if (err) {
2444                 printf("%d: failed to get stats\n", __LINE__);
2445                 goto err;
2446         }
2447
2448         if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2449                         stats.tx_pkts != QID1_NUM + QID2_NUM) {
2450                 printf("%d: Sched core didn't handle pkt as expected\n",
2451                                 __LINE__);
2452                 goto err;
2453         }
2454
2455         if (stats.port_inflight[p1] != QID1_NUM) {
2456                 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2457                                 __func__);
2458                 goto err;
2459         }
2460         if (stats.port_inflight[p2] != QID2_NUM) {
2461                 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2462                                 __func__);
2463                 goto err;
2464         }
2465
2466         /************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
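        /* Dequeuing alone must not reduce a port's inflight count; it should
         * only drop after release_ev has been enqueued for each event and the
         * scheduler has run again.
         */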
2467         /* port 1 */
2468         struct rte_event events[QID1_NUM + QID2_NUM];
2469         uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2470                         RTE_DIM(events), 0);
2471
2472         if (deq_pkts != QID1_NUM) {
2473                 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2474                 goto err;
2475         }
2476         err = test_event_dev_stats_get(evdev, &stats);
2477         if (stats.port_inflight[p1] != QID1_NUM) {
2478                 printf("%d: port 1 inflight decremented after DEQ\n",
2479                                 __LINE__);
2480                 goto err;
2481         }
2482         for (i = 0; i < QID1_NUM; i++) {
2483                 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2484                                 1);
2485                 if (err != 1) {
2486                         printf("%d: %s rte enqueue of inf release failed\n",
2487                                 __LINE__, __func__);
2488                         goto err;
2489                 }
2490         }
2491
2492         /*
2493          * As the scheduler core decrements inflights, it needs to run to
2494          * process packets to act on the drop messages
2495          */
2496         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2497
2498         err = test_event_dev_stats_get(evdev, &stats);
2499         if (stats.port_inflight[p1] != 0) {
2500                 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2501                 goto err;
2502         }
2503
2504         /* port2 */
2505         deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2506                         RTE_DIM(events), 0);
2507         if (deq_pkts != QID2_NUM) {
2508                 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2509                 goto err;
2510         }
2511         err = test_event_dev_stats_get(evdev, &stats);
2512         if (stats.port_inflight[p2] != QID2_NUM) {
2513                 printf("%d: port 2 inflight decremented after DEQ\n",
2514                                 __LINE__);
2515                 goto err;
2516         }
2517         for (i = 0; i < QID2_NUM; i++) {
2518                 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2519                                 1);
2520                 if (err != 1) {
2521                         printf("%d: %s rte enqueue of inf release failed\n",
2522                                 __LINE__, __func__);
2523                         goto err;
2524                 }
2525         }
2526
2527         /*
2528          * As the scheduler core decrements inflights, it needs to run to
2529          * process packets to act on the drop messages
2530          */
2531         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2532
2533         err = test_event_dev_stats_get(evdev, &stats);
2534         if (stats.port_inflight[p2] != 0) {
2535                 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2536                 goto err;
2537         }
2538         cleanup(t);
2539         return 0;
2540
2541 err:
2542         rte_event_dev_dump(evdev, stdout);
2543         cleanup(t);
2544         return -1;
2545 }
2546
2547 static int
2548 parallel_basic(struct test *t, int check_order)
2549 {
2550         const uint8_t rx_port = 0;
2551         const uint8_t w1_port = 1;
2552         const uint8_t w3_port = 3;
2553         const uint8_t tx_port = 4;
2554         int err;
2555         int i;
2556         uint32_t deq_pkts, j;
2557         struct rte_mbuf *mbufs[3];
2559         const uint32_t MAGIC_SEQN = 1234;
2560
2561         /* Create instance with 5 ports: rx, 3 workers and tx */
2562         if (init(t, 2, tx_port + 1) < 0 ||
2563                         create_ports(t, tx_port + 1) < 0 ||
2564                         (check_order ?  create_ordered_qids(t, 1) :
2565                                 create_unordered_qids(t, 1)) < 0 ||
2566                         create_directed_qids(t, 1, &tx_port)) {
2567                 printf("%d: Error initializing device\n", __LINE__);
2568                 return -1;
2569         }
2570
2571         /*
2572          * CQ mapping to QID
2573          * We need three ports, all mapped to the same ordered qid0. Then we'll
2574          * take a packet out to each port, re-enqueue in reverse order,
2575          * then make sure the reordering has taken place properly when we
2576          * dequeue from the tx_port.
2577          *
2578          * Simplified test setup diagram:
2579          *
2580          * rx_port        w1_port
2581          *        \     /         \
2582          *         qid0 - w2_port - qid1
2583          *              \         /     \
2584          *                w3_port        tx_port
2585          */
2586         /* CQ mapping to QID for LB ports (directed mapped on create) */
2587         for (i = w1_port; i <= w3_port; i++) {
2588                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2589                                 1);
2590                 if (err != 1) {
2591                         printf("%d: error mapping lb qid\n", __LINE__);
2592                         cleanup(t);
2593                         return -1;
2594                 }
2595         }
2596
2597         if (rte_event_dev_start(evdev) < 0) {
2598                 printf("%d: Error with start call\n", __LINE__);
2599                 return -1;
2600         }
2601
2602         /* Enqueue 3 packets to the rx port */
2603         for (i = 0; i < 3; i++) {
2604                 struct rte_event ev;
2605                 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2606                 if (!mbufs[i]) {
2607                         printf("%d: gen of pkt failed\n", __LINE__);
2608                         return -1;
2609                 }
2610
2611                 ev.queue_id = t->qid[0];
2612                 ev.op = RTE_EVENT_OP_NEW;
2613                 ev.mbuf = mbufs[i];
2614                 mbufs[i]->seqn = MAGIC_SEQN + i;
2615
2616                 /* generate pkt and enqueue */
2617                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2618                 if (err != 1) {
2619                         printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2620                                         __LINE__, i, err);
2621                         return -1;
2622                 }
2623         }
2624
2625         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2626
2627         /* use extra slot to make logic in loops easier */
2628         struct rte_event deq_ev[w3_port + 1];
2629
2630         /* Dequeue the 3 packets, one from each worker port */
2631         for (i = w1_port; i <= w3_port; i++) {
2632                 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2633                                 &deq_ev[i], 1, 0);
2634                 if (deq_pkts != 1) {
2635                         printf("%d: Failed to deq\n", __LINE__);
2636                         rte_event_dev_dump(evdev, stdout);
2637                         return -1;
2638                 }
2639         }
2640
2641         /* Enqueue each packet in reverse order, flushing after each one */
2642         for (i = w3_port; i >= w1_port; i--) {
2643
2644                 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2645                 deq_ev[i].queue_id = t->qid[1];
2646                 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2647                 if (err != 1) {
2648                         printf("%d: Failed to enqueue\n", __LINE__);
2649                         return -1;
2650                 }
2651         }
2652         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2653
2654         /* dequeue from the tx ports, we should get 3 packets */
2655         deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2656                         3, 0);
2657
2658         /* Check to see if we've got all 3 packets */
2659         if (deq_pkts != 3) {
2660                 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2661                         __LINE__, deq_pkts, tx_port);
2662                 rte_event_dev_dump(evdev, stdout);
2663                 return 1;
2664         }
2665
2666         /* Check to see if the sequence numbers are in expected order */
2667         if (check_order) {
2668                 for (j = 0 ; j < deq_pkts ; j++) {
2669                         if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2670                                 printf(
2671                                         "%d: Incorrect sequence number(%d) from port %d\n",
2672                                         __LINE__, deq_ev[j].mbuf->seqn, tx_port);
2673                                 return -1;
2674                         }
2675                 }
2676         }
2677
2678         /* Destroy the instance */
2679         cleanup(t);
2680         return 0;
2681 }
2682
2683 static int
2684 ordered_basic(struct test *t)
2685 {
2686         return parallel_basic(t, 1);
2687 }
2688
2689 static int
2690 unordered_basic(struct test *t)
2691 {
2692         return parallel_basic(t, 0);
2693 }
2694
2695 static int
2696 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2697 {
2698         const struct rte_event new_ev = {
2699                         .op = RTE_EVENT_OP_NEW
2700                         /* all other fields zero */
2701         };
2702         struct rte_event ev = new_ev;
2703         unsigned int rx_port = 0; /* port we get the first flow on */
2704         char rx_port_used_stat[64];
2705         char rx_port_free_stat[64];
2706         char other_port_used_stat[64];
2707
2708         if (init(t, 1, 2) < 0 ||
2709                         create_ports(t, 2) < 0 ||
2710                         create_atomic_qids(t, 1) < 0) {
2711                 printf("%d: Error initializing device\n", __LINE__);
2712                 return -1;
2713         }
2714         int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2715         if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2716                         nb_links != 1) {
2717                 printf("%d: Error linking queue to ports\n", __LINE__);
2718                 goto err;
2719         }
2720         if (rte_event_dev_start(evdev) < 0) {
2721                 printf("%d: Error with start call\n", __LINE__);
2722                 goto err;
2723         }
2724
2725         /* send one packet and see where it goes, port 0 or 1 */
2726         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2727                 printf("%d: Error doing first enqueue\n", __LINE__);
2728                 goto err;
2729         }
2730         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2731
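        /* The single flow may have been scheduled to either linked port; use
         * the cq_ring_used xstat to find which port received it, then build the
         * stat names for that port and for the other port.
         */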
2732         if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2733                         != 1)
2734                 rx_port = 1;
2735
2736         snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2737                         "port_%u_cq_ring_used", rx_port);
2738         snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2739                         "port_%u_cq_ring_free", rx_port);
2740         snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2741                         "port_%u_cq_ring_used", rx_port ^ 1);
2742         if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2743                         != 1) {
2744                 printf("%d: Error, first event not scheduled\n", __LINE__);
2745                 goto err;
2746         }
2747
2748         /* now fill up the rx port's queue with one flow to cause HOLB */
2749         do {
2750                 ev = new_ev;
2751                 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2752                         printf("%d: Error with enqueue\n", __LINE__);
2753                         goto err;
2754                 }
2755                 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2756         } while (rte_event_dev_xstats_by_name_get(evdev,
2757                                 rx_port_free_stat, NULL) != 0);
2758
2759         /* one more packet, which needs to stay in IQ - i.e. HOLB */
2760         ev = new_ev;
2761         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2762                 printf("%d: Error with enqueue\n", __LINE__);
2763                 goto err;
2764         }
2765         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2766
2767         /* check that the other port still has an empty CQ */
2768         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2769                         != 0) {
2770                 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2771                 goto err;
2772         }
2773         /* check IQ now has one packet */
2774         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2775                         != 1) {
2776                 printf("%d: Error, QID does not have exactly 1 packet\n",
2777                         __LINE__);
2778                 goto err;
2779         }
2780
2781         /* send another flow, which should pass the other IQ entry */
2782         ev = new_ev;
2783         ev.flow_id = 1;
2784         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2785                 printf("%d: Error with enqueue\n", __LINE__);
2786                 goto err;
2787         }
2788         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2789
2790         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2791                         != 1) {
2792                 printf("%d: Error, second flow did not pass out first\n",
2793                         __LINE__);
2794                 goto err;
2795         }
2796
2797         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2798                         != 1) {
2799                 printf("%d: Error, QID does not have exactly 1 packet\n",
2800                         __LINE__);
2801                 goto err;
2802         }
2803         cleanup(t);
2804         return 0;
2805 err:
2806         rte_event_dev_dump(evdev, stdout);
2807         cleanup(t);
2808         return -1;
2809 }
2810
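/* dev_stop() flush callback: counts events still held in the device that carry
 * the magic u64 value enqueued by dev_stop_flush() below.
 */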
2811 static void
2812 flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
2813 {
2814         *((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
2815 }
2816
2817 static int
2818 dev_stop_flush(struct test *t) /* test to check we can properly flush events */
2819 {
2820         const struct rte_event new_ev = {
2821                 .op = RTE_EVENT_OP_NEW,
2822                 .u64 = 0xCA11BACC,
2823                 .queue_id = 0
2824         };
2825         struct rte_event ev = new_ev;
2826         uint8_t count = 0;
2827         int i;
2828
2829         if (init(t, 1, 1) < 0 ||
2830             create_ports(t, 1) < 0 ||
2831             create_atomic_qids(t, 1) < 0) {
2832                 printf("%d: Error initializing device\n", __LINE__);
2833                 return -1;
2834         }
2835
2836         /* Link the queue so *_start() doesn't error out */
2837         if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
2838                 printf("%d: Error linking queue to port\n", __LINE__);
2839                 goto err;
2840         }
2841
2842         if (rte_event_dev_start(evdev) < 0) {
2843                 printf("%d: Error with start call\n", __LINE__);
2844                 goto err;
2845         }
2846
2847         for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
2848                 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2849                         printf("%d: Error enqueuing events\n", __LINE__);
2850                         goto err;
2851                 }
2852         }
2853
2854         /* Schedule the events from the port to the IQ. Since one more
2855          * event was enqueued than the CQ depth, at least one must remain.
2856          */
2857         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2858
2859         if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
2860                 printf("%d: Error installing the flush callback\n", __LINE__);
2861                 goto err;
2862         }
2863
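        /* cleanup() stops the device; the stop must invoke the registered
         * flush callback for each event still queued, so count is expected
         * to be non-zero afterwards.
         */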
2864         cleanup(t);
2865
2866         if (count == 0) {
2867                 printf("%d: Error executing the flush callback\n", __LINE__);
2868                 goto err;
2869         }
2870
2871         if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
2872                 printf("%d: Error uninstalling the flush callback\n", __LINE__);
2873                 goto err;
2874         }
2875
2876         return 0;
2877 err:
2878         rte_event_dev_dump(evdev, stdout);
2879         cleanup(t);
2880         return -1;
2881 }
2882
2883 static int
2884 worker_loopback_worker_fn(void *arg)
2885 {
2886         struct test *t = arg;
2887         uint8_t port = t->port[1];
2888         int count = 0;
2889         int enqd;
2890
2891         /*
2892          * Takes packets from the input port and then loops them back through
2893          * the Eventdev. Each packet gets looped through QIDs 0-7, 16 times,
2894          * so each packet is scheduled 8 * 16 = 128 times.
2895          */
2896         printf("%d: \tWorker function started\n", __LINE__);
2897         while (count < NUM_PACKETS) {
2898 #define BURST_SIZE 32
2899                 struct rte_event ev[BURST_SIZE];
2900                 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2901                                 BURST_SIZE, 0);
2902                 if (nb_rx == 0) {
2903                         rte_pause();
2904                         continue;
2905                 }
2906
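                /* Forward each event to the next QID; after the last QID,
                 * wrap back to QID 0 and count the completed pass in
                 * mbuf->udata64, dropping the packet after 16 passes.
                 */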
2907                 for (i = 0; i < nb_rx; i++) {
2908                         ev[i].queue_id++;
2909                         if (ev[i].queue_id != 8) {
2910                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2911                                 enqd = rte_event_enqueue_burst(evdev, port,
2912                                                 &ev[i], 1);
2913                                 if (enqd != 1) {
2914                                         printf("%d: Can't enqueue FWD!!\n",
2915                                                         __LINE__);
2916                                         return -1;
2917                                 }
2918                                 continue;
2919                         }
2920
2921                         ev[i].queue_id = 0;
2922                         ev[i].mbuf->udata64++;
2923                         if (ev[i].mbuf->udata64 != 16) {
2924                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2925                                 enqd = rte_event_enqueue_burst(evdev, port,
2926                                                 &ev[i], 1);
2927                                 if (enqd != 1) {
2928                                         printf("%d: Can't enqueue FWD!!\n",
2929                                                         __LINE__);
2930                                         return -1;
2931                                 }
2932                                 continue;
2933                         }
2934                         /* we have hit 16 iterations through the system - drop */
2935                         rte_pktmbuf_free(ev[i].mbuf);
2936                         count++;
2937                         ev[i].op = RTE_EVENT_OP_RELEASE;
2938                         enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2939                         if (enqd != 1) {
2940                                 printf("%d drop enqueue failed\n", __LINE__);
2941                                 return -1;
2942                         }
2943                 }
2944         }
2945
2946         return 0;
2947 }
2948
2949 static int
2950 worker_loopback_producer_fn(void *arg)
2951 {
2952         struct test *t = arg;
2953         uint8_t port = t->port[0];
2954         uint64_t count = 0;
2955
2956         printf("%d: \tProducer function started\n", __LINE__);
2957         while (count < NUM_PACKETS) {
2958                 struct rte_mbuf *m = NULL;
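                /* Busy-wait for a free mbuf; the shared pool may be empty
                 * while earlier packets are still in flight in the eventdev.
                 */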
2959                 do {
2960                         m = rte_pktmbuf_alloc(t->mbuf_pool);
2961                 } while (m == NULL);
2962
2963                 m->udata64 = 0;
2964
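                /* Inject a NEW event; the flow_id is derived from the mbuf
                 * address so packets spread across many atomic flows.
                 */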
2965                 struct rte_event ev = {
2966                                 .op = RTE_EVENT_OP_NEW,
2967                                 .queue_id = t->qid[0],
2968                                 .flow_id = (uintptr_t)m & 0xFFFF,
2969                                 .mbuf = m,
2970                 };
2971
2972                 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
2973                         while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
2974                                         1)
2975                                 rte_pause();
2976                 }
2977
2978                 count++;
2979         }
2980
2981         return 0;
2982 }
2983
2984 static int
2985 worker_loopback(struct test *t, uint8_t disable_implicit_release)
2986 {
2987         /* use a single producer core, and a worker core to see what happens
2988          * if the worker loops packets back multiple times
2989          */
2990         struct test_event_dev_stats stats;
2991         uint64_t print_cycles = 0, cycles = 0;
2992         uint64_t tx_pkts = 0;
2993         int err;
2994         int w_lcore, p_lcore;
2995
2996         if (init(t, 8, 2) < 0 ||
2997                         create_atomic_qids(t, 8) < 0) {
2998                 printf("%d: Error initializing device\n", __LINE__);
2999                 return -1;
3000         }
3001
3002         /* RX with low max events */
3003         static struct rte_event_port_conf conf = {
3004                         .dequeue_depth = 32,
3005                         .enqueue_depth = 64,
3006         };
3007         /* beware: this cannot be initialized in the static above as it would
3008          * only be initialized once - and this needs to be set for multiple runs
3009          */
3010         conf.new_event_threshold = 512;
3011         conf.disable_implicit_release = disable_implicit_release;
3012
3013         if (rte_event_port_setup(evdev, 0, &conf) < 0) {
3014                 printf("Error setting up RX port\n");
3015                 return -1;
3016         }
3017         t->port[0] = 0;
3018         /* TX with higher max events */
3019         conf.new_event_threshold = 4096;
3020         if (rte_event_port_setup(evdev, 1, &conf) < 0) {
3021                 printf("Error setting up TX port\n");
3022                 return -1;
3023         }
3024         t->port[1] = 1;
3025
3026         /* CQ mapping to QID */
3027         err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
3028         if (err != 8) { /* should have mapped all queues */
3029                 printf("%d: error mapping TX port to all qids\n", __LINE__);
3030                 return -1;
3031         }
3032
3033         if (rte_event_dev_start(evdev) < 0) {
3034                 printf("%d: Error with start call\n", __LINE__);
3035                 return -1;
3036         }
3037
3038         p_lcore = rte_get_next_lcore(
3039                         /* start core */ -1,
3040                         /* skip master */ 1,
3041                         /* wrap */ 0);
3042         w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3043
3044         rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3045         rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3046
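        /* The producer and worker run on their own lcores; this lcore drives
         * the scheduler service, prints progress about once per second, and
         * flags a deadlock if nothing is scheduled for ~3 seconds.
         */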
3047         print_cycles = cycles = rte_get_timer_cycles();
3048         while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3049                         rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3050
3051                 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3052
3053                 uint64_t new_cycles = rte_get_timer_cycles();
3054
3055                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3056                         test_event_dev_stats_get(evdev, &stats);
3057                         printf(
3058                                 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3059                                 __LINE__, stats.rx_pkts, stats.tx_pkts);
3060
3061                         print_cycles = new_cycles;
3062                 }
3063                 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3064                         test_event_dev_stats_get(evdev, &stats);
3065                         if (stats.tx_pkts == tx_pkts) {
3066                                 rte_event_dev_dump(evdev, stdout);
3067                                 printf("Dumping xstats:\n");
3068                                 xstats_print();
3069                                 printf(
3070                                         "%d: No schedules for 3 seconds, deadlock\n",
3071                                         __LINE__);
3072                                 return -1;
3073                         }
3074                         tx_pkts = stats.tx_pkts;
3075                         cycles = new_cycles;
3076                 }
3077         }
3078         /* ensure all completions are flushed */
3079         rte_service_run_iter_on_app_lcore(t->service_id, 1);
3080
3081         rte_eal_mp_wait_lcore();
3082
3083         cleanup(t);
3084         return 0;
3085 }
3086
3087 static struct rte_mempool *eventdev_func_mempool;
3088
3089 int
3090 test_sw_eventdev(void)
3091 {
3092         struct test *t;
3093         int ret;
3094
3095         t = malloc(sizeof(struct test));
3096         if (t == NULL)
3097                 return -1;
3098         /* manually initialize the op; older gcc versions complain about
3099          * static initialization of struct members that are bitfields.
3100          */
3101         release_ev.op = RTE_EVENT_OP_RELEASE;
3102
3103         const char *eventdev_name = "event_sw";
3104         evdev = rte_event_dev_get_dev_id(eventdev_name);
3105         if (evdev < 0) {
3106                 printf("%d: Eventdev %s not found - creating.\n",
3107                                 __LINE__, eventdev_name);
3108                 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3109                         printf("Error creating eventdev\n");
3110                         goto test_fail;
3111                 }
3112                 evdev = rte_event_dev_get_dev_id(eventdev_name);
3113                 if (evdev < 0) {
3114                         printf("Error finding newly created eventdev\n");
3115                         goto test_fail;
3116                 }
3117         }
3118
3119         if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3120                 printf("Failed to get service ID for software event dev\n");
3121                 goto test_fail;
3122         }
3123
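        /* The sw eventdev schedules via a service; enable its runstate and
         * disable the lcore-mapping check so the tests can run the service
         * directly with rte_service_run_iter_on_app_lcore().
         */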
3124         rte_service_runstate_set(t->service_id, 1);
3125         rte_service_set_runstate_mapped_check(t->service_id, 0);
3126
3127         /* Only create mbuf pool once, reuse for each test run */
3128         if (!eventdev_func_mempool) {
3129                 eventdev_func_mempool = rte_pktmbuf_pool_create(
3130                                 "EVENTDEV_SW_SA_MBUF_POOL",
3131                                 (1<<12), /* 4k buffers */
3132                                 32 /*MBUF_CACHE_SIZE*/,
3133                                 0,
3134                                 512, /* use very small mbufs */
3135                                 rte_socket_id());
3136                 if (!eventdev_func_mempool) {
3137                         printf("ERROR creating mempool\n");
3138                         goto test_fail;
3139                 }
3140         }
3141         t->mbuf_pool = eventdev_func_mempool;
3142         printf("*** Running Single Directed Packet test...\n");
3143         ret = test_single_directed_packet(t);
3144         if (ret != 0) {
3145                 printf("ERROR - Single Directed Packet test FAILED.\n");
3146                 goto test_fail;
3147         }
3148         printf("*** Running Directed Forward Credit test...\n");
3149         ret = test_directed_forward_credits(t);
3150         if (ret != 0) {
3151                 printf("ERROR - Directed Forward Credit test FAILED.\n");
3152                 goto test_fail;
3153         }
3154         printf("*** Running Single Load Balanced Packet test...\n");
3155         ret = single_packet(t);
3156         if (ret != 0) {
3157                 printf("ERROR - Single Packet test FAILED.\n");
3158                 goto test_fail;
3159         }
3160         printf("*** Running Unordered Basic test...\n");
3161         ret = unordered_basic(t);
3162         if (ret != 0) {
3163                 printf("ERROR - Unordered Basic test FAILED.\n");
3164                 goto test_fail;
3165         }
3166         printf("*** Running Ordered Basic test...\n");
3167         ret = ordered_basic(t);
3168         if (ret != 0) {
3169                 printf("ERROR - Ordered Basic test FAILED.\n");
3170                 goto test_fail;
3171         }
3172         printf("*** Running Burst Packets test...\n");
3173         ret = burst_packets(t);
3174         if (ret != 0) {
3175                 printf("ERROR - Burst Packets test FAILED.\n");
3176                 goto test_fail;
3177         }
3178         printf("*** Running Load Balancing test...\n");
3179         ret = load_balancing(t);
3180         if (ret != 0) {
3181                 printf("ERROR - Load Balancing test FAILED.\n");
3182                 goto test_fail;
3183         }
3184         printf("*** Running Prioritized Directed test...\n");
3185         ret = test_priority_directed(t);
3186         if (ret != 0) {
3187                 printf("ERROR - Prioritized Directed test FAILED.\n");
3188                 goto test_fail;
3189         }
3190         printf("*** Running Prioritized Atomic test...\n");
3191         ret = test_priority_atomic(t);
3192         if (ret != 0) {
3193                 printf("ERROR - Prioritized Atomic test FAILED.\n");
3194                 goto test_fail;
3195         }
3196
3197         printf("*** Running Prioritized Ordered test...\n");
3198         ret = test_priority_ordered(t);
3199         if (ret != 0) {
3200                 printf("ERROR - Prioritized Ordered test FAILED.\n");
3201                 goto test_fail;
3202         }
3203         printf("*** Running Prioritized Unordered test...\n");
3204         ret = test_priority_unordered(t);
3205         if (ret != 0) {
3206                 printf("ERROR - Prioritized Unordered test FAILED.\n");
3207                 goto test_fail;
3208         }
3209         printf("*** Running Invalid QID test...\n");
3210         ret = invalid_qid(t);
3211         if (ret != 0) {
3212                 printf("ERROR - Invalid QID test FAILED.\n");
3213                 goto test_fail;
3214         }
3215         printf("*** Running Load Balancing History test...\n");
3216         ret = load_balancing_history(t);
3217         if (ret != 0) {
3218                 printf("ERROR - Load Balancing History test FAILED.\n");
3219                 goto test_fail;
3220         }
3221         printf("*** Running Inflight Count test...\n");
3222         ret = inflight_counts(t);
3223         if (ret != 0) {
3224                 printf("ERROR - Inflight Count test FAILED.\n");
3225                 goto test_fail;
3226         }
3227         printf("*** Running Abuse Inflights test...\n");
3228         ret = abuse_inflights(t);
3229         if (ret != 0) {
3230                 printf("ERROR - Abuse Inflights test FAILED.\n");
3231                 goto test_fail;
3232         }
3233         printf("*** Running XStats test...\n");
3234         ret = xstats_tests(t);
3235         if (ret != 0) {
3236                 printf("ERROR - XStats test FAILED.\n");
3237                 goto test_fail;
3238         }
3239         printf("*** Running XStats ID Reset test...\n");
3240         ret = xstats_id_reset_tests(t);
3241         if (ret != 0) {
3242                 printf("ERROR - XStats ID Reset test FAILED.\n");
3243                 goto test_fail;
3244         }
3245         printf("*** Running XStats Brute Force test...\n");
3246         ret = xstats_brute_force(t);
3247         if (ret != 0) {
3248                 printf("ERROR - XStats Brute Force test FAILED.\n");
3249                 goto test_fail;
3250         }
3251         printf("*** Running XStats ID Abuse test...\n");
3252         ret = xstats_id_abuse_tests(t);
3253         if (ret != 0) {
3254                 printf("ERROR - XStats ID Abuse test FAILED.\n");
3255                 goto test_fail;
3256         }
3257         printf("*** Running QID Priority test...\n");
3258         ret = qid_priorities(t);
3259         if (ret != 0) {
3260                 printf("ERROR - QID Priority test FAILED.\n");
3261                 goto test_fail;
3262         }
3263         printf("*** Running Ordered Reconfigure test...\n");
3264         ret = ordered_reconfigure(t);
3265         if (ret != 0) {
3266                 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3267                 goto test_fail;
3268         }
3269         printf("*** Running Port LB Single Reconfig test...\n");
3270         ret = port_single_lb_reconfig(t);
3271         if (ret != 0) {
3272                 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3273                 goto test_fail;
3274         }
3275         printf("*** Running Port Reconfig Credits test...\n");
3276         ret = port_reconfig_credits(t);
3277         if (ret != 0) {
3278                 printf("ERROR - Port Reconfig Credits test FAILED.\n");
3279                 goto test_fail;
3280         }
3281         printf("*** Running Head-of-line-blocking test...\n");
3282         ret = holb(t);
3283         if (ret != 0) {
3284                 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3285                 goto test_fail;
3286         }
3287         printf("*** Running Stop Flush test...\n");
3288         ret = dev_stop_flush(t);
3289         if (ret != 0) {
3290                 printf("ERROR - Stop Flush test FAILED.\n");
3291                 goto test_fail;
3292         }
3293         if (rte_lcore_count() >= 3) {
3294                 printf("*** Running Worker loopback test...\n");
3295                 ret = worker_loopback(t, 0);
3296                 if (ret != 0) {
3297                         printf("ERROR - Worker loopback test FAILED.\n");
3298                         goto test_fail;
3299                 }
3300
3301                 printf("*** Running Worker loopback test (implicit release disabled)...\n");
3302                 ret = worker_loopback(t, 1);
3303                 if (ret != 0) {
3304                         printf("ERROR - Worker loopback test FAILED.\n");
3305                         goto test_fail;
3306                 }
3307         } else {
3308                 printf("### Not enough cores for worker loopback tests.\n");
3309                 printf("### Need at least 3 cores for the tests.\n");
3310         }
3311
3312         /*
3313          * Free test instance, leaving mempool initialized, and a pointer to it
3314          * in static eventdev_func_mempool, as it is re-used on re-runs
3315          */
3316         free(t);
3317
3318         printf("SW Eventdev Selftest Successful.\n");
3319         return 0;
3320 test_fail:
3321         free(t);
3322         printf("SW Eventdev Selftest Failed.\n");
3323         return -1;
3324 }