test/test/test_eventdev_sw.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <string.h>
36 #include <stdint.h>
37 #include <errno.h>
38 #include <unistd.h>
39 #include <sys/queue.h>
40
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
44 #include <rte_eal.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
50
51 #include <rte_eventdev.h>
52 #include "test.h"
53
54 #define MAX_PORTS 16
55 #define MAX_QIDS 16
56 #define NUM_PACKETS (1<<18)
57
58 static int evdev;
59
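/*
 * Per-test context: the mempool used to generate test packets, the port and
 * queue ids created by the helper functions below, and a count of how many
 * queues have been configured so far.
 */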
60 struct test {
61         struct rte_mempool *mbuf_pool;
62         uint8_t port[MAX_PORTS];
63         uint8_t qid[MAX_QIDS];
64         int nb_qids;
65 };
66
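/*
 * Event with no mbuf attached, enqueued as an "op only" event (see
 * abuse_inflights() below).
 */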
67 static struct rte_event release_ev;
68
69 static inline struct rte_mbuf *
70 rte_gen_arp(int portid, struct rte_mempool *mp)
71 {
72         /*
73          * len = 14 + 46
74          * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
75          */
76         static const uint8_t arp_request[] = {
77                 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
78                 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
79                 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
80                 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
81                 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
82                 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83                 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84                 0x00, 0x00, 0x00, 0x00
85         };
86         struct rte_mbuf *m;
87         int pkt_len = sizeof(arp_request);
88
89         m = rte_pktmbuf_alloc(mp);
90         if (!m)
91                 return 0;
92
93         memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
94                 arp_request, pkt_len);
95         rte_pktmbuf_pkt_len(m) = pkt_len;
96         rte_pktmbuf_data_len(m) = pkt_len;
97
98         RTE_SET_USED(portid);
99
100         return m;
101 }
102
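/*
 * Debug helper: print the names and values of the device, port and queue
 * level xstats to stdout.
 */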
103 static void
104 xstats_print(void)
105 {
106         const uint32_t XSTATS_MAX = 1024;
107         uint32_t i;
108         uint32_t ids[XSTATS_MAX];
109         uint64_t values[XSTATS_MAX];
110         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
111
112         for (i = 0; i < XSTATS_MAX; i++)
113                 ids[i] = i;
114
115         /* Device names / values */
116         int ret = rte_event_dev_xstats_names_get(evdev,
117                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
118                                         xstats_names, ids, XSTATS_MAX);
119         if (ret < 0) {
120                 printf("%d: xstats names get() returned error\n",
121                         __LINE__);
122                 return;
123         }
124         ret = rte_event_dev_xstats_get(evdev,
125                                         RTE_EVENT_DEV_XSTATS_DEVICE,
126                                         0, ids, values, ret);
127         if (ret > (signed int)XSTATS_MAX)
128                 printf("%s %d: more xstats available than space\n",
129                                 __func__, __LINE__);
130         for (i = 0; (signed int)i < ret; i++) {
131                 printf("%d : %s : %"PRIu64"\n",
132                                 i, xstats_names[i].name, values[i]);
133         }
134
135         /* Port names / values */
136         ret = rte_event_dev_xstats_names_get(evdev,
137                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
138                                         xstats_names, ids, XSTATS_MAX);
139         ret = rte_event_dev_xstats_get(evdev,
140                                         RTE_EVENT_DEV_XSTATS_PORT, 1,
141                                         ids, values, ret);
142         if (ret > (signed int)XSTATS_MAX)
143                 printf("%s %d: more xstats available than space\n",
144                                 __func__, __LINE__);
145         for (i = 0; (signed int)i < ret; i++) {
146                 printf("%d : %s : %"PRIu64"\n",
147                                 i, xstats_names[i].name, values[i]);
148         }
149
150         /* Queue names / values */
151         ret = rte_event_dev_xstats_names_get(evdev,
152                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
153                                         xstats_names, ids, XSTATS_MAX);
154         ret = rte_event_dev_xstats_get(evdev,
155                                         RTE_EVENT_DEV_XSTATS_QUEUE,
156                                         1, ids, values, ret);
157         if (ret > (signed int)XSTATS_MAX)
158                 printf("%s %d: more xstats available than space\n",
159                                 __func__, __LINE__);
160         for (i = 0; (signed int)i < ret; i++) {
161                 printf("%d : %s : %"PRIu64"\n",
162                                 i, xstats_names[i].name, values[i]);
163         }
164 }
165
166 /* initialization and config */
167 static inline int
168 init(struct test *t, int nb_queues, int nb_ports)
169 {
170         struct rte_event_dev_config config = {
171                         .nb_event_queues = nb_queues,
172                         .nb_event_ports = nb_ports,
173                         .nb_event_queue_flows = 1024,
174                         .nb_events_limit = 4096,
175                         .nb_event_port_dequeue_depth = 128,
176                         .nb_event_port_enqueue_depth = 128,
177         };
178         int ret;
179
180         void *temp = t->mbuf_pool; /* save and restore mbuf pool */
181
182         memset(t, 0, sizeof(*t));
183         t->mbuf_pool = temp;
184
185         ret = rte_event_dev_configure(evdev, &config);
186         if (ret < 0)
187                 printf("%d: Error configuring device\n", __LINE__);
188         return ret;
189 };
190
191 static inline int
192 create_ports(struct test *t, int num_ports)
193 {
194         int i;
195         static const struct rte_event_port_conf conf = {
196                         .new_event_threshold = 1024,
197                         .dequeue_depth = 32,
198                         .enqueue_depth = 64,
199         };
200         if (num_ports > MAX_PORTS)
201                 return -1;
202
203         for (i = 0; i < num_ports; i++) {
204                 if (rte_event_port_setup(evdev, i, &conf) < 0) {
205                         printf("Error setting up port %d\n", i);
206                         return -1;
207                 }
208                 t->port[i] = i;
209         }
210
211         return 0;
212 }
213
214 static inline int
215 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
216 {
217         int i;
218
219         /* Q creation */
220         const struct rte_event_queue_conf conf = {
221                         .event_queue_cfg = flags,
222                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
223                         .nb_atomic_flows = 1024,
224                         .nb_atomic_order_sequences = 1024,
225         };
226
227         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
228                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
229                         printf("%d: error creating qid %d\n", __LINE__, i);
230                         return -1;
231                 }
232                 t->qid[i] = i;
233         }
234         t->nb_qids += num_qids;
235         if (t->nb_qids > MAX_QIDS)
236                 return -1;
237
238         return 0;
239 }
240
241 static inline int
242 create_atomic_qids(struct test *t, int num_qids)
243 {
244         return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
245 }
246
247 static inline int
248 create_ordered_qids(struct test *t, int num_qids)
249 {
250         return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
251 }
252
253
254 static inline int
255 create_unordered_qids(struct test *t, int num_qids)
256 {
257         return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
258 }
259
260 static inline int
261 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
262 {
263         int i;
264
265         /* Q creation */
266         static const struct rte_event_queue_conf conf = {
267                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
268                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
269                         .nb_atomic_flows = 1024,
270                         .nb_atomic_order_sequences = 1024,
271         };
272
273         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
274                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
275                         printf("%d: error creating qid %d\n", __LINE__, i);
276                         return -1;
277                 }
278                 t->qid[i] = i;
279
280                 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
281                                 &t->qid[i], NULL, 1) != 1) {
282                         printf("%d: error creating link for qid %d\n",
283                                         __LINE__, i);
284                         return -1;
285                 }
286         }
287         t->nb_qids += num_qids;
288         if (t->nb_qids > MAX_QIDS)
289                 return -1;
290
291         return 0;
292 }
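/*
 * Illustrative sketch (not a registered test case): the typical setup
 * sequence used by the tests below, mirroring test_priority_atomic(). It
 * assumes t->mbuf_pool has already been populated by the test harness.
 */
static inline int
example_atomic_setup_sketch(struct test *t)
{
        /* one port, one atomic queue */
        if (init(t, 1, 1) < 0 ||
                        create_ports(t, 1) < 0 ||
                        create_atomic_qids(t, 1) < 0)
                return -1;

        /* map the single CQ to the single QID, then start the device */
        if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1)
                return -1;
        if (rte_event_dev_start(evdev) < 0)
                return -1;

        return 0;
}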
293
294 /* destruction */
295 static inline int
296 cleanup(struct test *t __rte_unused)
297 {
298         rte_event_dev_stop(evdev);
299         rte_event_dev_close(evdev);
300         return 0;
301 };
302
303 struct test_event_dev_stats {
304         uint64_t rx_pkts;       /**< Total packets received */
305         uint64_t rx_dropped;    /**< Total packets dropped (e.g. invalid QID) */
306         uint64_t tx_pkts;       /**< Total packets transmitted */
307
308         /** Packets received on this port */
309         uint64_t port_rx_pkts[MAX_PORTS];
310         /** Packets dropped on this port */
311         uint64_t port_rx_dropped[MAX_PORTS];
312         /** Packets inflight on this port */
313         uint64_t port_inflight[MAX_PORTS];
314         /** Packets transmitted on this port */
315         uint64_t port_tx_pkts[MAX_PORTS];
316         /** Packets received on this qid */
317         uint64_t qid_rx_pkts[MAX_QIDS];
318         /** Packets dropped on this qid */
319         uint64_t qid_rx_dropped[MAX_QIDS];
320         /** Packets transmitted on this qid */
321         uint64_t qid_tx_pkts[MAX_QIDS];
322 };
323
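/*
 * Snapshot the driver's named xstats ("dev_rx", "port_N_rx", "qid_N_tx" and
 * so on) into a test_event_dev_stats structure so the tests can check
 * counters with plain comparisons.
 */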
324 static inline int
325 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
326 {
327         static uint32_t i;
328         static uint32_t total_ids[3]; /* rx, drop and tx */
329         static uint32_t port_rx_pkts_ids[MAX_PORTS];
330         static uint32_t port_rx_dropped_ids[MAX_PORTS];
331         static uint32_t port_inflight_ids[MAX_PORTS];
332         static uint32_t port_tx_pkts_ids[MAX_PORTS];
333         static uint32_t qid_rx_pkts_ids[MAX_QIDS];
334         static uint32_t qid_rx_dropped_ids[MAX_QIDS];
335         static uint32_t qid_tx_pkts_ids[MAX_QIDS];
336
337
338         stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
339                         "dev_rx", &total_ids[0]);
340         stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
341                         "dev_drop", &total_ids[1]);
342         stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
343                         "dev_tx", &total_ids[2]);
344         for (i = 0; i < MAX_PORTS; i++) {
345                 char name[32];
346                 snprintf(name, sizeof(name), "port_%u_rx", i);
347                 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
348                                 dev_id, name, &port_rx_pkts_ids[i]);
349                 snprintf(name, sizeof(name), "port_%u_drop", i);
350                 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
351                                 dev_id, name, &port_rx_dropped_ids[i]);
352                 snprintf(name, sizeof(name), "port_%u_inflight", i);
353                 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
354                                 dev_id, name, &port_inflight_ids[i]);
355                 snprintf(name, sizeof(name), "port_%u_tx", i);
356                 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
357                                 dev_id, name, &port_tx_pkts_ids[i]);
358         }
359         for (i = 0; i < MAX_QIDS; i++) {
360                 char name[32];
361                 snprintf(name, sizeof(name), "qid_%u_rx", i);
362                 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
363                                 dev_id, name, &qid_rx_pkts_ids[i]);
364                 snprintf(name, sizeof(name), "qid_%u_drop", i);
365                 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
366                                 dev_id, name, &qid_rx_dropped_ids[i]);
367                 snprintf(name, sizeof(name), "qid_%u_tx", i);
368                 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
369                                 dev_id, name, &qid_tx_pkts_ids[i]);
370         }
371
372         return 0;
373 }
374
375 /* run_prio_packet_test
376  * This performs a basic packet priority check on the test instance passed in.
377  * It is factored out of the main priority tests as the same tests must be
378  * performed to ensure prioritization of each type of QID.
379  *
380  * Requirements:
381  *  - An initialized test structure, including mempool
382  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
383  *  - t->qid[0] is the QID to be tested
384  *  - if LB QID, the CQ must be mapped to the QID.
385  */
386 static int
387 run_prio_packet_test(struct test *t)
388 {
389         int err;
390         const uint32_t MAGIC_SEQN[] = {4711, 1234};
391         const uint32_t PRIORITY[] = {
392                 RTE_EVENT_DEV_PRIORITY_NORMAL,
393                 RTE_EVENT_DEV_PRIORITY_HIGHEST
394         };
395         unsigned int i;
396         for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
397                 /* generate pkt and enqueue */
398                 struct rte_event ev;
399                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
400                 if (!arp) {
401                         printf("%d: gen of pkt failed\n", __LINE__);
402                         return -1;
403                 }
404                 arp->seqn = MAGIC_SEQN[i];
405
406                 ev = (struct rte_event){
407                         .priority = PRIORITY[i],
408                         .op = RTE_EVENT_OP_NEW,
409                         .queue_id = t->qid[0],
410                         .mbuf = arp
411                 };
412                 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
413                 if (err < 0) {
414                         printf("%d: error failed to enqueue\n", __LINE__);
415                         return -1;
416                 }
417         }
418
419         rte_event_schedule(evdev);
420
421         struct test_event_dev_stats stats;
422         err = test_event_dev_stats_get(evdev, &stats);
423         if (err) {
424                 printf("%d: error failed to get stats\n", __LINE__);
425                 return -1;
426         }
427
428         if (stats.port_rx_pkts[t->port[0]] != 2) {
429                 printf("%d: error stats incorrect for directed port\n",
430                                 __LINE__);
431                 rte_event_dev_dump(evdev, stdout);
432                 return -1;
433         }
434
435         struct rte_event ev, ev2;
436         uint32_t deq_pkts;
437         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
438         if (deq_pkts != 1) {
439                 printf("%d: error failed to deq\n", __LINE__);
440                 rte_event_dev_dump(evdev, stdout);
441                 return -1;
442         }
443         if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
444                 printf("%d: first packet out not highest priority\n",
445                                 __LINE__);
446                 rte_event_dev_dump(evdev, stdout);
447                 return -1;
448         }
449         rte_pktmbuf_free(ev.mbuf);
450
451         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
452         if (deq_pkts != 1) {
453                 printf("%d: error failed to deq\n", __LINE__);
454                 rte_event_dev_dump(evdev, stdout);
455                 return -1;
456         }
457         if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
458                 printf("%d: second packet out not lower priority\n",
459                                 __LINE__);
460                 rte_event_dev_dump(evdev, stdout);
461                 return -1;
462         }
463         rte_pktmbuf_free(ev2.mbuf);
464
465         cleanup(t);
466         return 0;
467 }
468
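/* Send one packet through a directed (single link) queue and verify the
 * per-port stats and the dequeued sequence number.
 */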
469 static int
470 test_single_directed_packet(struct test *t)
471 {
472         const int rx_enq = 0;
473         const int wrk_enq = 2;
474         int err;
475
476         /* Create instance with 3 directed QIDs going to 3 ports */
477         if (init(t, 3, 3) < 0 ||
478                         create_ports(t, 3) < 0 ||
479                         create_directed_qids(t, 3, t->port) < 0)
480                 return -1;
481
482         if (rte_event_dev_start(evdev) < 0) {
483                 printf("%d: Error with start call\n", __LINE__);
484                 return -1;
485         }
486
487         /************** FORWARD ****************/
488         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
489         struct rte_event ev = {
490                         .op = RTE_EVENT_OP_NEW,
491                         .queue_id = wrk_enq,
492                         .mbuf = arp,
493         };
494
495         if (!arp) {
496                 printf("%d: gen of pkt failed\n", __LINE__);
497                 return -1;
498         }
499
500         const uint32_t MAGIC_SEQN = 4711;
501         arp->seqn = MAGIC_SEQN;
502
503         /* generate pkt and enqueue */
504         err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
505         if (err < 0) {
506                 printf("%d: error failed to enqueue\n", __LINE__);
507                 return -1;
508         }
509
510         /* Run schedule() as directed packets may need to be re-ordered */
511         rte_event_schedule(evdev);
512
513         struct test_event_dev_stats stats;
514         err = test_event_dev_stats_get(evdev, &stats);
515         if (err) {
516                 printf("%d: error failed to get stats\n", __LINE__);
517                 return -1;
518         }
519
520         if (stats.port_rx_pkts[rx_enq] != 1) {
521                 printf("%d: error stats incorrect for directed port\n",
522                                 __LINE__);
523                 return -1;
524         }
525
526         uint32_t deq_pkts;
527         deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
528         if (deq_pkts != 1) {
529                 printf("%d: error failed to deq\n", __LINE__);
530                 return -1;
531         }
532
533         err = test_event_dev_stats_get(evdev, &stats);
534         if (stats.port_rx_pkts[wrk_enq] != 0 &&
535                         stats.port_rx_pkts[wrk_enq] != 1) {
536                 printf("%d: error directed stats post-dequeue\n", __LINE__);
537                 return -1;
538         }
539
540         if (ev.mbuf->seqn != MAGIC_SEQN) {
541                 printf("%d: error magic sequence number not dequeued\n",
542                                 __LINE__);
543                 return -1;
544         }
545
546         rte_pktmbuf_free(ev.mbuf);
547         cleanup(t);
548         return 0;
549 }
550
551
552 static int
553 test_priority_directed(struct test *t)
554 {
555         if (init(t, 1, 1) < 0 ||
556                         create_ports(t, 1) < 0 ||
557                         create_directed_qids(t, 1, t->port) < 0) {
558                 printf("%d: Error initializing device\n", __LINE__);
559                 return -1;
560         }
561
562         if (rte_event_dev_start(evdev) < 0) {
563                 printf("%d: Error with start call\n", __LINE__);
564                 return -1;
565         }
566
567         return run_prio_packet_test(t);
568 }
569
570 static int
571 test_priority_atomic(struct test *t)
572 {
573         if (init(t, 1, 1) < 0 ||
574                         create_ports(t, 1) < 0 ||
575                         create_atomic_qids(t, 1) < 0) {
576                 printf("%d: Error initializing device\n", __LINE__);
577                 return -1;
578         }
579
580         /* map the QID */
581         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
582                 printf("%d: error mapping qid to port\n", __LINE__);
583                 return -1;
584         }
585         if (rte_event_dev_start(evdev) < 0) {
586                 printf("%d: Error with start call\n", __LINE__);
587                 return -1;
588         }
589
590         return run_prio_packet_test(t);
591 }
592
593 static int
594 test_priority_ordered(struct test *t)
595 {
596         if (init(t, 1, 1) < 0 ||
597                         create_ports(t, 1) < 0 ||
598                         create_ordered_qids(t, 1) < 0) {
599                 printf("%d: Error initializing device\n", __LINE__);
600                 return -1;
601         }
602
603         /* map the QID */
604         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
605                 printf("%d: error mapping qid to port\n", __LINE__);
606                 return -1;
607         }
608         if (rte_event_dev_start(evdev) < 0) {
609                 printf("%d: Error with start call\n", __LINE__);
610                 return -1;
611         }
612
613         return run_prio_packet_test(t);
614 }
615
616 static int
617 test_priority_unordered(struct test *t)
618 {
619         if (init(t, 1, 1) < 0 ||
620                         create_ports(t, 1) < 0 ||
621                         create_unordered_qids(t, 1) < 0) {
622                 printf("%d: Error initializing device\n", __LINE__);
623                 return -1;
624         }
625
626         /* map the QID */
627         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
628                 printf("%d: error mapping qid to port\n", __LINE__);
629                 return -1;
630         }
631         if (rte_event_dev_start(evdev) < 0) {
632                 printf("%d: Error with start call\n", __LINE__);
633                 return -1;
634         }
635
636         return run_prio_packet_test(t);
637 }
638
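/* Enqueue two NEW events, alternating queue and flow ids, and verify that
 * one event is delivered to each of the two linked ports.
 */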
639 static int
640 burst_packets(struct test *t)
641 {
642         /************** CONFIG ****************/
643         uint32_t i;
644         int err;
645         int ret;
646
647         /* Create instance with 2 ports and 2 queues */
648         if (init(t, 2, 2) < 0 ||
649                         create_ports(t, 2) < 0 ||
650                         create_atomic_qids(t, 2) < 0) {
651                 printf("%d: Error initializing device\n", __LINE__);
652                 return -1;
653         }
654
655         /* CQ mapping to QID */
656         ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
657         if (ret != 1) {
658                 printf("%d: error mapping lb qid0\n", __LINE__);
659                 return -1;
660         }
661         ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
662         if (ret != 1) {
663                 printf("%d: error mapping lb qid1\n", __LINE__);
664                 return -1;
665         }
666
667         if (rte_event_dev_start(evdev) < 0) {
668                 printf("%d: Error with start call\n", __LINE__);
669                 return -1;
670         }
671
672         /************** FORWARD ****************/
673         const uint32_t rx_port = 0;
674         const uint32_t NUM_PKTS = 2;
675
676         for (i = 0; i < NUM_PKTS; i++) {
677                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
678                 if (!arp) {
679                         printf("%d: error generating pkt\n", __LINE__);
680                         return -1;
681                 }
682
683                 struct rte_event ev = {
684                                 .op = RTE_EVENT_OP_NEW,
685                                 .queue_id = i % 2,
686                                 .flow_id = i % 3,
687                                 .mbuf = arp,
688                 };
689                 /* generate pkt and enqueue */
690                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
691                 if (err < 0) {
692                         printf("%d: Failed to enqueue\n", __LINE__);
693                         return -1;
694                 }
695         }
696         rte_event_schedule(evdev);
697
698         /* Check stats: all NUM_PKTS should have arrived at the sched core */
699         struct test_event_dev_stats stats;
700
701         err = test_event_dev_stats_get(evdev, &stats);
702         if (err) {
703                 printf("%d: failed to get stats\n", __LINE__);
704                 return -1;
705         }
706         if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
707                 printf("%d: Sched core didn't receive all %d pkts\n",
708                                 __LINE__, NUM_PKTS);
709                 rte_event_dev_dump(evdev, stdout);
710                 return -1;
711         }
712
713         uint32_t deq_pkts;
714         int p;
715
716         deq_pkts = 0;
717         /******** DEQ QID 0 *******/
718         do {
719                 struct rte_event ev;
720                 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
721                 deq_pkts += p;
722                 rte_pktmbuf_free(ev.mbuf);
723         } while (p);
724
725         if (deq_pkts != NUM_PKTS/2) {
726                 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
727                                 __LINE__);
728                 return -1;
729         }
730
731         /******** DEQ QID 1 *******/
732         deq_pkts = 0;
733         do {
734                 struct rte_event ev;
735                 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
736                 deq_pkts += p;
737                 rte_pktmbuf_free(ev.mbuf);
738         } while (p);
739         if (deq_pkts != NUM_PKTS/2) {
740                 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
741                                 __LINE__);
742                 return -1;
743         }
744
745         cleanup(t);
746         return 0;
747 }
748
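/* Enqueue a release/op-only event with nothing inflight and verify that the
 * scheduler records no rx, tx or inflight packets as a result.
 */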
749 static int
750 abuse_inflights(struct test *t)
751 {
752         const int rx_enq = 0;
753         const int wrk_enq = 2;
754         int err;
755
756         /* Create instance with 4 ports */
757         if (init(t, 1, 4) < 0 ||
758                         create_ports(t, 4) < 0 ||
759                         create_atomic_qids(t, 1) < 0) {
760                 printf("%d: Error initializing device\n", __LINE__);
761                 return -1;
762         }
763
764         /* CQ mapping to QID */
765         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
766         if (err != 1) {
767                 printf("%d: error mapping lb qid\n", __LINE__);
768                 cleanup(t);
769                 return -1;
770         }
771
772         if (rte_event_dev_start(evdev) < 0) {
773                 printf("%d: Error with start call\n", __LINE__);
774                 return -1;
775         }
776
777         /* Enqueue op only */
778         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
779         if (err < 0) {
780                 printf("%d: Failed to enqueue\n", __LINE__);
781                 return -1;
782         }
783
784         /* schedule */
785         rte_event_schedule(evdev);
786
787         struct test_event_dev_stats stats;
788
789         err = test_event_dev_stats_get(evdev, &stats);
790         if (err) {
791                 printf("%d: failed to get stats\n", __LINE__);
792                 return -1;
793         }
794
795         if (stats.rx_pkts != 0 ||
796                         stats.tx_pkts != 0 ||
797                         stats.port_inflight[wrk_enq] != 0) {
798                 printf("%d: Sched core didn't handle pkt as expected\n",
799                                 __LINE__);
800                 return -1;
801         }
802
803         cleanup(t);
804         return 0;
805 }
806
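/* Exercise the xstats names/get/reset API at device, port and queue level,
 * checking the expected counter values after enqueueing three packets.
 */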
807 static int
808 xstats_tests(struct test *t)
809 {
810         const int wrk_enq = 2;
811         int err;
812
813         /* Create instance with 4 ports */
814         if (init(t, 1, 4) < 0 ||
815                         create_ports(t, 4) < 0 ||
816                         create_atomic_qids(t, 1) < 0) {
817                 printf("%d: Error initializing device\n", __LINE__);
818                 return -1;
819         }
820
821         /* CQ mapping to QID */
822         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
823         if (err != 1) {
824                 printf("%d: error mapping lb qid\n", __LINE__);
825                 cleanup(t);
826                 return -1;
827         }
828
829         if (rte_event_dev_start(evdev) < 0) {
830                 printf("%d: Error with start call\n", __LINE__);
831                 return -1;
832         }
833
834         const uint32_t XSTATS_MAX = 1024;
835
836         uint32_t i;
837         uint32_t ids[XSTATS_MAX];
838         uint64_t values[XSTATS_MAX];
839         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
840
841         for (i = 0; i < XSTATS_MAX; i++)
842                 ids[i] = i;
843
844         /* Device names / values */
845         int ret = rte_event_dev_xstats_names_get(evdev,
846                                         RTE_EVENT_DEV_XSTATS_DEVICE,
847                                         0, xstats_names, ids, XSTATS_MAX);
848         if (ret != 6) {
849                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
850                 return -1;
851         }
852         ret = rte_event_dev_xstats_get(evdev,
853                                         RTE_EVENT_DEV_XSTATS_DEVICE,
854                                         0, ids, values, ret);
855         if (ret != 6) {
856                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
857                 return -1;
858         }
859
860         /* Port names / values */
861         ret = rte_event_dev_xstats_names_get(evdev,
862                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
863                                         xstats_names, ids, XSTATS_MAX);
864         if (ret != 21) {
865                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
866                 return -1;
867         }
868         ret = rte_event_dev_xstats_get(evdev,
869                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
870                                         ids, values, ret);
871         if (ret != 21) {
872                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
873                 return -1;
874         }
875
876         /* Queue names / values */
877         ret = rte_event_dev_xstats_names_get(evdev,
878                                         RTE_EVENT_DEV_XSTATS_QUEUE,
879                                         0, xstats_names, ids, XSTATS_MAX);
880         if (ret != 13) {
881                 printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
882                 return -1;
883         }
884
885         /* NEGATIVE TEST: passing a non-existent queue id should return -EINVAL */
886         ret = rte_event_dev_xstats_get(evdev,
887                                         RTE_EVENT_DEV_XSTATS_QUEUE,
888                                         1, ids, values, ret);
889         if (ret != -EINVAL) {
890                 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
891                 return -1;
892         }
893
894         ret = rte_event_dev_xstats_get(evdev,
895                                         RTE_EVENT_DEV_XSTATS_QUEUE,
896                                         0, ids, values, ret);
897         if (ret != 13) {
898                 printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
899                 return -1;
900         }
901
902         /* enqueue packets to check values */
903         for (i = 0; i < 3; i++) {
904                 struct rte_event ev;
905                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
906                 if (!arp) {
907                         printf("%d: gen of pkt failed\n", __LINE__);
908                         return -1;
909                 }
910                 ev.queue_id = t->qid[i];
911                 ev.op = RTE_EVENT_OP_NEW;
912                 ev.mbuf = arp;
913                 ev.flow_id = 7;
914                 arp->seqn = i;
915
916                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
917                 if (err != 1) {
918                         printf("%d: Failed to enqueue\n", __LINE__);
919                         return -1;
920                 }
921         }
922
923         rte_event_schedule(evdev);
924
925         /* Device names / values */
926         int num_stats = rte_event_dev_xstats_names_get(evdev,
927                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
928                                         xstats_names, ids, XSTATS_MAX);
929         if (num_stats < 0)
930                 goto fail;
931         ret = rte_event_dev_xstats_get(evdev,
932                                         RTE_EVENT_DEV_XSTATS_DEVICE,
933                                         0, ids, values, num_stats);
934         static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
935         for (i = 0; (signed int)i < ret; i++) {
936                 if (expected[i] != values[i]) {
937                         printf(
938                                 "%d Error xstat %d (id %d) %s : %"PRIu64
939                                 ", expect %"PRIu64"\n",
940                                 __LINE__, i, ids[i], xstats_names[i].name,
941                                 values[i], expected[i]);
942                         goto fail;
943                 }
944         }
945
946         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
947                                         0, NULL, 0);
948
949         /* ensure reset statistics are zeroed */
950         static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
951         ret = rte_event_dev_xstats_get(evdev,
952                                         RTE_EVENT_DEV_XSTATS_DEVICE,
953                                         0, ids, values, num_stats);
954         for (i = 0; (signed int)i < ret; i++) {
955                 if (expected_zero[i] != values[i]) {
956                         printf(
957                                 "%d Error, xstat %d (id %d) %s : %"PRIu64
958                                 ", expect %"PRIu64"\n",
959                                 __LINE__, i, ids[i], xstats_names[i].name,
960                                 values[i], expected_zero[i]);
961                         goto fail;
962                 }
963         }
964
965         /* port reset checks */
966         num_stats = rte_event_dev_xstats_names_get(evdev,
967                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
968                                         xstats_names, ids, XSTATS_MAX);
969         if (num_stats < 0)
970                 goto fail;
971         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
972                                         0, ids, values, num_stats);
973
974         static const uint64_t port_expected[] = {
975                 3 /* rx */,
976                 0 /* tx */,
977                 0 /* drop */,
978                 0 /* inflights */,
979                 0 /* avg pkt cycles */,
980                 29 /* credits */,
981                 0 /* rx ring used */,
982                 4096 /* rx ring free */,
983                 0 /* cq ring used */,
984                 32 /* cq ring free */,
985                 0 /* dequeue calls */,
986                 /* 10 dequeue burst buckets */
987                 0, 0, 0, 0, 0,
988                 0, 0, 0, 0, 0,
989         };
990         if (ret != RTE_DIM(port_expected)) {
991                 printf(
992                         "%s %d: wrong number of port stats (%d), expected %zu\n",
993                         __func__, __LINE__, ret, RTE_DIM(port_expected));
994         }
995
996         for (i = 0; (signed int)i < ret; i++) {
997                 if (port_expected[i] != values[i]) {
998                         printf(
999                                 "%s : %d: Error stat %s is %"PRIu64
1000                                 ", expected %"PRIu64"\n",
1001                                 __func__, __LINE__, xstats_names[i].name,
1002                                 values[i], port_expected[i]);
1003                         goto fail;
1004                 }
1005         }
1006
1007         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1008                                         0, NULL, 0);
1009
1010         /* ensure reset statistics are zeroed */
1011         static const uint64_t port_expected_zero[] = {
1012                 0 /* rx */,
1013                 0 /* tx */,
1014                 0 /* drop */,
1015                 0 /* inflights */,
1016                 0 /* avg pkt cycles */,
1017                 29 /* credits */,
1018                 0 /* rx ring used */,
1019                 4096 /* rx ring free */,
1020                 0 /* cq ring used */,
1021                 32 /* cq ring free */,
1022                 0 /* dequeue calls */,
1023                 /* 10 dequeue burst buckets */
1024                 0, 0, 0, 0, 0,
1025                 0, 0, 0, 0, 0,
1026         };
1027         ret = rte_event_dev_xstats_get(evdev,
1028                                         RTE_EVENT_DEV_XSTATS_PORT,
1029                                         0, ids, values, num_stats);
1030         for (i = 0; (signed int)i < ret; i++) {
1031                 if (port_expected_zero[i] != values[i]) {
1032                         printf(
1033                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1034                                 ", expect %"PRIu64"\n",
1035                                 __LINE__, i, ids[i], xstats_names[i].name,
1036                                 values[i], port_expected_zero[i]);
1037                         goto fail;
1038                 }
1039         }
1040
1041         /* QUEUE STATS TESTS */
1042         num_stats = rte_event_dev_xstats_names_get(evdev,
1043                                                 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1044                                                 xstats_names, ids, XSTATS_MAX);
1045         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1046                                         0, ids, values, num_stats);
1047         if (ret < 0) {
1048                 printf("xstats get returned %d\n", ret);
1049                 goto fail;
1050         }
1051         if ((unsigned int)ret > XSTATS_MAX)
1052                 printf("%s %d: more xstats available than space\n",
1053                                 __func__, __LINE__);
1054
1055         static const uint64_t queue_expected[] = {
1056                 3 /* rx */,
1057                 3 /* tx */,
1058                 0 /* drop */,
1059                 3 /* inflights */,
1060                 512 /* iq size */,
1061                 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1062                 0, 0, 1, 0, /* qid_0_port_X_pinned_flows */
1063         };
1064         for (i = 0; (signed int)i < ret; i++) {
1065                 if (queue_expected[i] != values[i]) {
1066                         printf(
1067                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1068                                 ", expect %"PRIu64"\n",
1069                                 __LINE__, i, ids[i], xstats_names[i].name,
1070                                 values[i], queue_expected[i]);
1071                         goto fail;
1072                 }
1073         }
1074
1075         /* Reset the queue stats here */
1076         ret = rte_event_dev_xstats_reset(evdev,
1077                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1078                                         NULL,
1079                                         0);
1080
1081         /* Verify that the resettable stats are reset, and others are not */
1082         static const uint64_t queue_expected_zero[] = {
1083                 0 /* rx */,
1084                 0 /* tx */,
1085                 0 /* drop */,
1086                 3 /* inflight */,
1087                 512 /* iq size */,
1088                 0, 0, 0, 0, /* 4 iq used */
1089                 0, 0, 1, 0, /* qid to port pinned flows */
1090         };
1091
1092         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1093                                         ids, values, num_stats);
1094         int fails = 0;
1095         for (i = 0; (signed int)i < ret; i++) {
1096                 if (queue_expected_zero[i] != values[i]) {
1097                         printf(
1098                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1099                                 ", expect %"PRIu64"\n",
1100                                 __LINE__, i, ids[i], xstats_names[i].name,
1101                                 values[i], queue_expected_zero[i]);
1102                         fails++;
1103                 }
1104         }
1105         if (fails) {
1106                 printf("%d : %d of values were not as expected above\n",
1107                                 __LINE__, fails);
1108                 goto fail;
1109         }
1110
1111         cleanup(t);
1112         return 0;
1113
1114 fail:
1115         rte_event_dev_dump(0, stdout);
1116         cleanup(t);
1117         return -1;
1118 }
1119
1120
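/* Pass out-of-range port and queue numbers to the xstats names API and
 * expect zero stats to be returned.
 */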
1121 static int
1122 xstats_id_abuse_tests(struct test *t)
1123 {
1124         int err;
1125         const uint32_t XSTATS_MAX = 1024;
1126         const uint32_t link_port = 2;
1127
1128         uint32_t ids[XSTATS_MAX];
1129         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1130
1131         /* Create instance with 4 ports */
1132         if (init(t, 1, 4) < 0 ||
1133                         create_ports(t, 4) < 0 ||
1134                         create_atomic_qids(t, 1) < 0) {
1135                 printf("%d: Error initializing device\n", __LINE__);
1136                 goto fail;
1137         }
1138
1139         err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1140         if (err != 1) {
1141                 printf("%d: error mapping lb qid\n", __LINE__);
1142                 goto fail;
1143         }
1144
1145         if (rte_event_dev_start(evdev) < 0) {
1146                 printf("%d: Error with start call\n", __LINE__);
1147                 goto fail;
1148         }
1149
1150         /* no test for device, as it ignores the port/q number */
1151         int num_stats = rte_event_dev_xstats_names_get(evdev,
1152                                         RTE_EVENT_DEV_XSTATS_PORT,
1153                                         UINT8_MAX-1, xstats_names, ids,
1154                                         XSTATS_MAX);
1155         if (num_stats != 0) {
1156                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1157                                 0, num_stats);
1158                 goto fail;
1159         }
1160
1161         num_stats = rte_event_dev_xstats_names_get(evdev,
1162                                         RTE_EVENT_DEV_XSTATS_QUEUE,
1163                                         UINT8_MAX-1, xstats_names, ids,
1164                                         XSTATS_MAX);
1165         if (num_stats != 0) {
1166                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1167                                 0, num_stats);
1168                 goto fail;
1169         }
1170
1171         cleanup(t);
1172         return 0;
1173 fail:
1174         cleanup(t);
1175         return -1;
1176 }
1177
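/* Repeatedly reconfigure, start and stop a single-port, single-queue
 * device, pushing one packet through per iteration, to check that port
 * credits are handled correctly across reconfiguration.
 */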
1178 static int
1179 port_reconfig_credits(struct test *t)
1180 {
1181         if (init(t, 1, 1) < 0) {
1182                 printf("%d: Error initializing device\n", __LINE__);
1183                 return -1;
1184         }
1185
1186         uint32_t i;
1187         const uint32_t NUM_ITERS = 32;
1188         for (i = 0; i < NUM_ITERS; i++) {
1189                 const struct rte_event_queue_conf conf = {
1190                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1191                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1192                         .nb_atomic_flows = 1024,
1193                         .nb_atomic_order_sequences = 1024,
1194                 };
1195                 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1196                         printf("%d: error creating qid\n", __LINE__);
1197                         return -1;
1198                 }
1199                 t->qid[0] = 0;
1200
1201                 static const struct rte_event_port_conf port_conf = {
1202                                 .new_event_threshold = 128,
1203                                 .dequeue_depth = 32,
1204                                 .enqueue_depth = 64,
1205                 };
1206                 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1207                         printf("%d Error setting up port\n", __LINE__);
1208                         return -1;
1209                 }
1210
1211                 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1212                 if (links != 1) {
1213                         printf("%d: error mapping lb qid\n", __LINE__);
1214                         goto fail;
1215                 }
1216
1217                 if (rte_event_dev_start(evdev) < 0) {
1218                         printf("%d: Error with start call\n", __LINE__);
1219                         goto fail;
1220                 }
1221
1222                 const uint32_t NPKTS = 1;
1223                 uint32_t j;
1224                 for (j = 0; j < NPKTS; j++) {
1225                         struct rte_event ev;
1226                         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1227                         if (!arp) {
1228                                 printf("%d: gen of pkt failed\n", __LINE__);
1229                                 goto fail;
1230                         }
1231                         ev.queue_id = t->qid[0];
1232                         ev.op = RTE_EVENT_OP_NEW;
1233                         ev.mbuf = arp;
1234                         int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1235                         if (err != 1) {
1236                                 printf("%d: Failed to enqueue\n", __LINE__);
1237                                 rte_event_dev_dump(0, stdout);
1238                                 goto fail;
1239                         }
1240                 }
1241
1242                 rte_event_schedule(evdev);
1243
1244                 struct rte_event ev[NPKTS];
1245                 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1246                                                         NPKTS, 0);
1247                 if (deq != 1)
1248                         printf("%d error; no packet dequeued\n", __LINE__);
1249
1250                 /* let cleanup below stop the device on last iter */
1251                 if (i != NUM_ITERS-1)
1252                         rte_event_dev_stop(evdev);
1253         }
1254
1255         cleanup(t);
1256         return 0;
1257 fail:
1258         cleanup(t);
1259         return -1;
1260 }
1261
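/* Re-link a port before starting the device: link it to an atomic queue,
 * unlink it, then link it to a single-link queue instead.
 */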
1262 static int
1263 port_single_lb_reconfig(struct test *t)
1264 {
1265         if (init(t, 2, 2) < 0) {
1266                 printf("%d: Error initializing device\n", __LINE__);
1267                 goto fail;
1268         }
1269
1270         static const struct rte_event_queue_conf conf_lb_atomic = {
1271                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1272                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1273                 .nb_atomic_flows = 1024,
1274                 .nb_atomic_order_sequences = 1024,
1275         };
1276         if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1277                 printf("%d: error creating qid\n", __LINE__);
1278                 goto fail;
1279         }
1280
1281         static const struct rte_event_queue_conf conf_single_link = {
1282                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1283                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1284                 .nb_atomic_flows = 1024,
1285                 .nb_atomic_order_sequences = 1024,
1286         };
1287         if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1288                 printf("%d: error creating qid\n", __LINE__);
1289                 goto fail;
1290         }
1291
1292         struct rte_event_port_conf port_conf = {
1293                 .new_event_threshold = 128,
1294                 .dequeue_depth = 32,
1295                 .enqueue_depth = 64,
1296         };
1297         if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1298                 printf("%d Error setting up port\n", __LINE__);
1299                 goto fail;
1300         }
1301         if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1302                 printf("%d Error setting up port\n", __LINE__);
1303                 goto fail;
1304         }
1305
1306         /* link port to lb queue */
1307         uint8_t queue_id = 0;
1308         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1309                 printf("%d: error creating link for qid\n", __LINE__);
1310                 goto fail;
1311         }
1312
1313         int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1314         if (ret != 1) {
1315                 printf("%d: Error unlinking lb port\n", __LINE__);
1316                 goto fail;
1317         }
1318
1319         queue_id = 1;
1320         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1321                 printf("%d: error creating link for qid\n", __LINE__);
1322                 goto fail;
1323         }
1324
1325         queue_id = 0;
1326         int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1327         if (err != 1) {
1328                 printf("%d: error mapping lb qid\n", __LINE__);
1329                 goto fail;
1330         }
1331
1332         if (rte_event_dev_start(evdev) < 0) {
1333                 printf("%d: Error with start call\n", __LINE__);
1334                 goto fail;
1335         }
1336
1337         cleanup(t);
1338         return 0;
1339 fail:
1340         cleanup(t);
1341         return -1;
1342 }
1343
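/* Brute-force the xstats API: request names and values for every stat type
 * and every possible port/queue number and make sure nothing crashes.
 */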
1344 static int
1345 xstats_brute_force(struct test *t)
1346 {
1347         uint32_t i;
1348         const uint32_t XSTATS_MAX = 1024;
1349         uint32_t ids[XSTATS_MAX];
1350         uint64_t values[XSTATS_MAX];
1351         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1352
1353
1354         /* Create instance with 4 ports */
1355         if (init(t, 1, 4) < 0 ||
1356                         create_ports(t, 4) < 0 ||
1357                         create_atomic_qids(t, 1) < 0) {
1358                 printf("%d: Error initializing device\n", __LINE__);
1359                 return -1;
1360         }
1361
1362         int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1363         if (err != 1) {
1364                 printf("%d: error mapping lb qid\n", __LINE__);
1365                 goto fail;
1366         }
1367
1368         if (rte_event_dev_start(evdev) < 0) {
1369                 printf("%d: Error with start call\n", __LINE__);
1370                 goto fail;
1371         }
1372
1373         for (i = 0; i < XSTATS_MAX; i++)
1374                 ids[i] = i;
1375
1376         for (i = 0; i < 3; i++) {
1377                 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1378                 uint32_t j;
1379                 for (j = 0; j < UINT8_MAX; j++) {
1380                         rte_event_dev_xstats_names_get(evdev, mode,
1381                                 j, xstats_names, ids, XSTATS_MAX);
1382
1383                         rte_event_dev_xstats_get(evdev, mode, j, ids,
1384                                                  values, XSTATS_MAX);
1385                 }
1386         }
1387
1388         cleanup(t);
1389         return 0;
1390 fail:
1391         cleanup(t);
1392         return -1;
1393 }
1394
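/* Look up xstats ids and values by name, reset individual device stats by
 * id, and verify they read back as zero.
 */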
1395 static int
1396 xstats_id_reset_tests(struct test *t)
1397 {
1398         const int wrk_enq = 2;
1399         int err;
1400
1401         /* Create instance with 4 ports */
1402         if (init(t, 1, 4) < 0 ||
1403                         create_ports(t, 4) < 0 ||
1404                         create_atomic_qids(t, 1) < 0) {
1405                 printf("%d: Error initializing device\n", __LINE__);
1406                 return -1;
1407         }
1408
1409         /* CQ mapping to QID */
1410         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1411         if (err != 1) {
1412                 printf("%d: error mapping lb qid\n", __LINE__);
1413                 goto fail;
1414         }
1415
1416         if (rte_event_dev_start(evdev) < 0) {
1417                 printf("%d: Error with start call\n", __LINE__);
1418                 goto fail;
1419         }
1420
1421 #define XSTATS_MAX 1024
1422         int ret;
1423         uint32_t i;
1424         uint32_t ids[XSTATS_MAX];
1425         uint64_t values[XSTATS_MAX];
1426         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1427
1428         for (i = 0; i < XSTATS_MAX; i++)
1429                 ids[i] = i;
1430
1431 #define NUM_DEV_STATS 6
1432         /* Device names / values */
1433         int num_stats = rte_event_dev_xstats_names_get(evdev,
1434                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1435                                         0, xstats_names, ids, XSTATS_MAX);
1436         if (num_stats != NUM_DEV_STATS) {
1437                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1438                                 NUM_DEV_STATS, num_stats);
1439                 goto fail;
1440         }
1441         ret = rte_event_dev_xstats_get(evdev,
1442                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1443                                         0, ids, values, num_stats);
1444         if (ret != NUM_DEV_STATS) {
1445                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1446                                 NUM_DEV_STATS, ret);
1447                 goto fail;
1448         }
1449
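        /* Inject a known number of NEW events through port 0 so the device,
         * port and queue xstats checked below have predictable values.
         */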
1450 #define NPKTS 7
1451         for (i = 0; i < NPKTS; i++) {
1452                 struct rte_event ev;
1453                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1454                 if (!arp) {
1455                         printf("%d: gen of pkt failed\n", __LINE__);
1456                         goto fail;
1457                 }
1458                 ev.queue_id = t->qid[i];
1459                 ev.op = RTE_EVENT_OP_NEW;
1460                 ev.mbuf = arp;
1461                 arp->seqn = i;
1462
1463                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1464                 if (err != 1) {
1465                         printf("%d: Failed to enqueue\n", __LINE__);
1466                         goto fail;
1467                 }
1468         }
1469
1470         rte_event_schedule(evdev);
1471
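        /* After one schedule call the expected device-level stats are:
         * rx = tx = NPKTS, no drops, and exactly one sched call.
         */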
1472         static const char * const dev_names[] = {
1473                 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1474                 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1475         };
1476         uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1477         for (i = 0; (int)i < ret; i++) {
1478                 unsigned int id;
1479                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1480                                                                 dev_names[i],
1481                                                                 &id);
1482                 if (id != i) {
1483                         printf("%d: %s id incorrect, expected %d got %d\n",
1484                                         __LINE__, dev_names[i], i, id);
1485                         goto fail;
1486                 }
1487                 if (val != dev_expected[i]) {
1488                         printf("%d: %s value incorrect, expected %"PRIu64
1489                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1490                                 dev_expected[i], val);
1491                         goto fail;
1492                 }
1493                 /* reset to zero */
1494                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1495                                                 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1496                                                 &id,
1497                                                 1);
1498                 if (reset_ret) {
1499                         printf("%d: failed to reset successfully\n", __LINE__);
1500                         goto fail;
1501                 }
1502                 dev_expected[i] = 0;
1503                 /* check value again */
1504                 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1505                 if (val != dev_expected[i]) {
1506                         printf("%d: %s value incorrect, expected %"PRIu64
1507                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1508                                 dev_expected[i], val);
1509                         goto fail;
1510                 }
1511         }
1512
1513 /* 48 is the stat offset from the start of the device's whole xstats.
1514  * This WILL break every time we add a statistic to a port
1515  * or the device, but there is no other way to test.
1516  */
1517 #define PORT_OFF 48
1518 /* num stats for the tested port. CQ size adds more stats to a port */
1519 #define NUM_PORT_STATS 21
1520 /* the port to test. */
1521 #define PORT 2
1522         num_stats = rte_event_dev_xstats_names_get(evdev,
1523                                         RTE_EVENT_DEV_XSTATS_PORT, PORT,
1524                                         xstats_names, ids, XSTATS_MAX);
1525         if (num_stats != NUM_PORT_STATS) {
1526                 printf("%d: expected %d stats, got return %d\n",
1527                         __LINE__, NUM_PORT_STATS, num_stats);
1528                 goto fail;
1529         }
1530         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1531                                         ids, values, num_stats);
1532
1533         if (ret != NUM_PORT_STATS) {
1534                 printf("%d: expected %d stats, got return %d\n",
1535                                 __LINE__, NUM_PORT_STATS, ret);
1536                 goto fail;
1537         }
1538         static const char * const port_names[] = {
1539                 "port_2_rx",
1540                 "port_2_tx",
1541                 "port_2_drop",
1542                 "port_2_inflight",
1543                 "port_2_avg_pkt_cycles",
1544                 "port_2_credits",
1545                 "port_2_rx_ring_used",
1546                 "port_2_rx_ring_free",
1547                 "port_2_cq_ring_used",
1548                 "port_2_cq_ring_free",
1549                 "port_2_dequeue_calls",
1550                 "port_2_dequeues_returning_0",
1551                 "port_2_dequeues_returning_1-4",
1552                 "port_2_dequeues_returning_5-8",
1553                 "port_2_dequeues_returning_9-12",
1554                 "port_2_dequeues_returning_13-16",
1555                 "port_2_dequeues_returning_17-20",
1556                 "port_2_dequeues_returning_21-24",
1557                 "port_2_dequeues_returning_25-28",
1558                 "port_2_dequeues_returning_29-32",
1559                 "port_2_dequeues_returning_33-36",
1560         };
1561         uint64_t port_expected[] = {
1562                 0, /* rx */
1563                 NPKTS, /* tx */
1564                 0, /* drop */
1565                 NPKTS, /* inflight */
1566                 0, /* avg pkt cycles */
1567                 0, /* credits */
1568                 0, /* rx ring used */
1569                 4096, /* rx ring free */
1570                 NPKTS,  /* cq ring used */
1571                 25, /* cq ring free */
1572                 0, /* dequeue zero calls */
1573                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1574                 0, 0, 0, 0, 0,
1575         };
1576         uint64_t port_expected_zero[] = {
1577                 0, /* rx */
1578                 0, /* tx */
1579                 0, /* drop */
1580                 NPKTS, /* inflight */
1581                 0, /* avg pkt cycles */
1582                 0, /* credits */
1583                 0, /* rx ring used */
1584                 4096, /* rx ring free */
1585                 NPKTS,  /* cq ring used */
1586                 25, /* cq ring free */
1587                 0, /* dequeue zero calls */
1588                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1589                 0, 0, 0, 0, 0,
1590         };
1591         if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1592                         RTE_DIM(port_names) != NUM_PORT_STATS) {
1593                 printf("%d: port array of wrong size\n", __LINE__);
1594                 goto fail;
1595         }
1596
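        /* Check each port stat by name and id, reset it individually, then
         * confirm the post-reset value matches port_expected_zero.
         */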
1597         int failed = 0;
1598         for (i = 0; (int)i < ret; i++) {
1599                 unsigned int id;
1600                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1601                                                                 port_names[i],
1602                                                                 &id);
1603                 if (id != i + PORT_OFF) {
1604                         printf("%d: %s id incorrect, expected %d got %d\n",
1605                                         __LINE__, port_names[i], i+PORT_OFF,
1606                                         id);
1607                         failed = 1;
1608                 }
1609                 if (val != port_expected[i]) {
1610                         printf("%d: %s value incorrect, expected %"PRIu64
1611                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1612                                 port_expected[i], val);
1613                         failed = 1;
1614                 }
1615                 /* reset to zero */
1616                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1617                                                 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1618                                                 &id,
1619                                                 1);
1620                 if (reset_ret) {
1621                         printf("%d: failed to reset successfully\n", __LINE__);
1622                         failed = 1;
1623                 }
1624                 /* check value again */
1625                 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1626                 if (val != port_expected_zero[i]) {
1627                         printf("%d: %s value incorrect, expected %"PRIu64
1628                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1629                                 port_expected_zero[i], val);
1630                         failed = 1;
1631                 }
1632         }
1633         if (failed)
1634                 goto fail;
1635
1636 /* num queue stats */
1637 #define NUM_Q_STATS 13
1638 /* queue offset from the start of the device's whole xstats.
1639  * This will break every time we add a statistic to a device/port/queue.
1640  */
1641 #define QUEUE_OFF 90
1642         const uint32_t queue = 0;
1643         num_stats = rte_event_dev_xstats_names_get(evdev,
1644                                         RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1645                                         xstats_names, ids, XSTATS_MAX);
1646         if (num_stats != NUM_Q_STATS) {
1647                 printf("%d: expected %d stats, got return %d\n",
1648                         __LINE__, NUM_Q_STATS, num_stats);
1649                 goto fail;
1650         }
1651         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1652                                         queue, ids, values, num_stats);
1653         if (ret != NUM_Q_STATS) {
1654                 printf("%d: expected %d stats, got return %d\n", __LINE__, NUM_Q_STATS, ret);
1655                 goto fail;
1656         }
1657         static const char * const queue_names[] = {
1658                 "qid_0_rx",
1659                 "qid_0_tx",
1660                 "qid_0_drop",
1661                 "qid_0_inflight",
1662                 "qid_0_iq_size",
1663                 "qid_0_iq_0_used",
1664                 "qid_0_iq_1_used",
1665                 "qid_0_iq_2_used",
1666                 "qid_0_iq_3_used",
1667                 "qid_0_port_0_pinned_flows",
1668                 "qid_0_port_1_pinned_flows",
1669                 "qid_0_port_2_pinned_flows",
1670                 "qid_0_port_3_pinned_flows",
1671         };
1672         uint64_t queue_expected[] = {
1673                 7, /* rx */
1674                 7, /* tx */
1675                 0, /* drop */
1676                 7, /* inflight */
1677                 512, /* iq size */
1678                 0, /* iq 0 used */
1679                 0, /* iq 1 used */
1680                 0, /* iq 2 used */
1681                 0, /* iq 3 used */
1682                 0, /* qid 0 port 0 pinned flows */
1683                 0, /* qid 0 port 1 pinned flows */
1684                 1, /* qid 0 port 2 pinned flows */
1685                 0, /* qid 0 port 3 pinned flows */
1686         };
1687         uint64_t queue_expected_zero[] = {
1688                 0, /* rx */
1689                 0, /* tx */
1690                 0, /* drop */
1691                 7, /* inflight */
1692                 512, /* iq size */
1693                 0, /* iq 0 used */
1694                 0, /* iq 1 used */
1695                 0, /* iq 2 used */
1696                 0, /* iq 3 used */
1697                 0, /* qid 0 port 0 pinned flows */
1698                 0, /* qid 0 port 1 pinned flows */
1699                 1, /* qid 0 port 2 pinned flows */
1700                 0, /* qid 0 port 3 pinned flows */
1701         };
1702         if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1703                         RTE_DIM(queue_names) != NUM_Q_STATS) {
1704                 printf("%d: queue array of wrong size\n", __LINE__);
1705                 goto fail;
1706         }
1707
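        /* Same procedure for the queue stats: verify each by name, reset one
         * at a time, and re-check against queue_expected_zero.
         */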
1708         failed = 0;
1709         for (i = 0; (int)i < ret; i++) {
1710                 unsigned int id;
1711                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1712                                                                 queue_names[i],
1713                                                                 &id);
1714                 if (id != i + QUEUE_OFF) {
1715                         printf("%d: %s id incorrect, expected %d got %d\n",
1716                                         __LINE__, queue_names[i], i+QUEUE_OFF,
1717                                         id);
1718                         failed = 1;
1719                 }
1720                 if (val != queue_expected[i]) {
1721                         printf("%d: %s value incorrect, expected %"PRIu64
1722                                 " got %"PRIu64"\n", __LINE__, queue_names[i],
1723                                 queue_expected[i], val);
1724                         failed = 1;
1725                 }
1726                 /* reset to zero */
1727                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1728                                                 RTE_EVENT_DEV_XSTATS_QUEUE,
1729                                                 queue, &id, 1);
1730                 if (reset_ret) {
1731                         printf("%d: failed to reset successfully\n", __LINE__);
1732                         failed = 1;
1733                 }
1734                 /* check value again */
1735                 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1736                                                         0);
1737                 if (val != queue_expected_zero[i]) {
1738                         printf("%d: %s value incorrect, expected %"PRIu64
1739                                 " got %"PRIu64"\n", __LINE__, queue_names[i],
1740                                 queue_expected_zero[i], val);
1741                         failed = 1;
1742                 }
1743         }
1744
1745         if (failed)
1746                 goto fail;
1747
1748         cleanup(t);
1749         return 0;
1750 fail:
1751         cleanup(t);
1752         return -1;
1753 }
1754
1755 static int
1756 ordered_reconfigure(struct test *t)
1757 {
1758         if (init(t, 1, 1) < 0 ||
1759                         create_ports(t, 1) < 0) {
1760                 printf("%d: Error initializing device\n", __LINE__);
1761                 return -1;
1762         }
1763
1764         const struct rte_event_queue_conf conf = {
1765                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
1766                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1767                         .nb_atomic_flows = 1024,
1768                         .nb_atomic_order_sequences = 1024,
1769         };
1770
1771         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1772                 printf("%d: error creating qid\n", __LINE__);
1773                 goto failed;
1774         }
1775
1776         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1777                 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1778                 goto failed;
1779         }
1780
1781         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1782         if (rte_event_dev_start(evdev) < 0) {
1783                 printf("%d: Error with start call\n", __LINE__);
1784                 goto failed;
1785         }
1786
1787         cleanup(t);
1788         return 0;
1789 failed:
1790         cleanup(t);
1791         return -1;
1792 }
1793
1794 static int
1795 qid_priorities(struct test *t)
1796 {
1797         /* Test works by having a CQ with enough empty space for all packets,
1798          * and enqueueing 3 packets to 3 QIDs. They must return based on the
1799          * priority of the QID, not the ingress order, to pass the test
1800          */
1801         unsigned int i;
1802         /* Create instance with 1 port and 3 qids */
1803         if (init(t, 3, 1) < 0 ||
1804                         create_ports(t, 1) < 0) {
1805                 printf("%d: Error initializing device\n", __LINE__);
1806                 return -1;
1807         }
1808
1809         for (i = 0; i < 3; i++) {
1810                 /* Create QID */
1811                 const struct rte_event_queue_conf conf = {
1812                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1813                         /* increase priority (0 == highest), as we go */
1814                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1815                         .nb_atomic_flows = 1024,
1816                         .nb_atomic_order_sequences = 1024,
1817                 };
1818
1819                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1820                         printf("%d: error creating qid %d\n", __LINE__, i);
1821                         return -1;
1822                 }
1823                 t->qid[i] = i;
1824         }
1825         t->nb_qids = i;
1826         /* map all QIDs to port */
1827         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1828
1829         if (rte_event_dev_start(evdev) < 0) {
1830                 printf("%d: Error with start call\n", __LINE__);
1831                 return -1;
1832         }
1833
1834         /* enqueue 3 packets, setting seqn and QID to check priority */
1835         for (i = 0; i < 3; i++) {
1836                 struct rte_event ev;
1837                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1838                 if (!arp) {
1839                         printf("%d: gen of pkt failed\n", __LINE__);
1840                         return -1;
1841                 }
1842                 ev.queue_id = t->qid[i];
1843                 ev.op = RTE_EVENT_OP_NEW;
1844                 ev.mbuf = arp;
1845                 arp->seqn = i;
1846
1847                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1848                 if (err != 1) {
1849                         printf("%d: Failed to enqueue\n", __LINE__);
1850                         return -1;
1851                 }
1852         }
1853
1854         rte_event_schedule(evdev);
1855
1856         /* dequeue packets, verify priority was upheld */
1857         struct rte_event ev[32];
1858         uint32_t deq_pkts =
1859                 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1860         if (deq_pkts != 3) {
1861                 printf("%d: failed to deq packets\n", __LINE__);
1862                 rte_event_dev_dump(evdev, stdout);
1863                 return -1;
1864         }
1865         for (i = 0; i < 3; i++) {
1866                 if (ev[i].mbuf->seqn != 2-i) {
1867                         printf(
1868                                 "%d: qid priority test: seqn %d incorrectly prioritized\n",
1869                                         __LINE__, i);
1870                 }
1871         }
1872
1873         cleanup(t);
1874         return 0;
1875 }
1876
1877 static int
1878 load_balancing(struct test *t)
1879 {
1880         const int rx_enq = 0;
1881         int err;
1882         uint32_t i;
1883
1884         if (init(t, 1, 4) < 0 ||
1885                         create_ports(t, 4) < 0 ||
1886                         create_atomic_qids(t, 1) < 0) {
1887                 printf("%d: Error initializing device\n", __LINE__);
1888                 return -1;
1889         }
1890
1891         for (i = 0; i < 3; i++) {
1892                 /* map port 1 - 3 inclusive */
1893                 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1894                                 NULL, 1) != 1) {
1895                         printf("%d: error mapping qid to port %d\n",
1896                                         __LINE__, i);
1897                         return -1;
1898                 }
1899         }
1900
1901         if (rte_event_dev_start(evdev) < 0) {
1902                 printf("%d: Error with start call\n", __LINE__);
1903                 return -1;
1904         }
1905
1906         /************** FORWARD ****************/
1907         /*
1908          * Create a set of flows that test the load-balancing operation of the
1909          * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1910          * with a new flow, which should be sent to the 3rd mapped CQ
1911          */
1912         static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
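        /* With atomic scheduling each flow should stay pinned to one CQ:
         * flow 0 (4 events), flow 1 (2 events) and flow 2 (3 events) give
         * the per-port inflight counts checked below.
         */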
1913
1914         for (i = 0; i < RTE_DIM(flows); i++) {
1915                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1916                 if (!arp) {
1917                         printf("%d: gen of pkt failed\n", __LINE__);
1918                         return -1;
1919                 }
1920
1921                 struct rte_event ev = {
1922                                 .op = RTE_EVENT_OP_NEW,
1923                                 .queue_id = t->qid[0],
1924                                 .flow_id = flows[i],
1925                                 .mbuf = arp,
1926                 };
1927                 /* generate pkt and enqueue */
1928                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1929                 if (err < 0) {
1930                         printf("%d: Failed to enqueue\n", __LINE__);
1931                         return -1;
1932                 }
1933         }
1934
1935         rte_event_schedule(evdev);
1936
1937         struct test_event_dev_stats stats;
1938         err = test_event_dev_stats_get(evdev, &stats);
1939         if (err) {
1940                 printf("%d: failed to get stats\n", __LINE__);
1941                 return -1;
1942         }
1943
1944         if (stats.port_inflight[1] != 4) {
1945                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1946                                 __func__);
1947                 return -1;
1948         }
1949         if (stats.port_inflight[2] != 2) {
1950                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1951                                 __func__);
1952                 return -1;
1953         }
1954         if (stats.port_inflight[3] != 3) {
1955                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
1956                                 __func__);
1957                 return -1;
1958         }
1959
1960         cleanup(t);
1961         return 0;
1962 }
1963
1964 static int
1965 load_balancing_history(struct test *t)
1966 {
1967         struct test_event_dev_stats stats = {0};
1968         const int rx_enq = 0;
1969         int err;
1970         uint32_t i;
1971
1972         /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
1973         if (init(t, 1, 4) < 0 ||
1974                         create_ports(t, 4) < 0 ||
1975                         create_atomic_qids(t, 1) < 0)
1976                 return -1;
1977
1978         /* CQ mapping to QID */
1979         if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
1980                 printf("%d: error mapping port 1 qid\n", __LINE__);
1981                 return -1;
1982         }
1983         if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
1984                 printf("%d: error mapping port 2 qid\n", __LINE__);
1985                 return -1;
1986         }
1987         if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
1988                 printf("%d: error mapping port 3 qid\n", __LINE__);
1989                 return -1;
1990         }
1991         if (rte_event_dev_start(evdev) < 0) {
1992                 printf("%d: Error with start call\n", __LINE__);
1993                 return -1;
1994         }
1995
1996         /*
1997          * Create a set of flows that test the load-balancing operation of the
1998          * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
1999          * the packet from CQ 0, send in a new set of flows. Ensure that:
2000          *  1. The new flow 3 gets into the empty CQ0
2001          *  2. Packets for existing flows get added into CQ1
2002          *  3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2003          *     more outstanding pkts
2004          *
2005          *  This test makes sure that when a flow ends (i.e. all packets
2006          *  have been completed for that flow), that the flow can be moved
2007          *  to a different CQ when new packets come in for that flow.
2008          */
2009         static uint32_t flows1[] = {0, 1, 1, 2};
2010
2011         for (i = 0; i < RTE_DIM(flows1); i++) {
2012                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2013                 struct rte_event ev = {
2014                                 .flow_id = flows1[i],
2015                                 .op = RTE_EVENT_OP_NEW,
2016                                 .queue_id = t->qid[0],
2017                                 .event_type = RTE_EVENT_TYPE_CPU,
2018                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2019                                 .mbuf = arp
2020                 };
2021
2022                 if (!arp) {
2023                         printf("%d: gen of pkt failed\n", __LINE__);
2024                         return -1;
2025                 }
2026                 arp->hash.rss = flows1[i];
2027                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2028                 if (err < 0) {
2029                         printf("%d: Failed to enqueue\n", __LINE__);
2030                         return -1;
2031                 }
2032         }
2033
2034         /* call the scheduler */
2035         rte_event_schedule(evdev);
2036
2037         /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2038         struct rte_event ev;
2039         if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2040                 printf("%d: failed to dequeue\n", __LINE__);
2041                 return -1;
2042         }
2043         if (ev.mbuf->hash.rss != flows1[0]) {
2044                 printf("%d: unexpected flow received\n", __LINE__);
2045                 return -1;
2046         }
2047
2048         /* drop the flow 0 packet from port 1 */
2049         rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2050
2051         /* call the scheduler */
2052         rte_event_schedule(evdev);
2053
2054         /*
2055          * Set up the next set of flows, first a new flow to fill up
2056          * CQ 0, so that the next flow 0 packet should go to CQ2
2057          */
2058         static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2059
2060         for (i = 0; i < RTE_DIM(flows2); i++) {
2061                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2062                 struct rte_event ev = {
2063                                 .flow_id = flows2[i],
2064                                 .op = RTE_EVENT_OP_NEW,
2065                                 .queue_id = t->qid[0],
2066                                 .event_type = RTE_EVENT_TYPE_CPU,
2067                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2068                                 .mbuf = arp
2069                 };
2070
2071                 if (!arp) {
2072                         printf("%d: gen of pkt failed\n", __LINE__);
2073                         return -1;
2074                 }
2075                 arp->hash.rss = flows2[i];
2076
2077                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2078                 if (err < 0) {
2079                         printf("%d: Failed to enqueue\n", __LINE__);
2080                         return -1;
2081                 }
2082         }
2083
2084         /* schedule */
2085         rte_event_schedule(evdev);
2086
2087         err = test_event_dev_stats_get(evdev, &stats);
2088         if (err) {
2089                 printf("%d:failed to get stats\n", __LINE__);
2090                 return -1;
2091         }
2092
2093         /*
2094          * Now check the resulting inflights on each port.
2095          */
2096         if (stats.port_inflight[1] != 3) {
2097                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2098                                 __func__);
2099                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2100                                 (unsigned int)stats.port_inflight[1],
2101                                 (unsigned int)stats.port_inflight[2],
2102                                 (unsigned int)stats.port_inflight[3]);
2103                 return -1;
2104         }
2105         if (stats.port_inflight[2] != 4) {
2106                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2107                                 __func__);
2108                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2109                                 (unsigned int)stats.port_inflight[1],
2110                                 (unsigned int)stats.port_inflight[2],
2111                                 (unsigned int)stats.port_inflight[3]);
2112                 return -1;
2113         }
2114         if (stats.port_inflight[3] != 2) {
2115                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2116                                 __func__);
2117                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2118                                 (unsigned int)stats.port_inflight[1],
2119                                 (unsigned int)stats.port_inflight[2],
2120                                 (unsigned int)stats.port_inflight[3]);
2121                 return -1;
2122         }
2123
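        /* Drain and release any events still held by the worker ports so no
         * inflights remain when the device is cleaned up.
         */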
2124         for (i = 1; i <= 3; i++) {
2125                 struct rte_event ev;
2126                 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2127                         rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2128         }
2129         rte_event_schedule(evdev);
2130
2131         cleanup(t);
2132         return 0;
2133 }
2134
2135 static int
2136 invalid_qid(struct test *t)
2137 {
2138         struct test_event_dev_stats stats;
2139         const int rx_enq = 0;
2140         int err;
2141         uint32_t i;
2142
2143         if (init(t, 1, 4) < 0 ||
2144                         create_ports(t, 4) < 0 ||
2145                         create_atomic_qids(t, 1) < 0) {
2146                 printf("%d: Error initializing device\n", __LINE__);
2147                 return -1;
2148         }
2149
2150         /* CQ mapping to QID */
2151         for (i = 0; i < 4; i++) {
2152                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2153                                 NULL, 1);
2154                 if (err != 1) {
2155                         printf("%d: error mapping port 1 qid\n", __LINE__);
2156                         return -1;
2157                 }
2158         }
2159
2160         if (rte_event_dev_start(evdev) < 0) {
2161                 printf("%d: Error with start call\n", __LINE__);
2162                 return -1;
2163         }
2164
2165         /*
2166          * Send in a packet with an invalid qid to the scheduler.
2167          * We should see the packet enqueued OK, but the inflights for
2168          * that packet should not be incremented, and the rx_dropped
2169          * should be incremented.
2170          */
2171         static uint32_t flows1[] = {20};
2172
2173         for (i = 0; i < RTE_DIM(flows1); i++) {
2174                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2175                 if (!arp) {
2176                         printf("%d: gen of pkt failed\n", __LINE__);
2177                         return -1;
2178                 }
2179
2180                 struct rte_event ev = {
2181                                 .op = RTE_EVENT_OP_NEW,
2182                                 .queue_id = t->qid[0] + flows1[i],
2183                                 .flow_id = i,
2184                                 .mbuf = arp,
2185                 };
2186                 /* generate pkt and enqueue */
2187                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2188                 if (err < 0) {
2189                         printf("%d: Failed to enqueue\n", __LINE__);
2190                         return -1;
2191                 }
2192         }
2193
2194         /* call the scheduler */
2195         rte_event_schedule(evdev);
2196
2197         err = test_event_dev_stats_get(evdev, &stats);
2198         if (err) {
2199                 printf("%d: failed to get stats\n", __LINE__);
2200                 return -1;
2201         }
2202
2203         /*
2204          * Now check the resulting inflights on the port, and the rx_dropped.
2205          */
2206         if (stats.port_inflight[0] != 0) {
2207                 printf("%d:%s: port 0 inflight count not correct\n", __LINE__,
2208                                 __func__);
2209                 rte_event_dev_dump(evdev, stdout);
2210                 return -1;
2211         }
2212         if (stats.port_rx_dropped[0] != 1) {
2213                 printf("%d:%s: port 0 rx_dropped not incremented\n", __LINE__, __func__);
2214                 rte_event_dev_dump(evdev, stdout);
2215                 return -1;
2216         }
2217         /* each packet drop should only be counted in one place - port or dev */
2218         if (stats.rx_dropped != 0) {
2219                 printf("%d:%s: device rx_dropped count not correct\n", __LINE__,
2220                                 __func__);
2221                 rte_event_dev_dump(evdev, stdout);
2222                 return -1;
2223         }
2224
2225         cleanup(t);
2226         return 0;
2227 }
2228
2229 static int
2230 single_packet(struct test *t)
2231 {
2232         const uint32_t MAGIC_SEQN = 7321;
2233         struct rte_event ev;
2234         struct test_event_dev_stats stats;
2235         const int rx_enq = 0;
2236         const int wrk_enq = 2;
2237         int err;
2238
2239         /* Create instance with 4 ports */
2240         if (init(t, 1, 4) < 0 ||
2241                         create_ports(t, 4) < 0 ||
2242                         create_atomic_qids(t, 1) < 0) {
2243                 printf("%d: Error initializing device\n", __LINE__);
2244                 return -1;
2245         }
2246
2247         /* CQ mapping to QID */
2248         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2249         if (err != 1) {
2250                 printf("%d: error mapping lb qid\n", __LINE__);
2251                 cleanup(t);
2252                 return -1;
2253         }
2254
2255         if (rte_event_dev_start(evdev) < 0) {
2256                 printf("%d: Error with start call\n", __LINE__);
2257                 return -1;
2258         }
2259
2260         /************** Gen pkt and enqueue ****************/
2261         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2262         if (!arp) {
2263                 printf("%d: gen of pkt failed\n", __LINE__);
2264                 return -1;
2265         }
2266
2267         ev.op = RTE_EVENT_OP_NEW;
2268         ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2269         ev.mbuf = arp;
2270         ev.queue_id = 0;
2271         ev.flow_id = 3;
2272         arp->seqn = MAGIC_SEQN;
2273
2274         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2275         if (err < 0) {
2276                 printf("%d: Failed to enqueue\n", __LINE__);
2277                 return -1;
2278         }
2279
2280         rte_event_schedule(evdev);
2281
2282         err = test_event_dev_stats_get(evdev, &stats);
2283         if (err) {
2284                 printf("%d: failed to get stats\n", __LINE__);
2285                 return -1;
2286         }
2287
2288         if (stats.rx_pkts != 1 ||
2289                         stats.tx_pkts != 1 ||
2290                         stats.port_inflight[wrk_enq] != 1) {
2291                 printf("%d: Sched core didn't handle pkt as expected\n",
2292                                 __LINE__);
2293                 rte_event_dev_dump(evdev, stdout);
2294                 return -1;
2295         }
2296
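        /* Dequeue the event; the port inflight count should only drop once
         * the RELEASE below has been enqueued and scheduled.
         */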
2297         uint32_t deq_pkts;
2298
2299         deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2300         if (deq_pkts < 1) {
2301                 printf("%d: Failed to deq\n", __LINE__);
2302                 return -1;
2303         }
2304
2305         err = test_event_dev_stats_get(evdev, &stats);
2306         if (err) {
2307                 printf("%d: failed to get stats\n", __LINE__);
2308                 return -1;
2309         }
2310
2312         if (ev.mbuf->seqn != MAGIC_SEQN) {
2313                 printf("%d: magic sequence number not dequeued\n", __LINE__);
2314                 return -1;
2315         }
2316
2317         rte_pktmbuf_free(ev.mbuf);
2318         err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2319         if (err < 0) {
2320                 printf("%d: Failed to enqueue\n", __LINE__);
2321                 return -1;
2322         }
2323         rte_event_schedule(evdev);
2324
2325         err = test_event_dev_stats_get(evdev, &stats);
2326         if (stats.port_inflight[wrk_enq] != 0) {
2327                 printf("%d: port inflight not correct\n", __LINE__);
2328                 return -1;
2329         }
2330
2331         cleanup(t);
2332         return 0;
2333 }
2334
2335 static int
2336 inflight_counts(struct test *t)
2337 {
2338         struct rte_event ev;
2339         struct test_event_dev_stats stats;
2340         const int rx_enq = 0;
2341         const int p1 = 1;
2342         const int p2 = 2;
2343         int err;
2344         int i;
2345
2346         /* Create instance with 3 ports and 2 qids */
2347         if (init(t, 2, 3) < 0 ||
2348                         create_ports(t, 3) < 0 ||
2349                         create_atomic_qids(t, 2) < 0) {
2350                 printf("%d: Error initializing device\n", __LINE__);
2351                 return -1;
2352         }
2353
2354         /* CQ mapping to QID */
2355         err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2356         if (err != 1) {
2357                 printf("%d: error mapping lb qid\n", __LINE__);
2358                 cleanup(t);
2359                 return -1;
2360         }
2361         err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2362         if (err != 1) {
2363                 printf("%d: error mapping lb qid\n", __LINE__);
2364                 cleanup(t);
2365                 return -1;
2366         }
2367
2368         if (rte_event_dev_start(evdev) < 0) {
2369                 printf("%d: Error with start call\n", __LINE__);
2370                 return -1;
2371         }
2372
2373         /************** FORWARD ****************/
2374 #define QID1_NUM 5
2375         for (i = 0; i < QID1_NUM; i++) {
2376                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2377
2378                 if (!arp) {
2379                         printf("%d: gen of pkt failed\n", __LINE__);
2380                         goto err;
2381                 }
2382
2383                 ev.queue_id = t->qid[0];
2384                 ev.op = RTE_EVENT_OP_NEW;
2385                 ev.mbuf = arp;
2386                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2387                 if (err != 1) {
2388                         printf("%d: Failed to enqueue\n", __LINE__);
2389                         goto err;
2390                 }
2391         }
2392 #define QID2_NUM 3
2393         for (i = 0; i < QID2_NUM; i++) {
2394                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2395
2396                 if (!arp) {
2397                         printf("%d: gen of pkt failed\n", __LINE__);
2398                         goto err;
2399                 }
2400                 ev.queue_id = t->qid[1];
2401                 ev.op = RTE_EVENT_OP_NEW;
2402                 ev.mbuf = arp;
2403                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2404                 if (err != 1) {
2405                         printf("%d: Failed to enqueue\n", __LINE__);
2406                         goto err;
2407                 }
2408         }
2409
2410         /* schedule */
2411         rte_event_schedule(evdev);
2412
2413         err = test_event_dev_stats_get(evdev, &stats);
2414         if (err) {
2415                 printf("%d: failed to get stats\n", __LINE__);
2416                 goto err;
2417         }
2418
2419         if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2420                         stats.tx_pkts != QID1_NUM + QID2_NUM) {
2421                 printf("%d: Sched core didn't handle pkt as expected\n",
2422                                 __LINE__);
2423                 goto err;
2424         }
2425
2426         if (stats.port_inflight[p1] != QID1_NUM) {
2427                 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2428                                 __func__);
2429                 goto err;
2430         }
2431         if (stats.port_inflight[p2] != QID2_NUM) {
2432                 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2433                                 __func__);
2434                 goto err;
2435         }
2436
2437         /************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
2438         /* port 1 */
2439         struct rte_event events[QID1_NUM + QID2_NUM];
2440         uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2441                         RTE_DIM(events), 0);
2442
2443         if (deq_pkts != QID1_NUM) {
2444                 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2445                 goto err;
2446         }
2447         err = test_event_dev_stats_get(evdev, &stats);
2448         if (stats.port_inflight[p1] != QID1_NUM) {
2449                 printf("%d: port 1 inflight changed after dequeue\n",
2450                                 __LINE__);
2451                 goto err;
2452         }
2453         for (i = 0; i < QID1_NUM; i++) {
2454                 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2455                                 1);
2456                 if (err != 1) {
2457                         printf("%d: %s rte enqueue of inf release failed\n",
2458                                 __LINE__, __func__);
2459                         goto err;
2460                 }
2461         }
2462
2463         /*
2464          * As the scheduler core decrements inflights, it needs to run to
2465          * process packets to act on the drop messages
2466          */
2467         rte_event_schedule(evdev);
2468
2469         err = test_event_dev_stats_get(evdev, &stats);
2470         if (stats.port_inflight[p1] != 0) {
2471                 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2472                 goto err;
2473         }
2474
2475         /* port2 */
2476         deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2477                         RTE_DIM(events), 0);
2478         if (deq_pkts != QID2_NUM) {
2479                 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2480                 goto err;
2481         }
2482         err = test_event_dev_stats_get(evdev, &stats);
2483         if (stats.port_inflight[p2] != QID2_NUM) {
2484                 printf("%d: port 2 inflight changed after dequeue\n",
2485                                 __LINE__);
2486                 goto err;
2487         }
2488         for (i = 0; i < QID2_NUM; i++) {
2489                 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2490                                 1);
2491                 if (err != 1) {
2492                         printf("%d: %s rte enqueue of inf release failed\n",
2493                                 __LINE__, __func__);
2494                         goto err;
2495                 }
2496         }
2497
2498         /*
2499          * As the scheduler core decrements inflights, it needs to run to
2500          * process packets to act on the drop messages
2501          */
2502         rte_event_schedule(evdev);
2503
2504         err = test_event_dev_stats_get(evdev, &stats);
2505         if (stats.port_inflight[p2] != 0) {
2506                 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2507                 goto err;
2508         }
2509         cleanup(t);
2510         return 0;
2511
2512 err:
2513         rte_event_dev_dump(evdev, stdout);
2514         cleanup(t);
2515         return -1;
2516 }
2517
2518 static int
2519 parallel_basic(struct test *t, int check_order)
2520 {
2521         const uint8_t rx_port = 0;
2522         const uint8_t w1_port = 1;
2523         const uint8_t w3_port = 3;
2524         const uint8_t tx_port = 4;
2525         int err;
2526         int i;
2527         uint32_t deq_pkts, j;
2528         struct rte_mbuf *mbufs[3];
2530         const uint32_t MAGIC_SEQN = 1234;
2531
2532         /* Create instance with 5 ports (rx, 3 workers, tx) */
2533         if (init(t, 2, tx_port + 1) < 0 ||
2534                         create_ports(t, tx_port + 1) < 0 ||
2535                         (check_order ?  create_ordered_qids(t, 1) :
2536                                 create_unordered_qids(t, 1)) < 0 ||
2537                         create_directed_qids(t, 1, &tx_port)) {
2538                 printf("%d: Error initializing device\n", __LINE__);
2539                 return -1;
2540         }
2541
2542         /*
2543          * CQ mapping to QID
2544          * We need three ports, all mapped to the same ordered qid0. Then we'll
2545          * take a packet out to each port, re-enqueue in reverse order,
2546          * then make sure the reordering has taken place properly when we
2547          * dequeue from the tx_port.
2548          *
2549          * Simplified test setup diagram:
2550          *
2551          * rx_port        w1_port
2552          *        \     /         \
2553          *         qid0 - w2_port - qid1
2554          *              \         /     \
2555          *                w3_port        tx_port
2556          */
2557         /* CQ mapping to QID for LB ports (directed mapped on create) */
2558         for (i = w1_port; i <= w3_port; i++) {
2559                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2560                                 1);
2561                 if (err != 1) {
2562                         printf("%d: error mapping lb qid\n", __LINE__);
2563                         cleanup(t);
2564                         return -1;
2565                 }
2566         }
2567
2568         if (rte_event_dev_start(evdev) < 0) {
2569                 printf("%d: Error with start call\n", __LINE__);
2570                 return -1;
2571         }
2572
2573         /* Enqueue 3 packets to the rx port */
2574         for (i = 0; i < 3; i++) {
2575                 struct rte_event ev;
2576                 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2577                 if (!mbufs[i]) {
2578                         printf("%d: gen of pkt failed\n", __LINE__);
2579                         return -1;
2580                 }
2581
2582                 ev.queue_id = t->qid[0];
2583                 ev.op = RTE_EVENT_OP_NEW;
2584                 ev.mbuf = mbufs[i];
2585                 mbufs[i]->seqn = MAGIC_SEQN + i;
2586
2587                 /* generate pkt and enqueue */
2588                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2589                 if (err != 1) {
2590                         printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2591                                         __LINE__, i, err);
2592                         return -1;
2593                 }
2594         }
2595
2596         rte_event_schedule(evdev);
2597
2598         /* use extra slot to make logic in loops easier */
2599         struct rte_event deq_ev[w3_port + 1];
2600
2601         /* Dequeue the 3 packets, one from each worker port */
2602         for (i = w1_port; i <= w3_port; i++) {
2603                 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2604                                 &deq_ev[i], 1, 0);
2605                 if (deq_pkts != 1) {
2606                         printf("%d: Failed to deq\n", __LINE__);
2607                         rte_event_dev_dump(evdev, stdout);
2608                         return -1;
2609                 }
2610         }
2611
2612         /* Enqueue each packet in reverse order, flushing after each one */
2613         for (i = w3_port; i >= w1_port; i--) {
2614
2615                 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2616                 deq_ev[i].queue_id = t->qid[1];
2617                 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2618                 if (err != 1) {
2619                         printf("%d: Failed to enqueue\n", __LINE__);
2620                         return -1;
2621                 }
2622         }
2623         rte_event_schedule(evdev);
2624
2625         /* dequeue from the tx ports, we should get 3 packets */
2626         deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2627                         3, 0);
2628
2629         /* Check to see if we've got all 3 packets */
2630         if (deq_pkts != 3) {
2631                 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2632                         __LINE__, deq_pkts, tx_port);
2633                 rte_event_dev_dump(evdev, stdout);
2634                 return -1;
2635         }
2636
2637         /* Check to see if the sequence numbers are in expected order */
2638         if (check_order) {
2639                 for (j = 0 ; j < deq_pkts ; j++) {
2640                         if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2641                                 printf(
2642                                         "%d: Incorrect sequence number(%d) from port %d\n",
2643                                         __LINE__, deq_ev[j].mbuf->seqn, tx_port);
2644                                 return -1;
2645                         }
2646                 }
2647         }
2648
2649         /* Destroy the instance */
2650         cleanup(t);
2651         return 0;
2652 }
2653
2654 static int
2655 ordered_basic(struct test *t)
2656 {
2657         return parallel_basic(t, 1);
2658 }
2659
2660 static int
2661 unordered_basic(struct test *t)
2662 {
2663         return parallel_basic(t, 0);
2664 }
2665
2666 static int
2667 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2668 {
2669         const struct rte_event new_ev = {
2670                         .op = RTE_EVENT_OP_NEW
2671                         /* all other fields zero */
2672         };
2673         struct rte_event ev = new_ev;
2674         unsigned int rx_port = 0; /* port we get the first flow on */
2675         char rx_port_used_stat[64];
2676         char rx_port_free_stat[64];
2677         char other_port_used_stat[64];
2678
2679         if (init(t, 1, 2) < 0 ||
2680                         create_ports(t, 2) < 0 ||
2681                         create_atomic_qids(t, 1) < 0) {
2682                 printf("%d: Error initializing device\n", __LINE__);
2683                 return -1;
2684         }
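        /* Link the single atomic QID to both ports; the scheduler may pick
         * either port for the first flow, so detect which one below.
         */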
2685         int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2686         if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2687                         nb_links != 1) {
2688                 printf("%d: Error linking queue to ports\n", __LINE__);
2689                 goto err;
2690         }
2691         if (rte_event_dev_start(evdev) < 0) {
2692                 printf("%d: Error with start call\n", __LINE__);
2693                 goto err;
2694         }
2695
2696         /* send one packet and see where it goes, port 0 or 1 */
2697         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2698                 printf("%d: Error doing first enqueue\n", __LINE__);
2699                 goto err;
2700         }
2701         rte_event_schedule(evdev);
2702
2703         if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2704                         != 1)
2705                 rx_port = 1;
2706
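        /* Build the per-port xstat names for the CQ that received the first
         * flow and for the other, still-empty, CQ.
         */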
2707         snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2708                         "port_%u_cq_ring_used", rx_port);
2709         snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2710                         "port_%u_cq_ring_free", rx_port);
2711         snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2712                         "port_%u_cq_ring_used", rx_port ^ 1);
2713         if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2714                         != 1) {
2715                 printf("%d: Error, first event not scheduled\n", __LINE__);
2716                 goto err;
2717         }
2718
2719         /* now fill up the rx port's queue with one flow to cause HOLB */
2720         do {
2721                 ev = new_ev;
2722                 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2723                         printf("%d: Error with enqueue\n", __LINE__);
2724                         goto err;
2725                 }
2726                 rte_event_schedule(evdev);
2727         } while (rte_event_dev_xstats_by_name_get(evdev,
2728                                 rx_port_free_stat, NULL) != 0);
2729
2730         /* one more packet, which needs to stay in IQ - i.e. HOLB */
2731         ev = new_ev;
2732         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2733                 printf("%d: Error with enqueue\n", __LINE__);
2734                 goto err;
2735         }
2736         rte_event_schedule(evdev);
2737
2738         /* check that the other port still has an empty CQ */
2739         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2740                         != 0) {
2741                 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2742                 goto err;
2743         }
2744         /* check IQ now has one packet */
2745         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2746                         != 1) {
2747                 printf("%d: Error, QID does not have exactly 1 packet\n",
2748                         __LINE__);
2749                 goto err;
2750         }
2751
2752         /* send another flow, which should pass the other IQ entry */
2753         ev = new_ev;
2754         ev.flow_id = 1;
2755         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2756                 printf("%d: Error with enqueue\n", __LINE__);
2757                 goto err;
2758         }
2759         rte_event_schedule(evdev);
2760
2761         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2762                         != 1) {
2763                 printf("%d: Error, second flow did not pass out first\n",
2764                         __LINE__);
2765                 goto err;
2766         }
2767
2768         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2769                         != 1) {
2770                 printf("%d: Error, QID does not have exactly 1 packet\n",
2771                         __LINE__);
2772                 goto err;
2773         }
2774         cleanup(t);
2775         return 0;
2776 err:
2777         rte_event_dev_dump(evdev, stdout);
2778         cleanup(t);
2779         return -1;
2780 }
2781
2782 static int
2783 worker_loopback_worker_fn(void *arg)
2784 {
2785         struct test *t = arg;
2786         uint8_t port = t->port[1];
2787         int count = 0;
2788         int enqd;
2789
2790         /*
2791          * Takes packets from the input port and then loops them back through
2792          * the Eventdev. Each packet gets looped through QIDs 0-7, 16 times,
2793          * so each packet goes through 8 * 16 = 128 scheduling stages.
2794          */
2795         printf("%d: \tWorker function started\n", __LINE__);
2796         while (count < NUM_PACKETS) {
2797 #define BURST_SIZE 32
2798                 struct rte_event ev[BURST_SIZE];
2799                 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2800                                 BURST_SIZE, 0);
2801                 if (nb_rx == 0) {
2802                         rte_pause();
2803                         continue;
2804                 }
2805
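                /* advance each event to the next QID; events that have not yet
                 * visited all 8 QIDs are forwarded, otherwise wrap to QID 0 and
                 * count a completed pass in the mbuf's udata64
                 */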
2806                 for (i = 0; i < nb_rx; i++) {
2807                         ev[i].queue_id++;
2808                         if (ev[i].queue_id != 8) {
2809                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2810                                 enqd = rte_event_enqueue_burst(evdev, port,
2811                                                 &ev[i], 1);
2812                                 if (enqd != 1) {
2813                                         printf("%d: Can't enqueue FWD!!\n",
2814                                                         __LINE__);
2815                                         return -1;
2816                                 }
2817                                 continue;
2818                         }
2819
2820                         ev[i].queue_id = 0;
2821                         ev[i].mbuf->udata64++;
2822                         if (ev[i].mbuf->udata64 != 16) {
2823                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2824                                 enqd = rte_event_enqueue_burst(evdev, port,
2825                                                 &ev[i], 1);
2826                                 if (enqd != 1) {
2827                                         printf("%d: Can't enqueue FWD!!\n",
2828                                                         __LINE__);
2829                                         return -1;
2830                                 }
2831                                 continue;
2832                         }
2833                         /* we have hit 16 iterations through system - drop */
2834                         rte_pktmbuf_free(ev[i].mbuf);
2835                         count++;
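                        /* enqueue an OP_RELEASE so the port hands its held
                         * schedule context back to the device
                         */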
2836                         ev[i].op = RTE_EVENT_OP_RELEASE;
2837                         enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2838                         if (enqd != 1) {
2839                                 printf("%d drop enqueue failed\n", __LINE__);
2840                                 return -1;
2841                         }
2842                 }
2843         }
2844
2845         return 0;
2846 }
2847
2848 static int
2849 worker_loopback_producer_fn(void *arg)
2850 {
2851         struct test *t = arg;
2852         uint8_t port = t->port[0];
2853         uint64_t count = 0;
2854
2855         printf("%d: \tProducer function started\n", __LINE__);
2856         while (count < NUM_PACKETS) {
2857                 struct rte_mbuf *m = NULL;
2858                 do {
2859                         m = rte_pktmbuf_alloc(t->mbuf_pool);
2860                 } while (m == NULL);
2861
2862                 m->udata64 = 0;
2863
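                /* spread traffic across flows by deriving the flow_id from
                 * the low 16 bits of the mbuf address
                 */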
2864                 struct rte_event ev = {
2865                                 .op = RTE_EVENT_OP_NEW,
2866                                 .queue_id = t->qid[0],
2867                                 .flow_id = (uintptr_t)m & 0xFFFF,
2868                                 .mbuf = m,
2869                 };
2870
2871                 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
2872                         while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
2873                                         1)
2874                                 rte_pause();
2875                 }
2876
2877                 count++;
2878         }
2879
2880         return 0;
2881 }
2882
2883 static int
2884 worker_loopback(struct test *t)
2885 {
2886         /* use a single producer core, and a worker core to see what happens
2887          * if the worker loops packets back multiple times
2888          */
2889         struct test_event_dev_stats stats;
2890         uint64_t print_cycles = 0, cycles = 0;
2891         uint64_t tx_pkts = 0;
2892         int err;
2893         int w_lcore, p_lcore;
2894
2895         if (init(t, 8, 2) < 0 ||
2896                         create_atomic_qids(t, 8) < 0) {
2897                 printf("%d: Error initializing device\n", __LINE__);
2898                 return -1;
2899         }
2900
2901         /* RX with low max events */
2902         static struct rte_event_port_conf conf = {
2903                         .dequeue_depth = 32,
2904                         .enqueue_depth = 64,
2905         };
2906         /* beware: this cannot be set in the static initializer above, as a
2907          * static is initialized only once and this value must be reset for each run
2908          */
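        /* new_event_threshold back-pressures this port: once the device holds
         * this many in-flight events, further NEW enqueues from the port fail
         */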
2909         conf.new_event_threshold = 512;
2910
2911         if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2912                 printf("Error setting up RX port\n");
2913                 return -1;
2914         }
2915         t->port[0] = 0;
2916         /* TX with higher max events */
2917         conf.new_event_threshold = 4096;
2918         if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2919                 printf("Error setting up TX port\n");
2920                 return -1;
2921         }
2922         t->port[1] = 1;
2923
2924         /* CQ mapping to QID */
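        /* a NULL queue list asks rte_event_port_link() to link the port to
         * every configured queue, so it should return 8 (one link per QID)
         */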
2925         err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2926         if (err != 8) { /* should have mapped all queues*/
2927                 printf("%d: error mapping second port to all qids\n", __LINE__);
2928                 return -1;
2929         }
2930
2931         if (rte_event_dev_start(evdev) < 0) {
2932                 printf("%d: Error with start call\n", __LINE__);
2933                 return -1;
2934         }
2935
2936         p_lcore = rte_get_next_lcore(
2937                         /* start core */ -1,
2938                         /* skip master */ 1,
2939                         /* wrap */ 0);
2940         w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
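        /* producer and worker run on two separate lcores; the loop below uses
         * the current (master) lcore to drive rte_event_schedule() for the
         * sw PMD
         */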
2941
2942         rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
2943         rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
2944
2945         print_cycles = cycles = rte_get_timer_cycles();
2946         while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
2947                         rte_eal_get_lcore_state(w_lcore) != FINISHED) {
2948
2949                 rte_event_schedule(evdev);
2950
2951                 uint64_t new_cycles = rte_get_timer_cycles();
2952
2953                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
2954                         test_event_dev_stats_get(evdev, &stats);
2955                         printf(
2956                                 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
2957                                 __LINE__, stats.rx_pkts, stats.tx_pkts);
2958
2959                         print_cycles = new_cycles;
2960                 }
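                /* watchdog: if tx_pkts has not advanced in ~3 seconds, assume
                 * the device has deadlocked, dump its state and fail the test
                 */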
2961                 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
2962                         test_event_dev_stats_get(evdev, &stats);
2963                         if (stats.tx_pkts == tx_pkts) {
2964                                 rte_event_dev_dump(evdev, stdout);
2965                                 printf("Dumping xstats:\n");
2966                                 xstats_print();
2967                                 printf(
2968                                 "%d: No schedules for 3 seconds, deadlock\n",
2969                                         __LINE__);
2970                                 return -1;
2971                         }
2972                         tx_pkts = stats.tx_pkts;
2973                         cycles = new_cycles;
2974                 }
2975         }
2976         rte_event_schedule(evdev); /* ensure all completions are flushed */
2977
2978         rte_eal_mp_wait_lcore();
2979
2980         cleanup(t);
2981         return 0;
2982 }
2983
2984 static struct rte_mempool *eventdev_func_mempool;
2985
2986 static int
2987 test_sw_eventdev(void)
2988 {
2989         struct test *t = malloc(sizeof(struct test));
2990         int ret;
2991
2992         /* manually initialize the op, older gcc's complain on static
2993          * initialization of struct elements that are a bitfield.
2994          */
2995         release_ev.op = RTE_EVENT_OP_RELEASE;
2996
2997         const char *eventdev_name = "event_sw0";
2998         evdev = rte_event_dev_get_dev_id(eventdev_name);
2999         if (evdev < 0) {
3000                 printf("%d: Eventdev %s not found - creating.\n",
3001                                 __LINE__, eventdev_name);
3002                 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3003                         printf("Error creating eventdev\n");
3004                         return -1;
3005                 }
3006                 evdev = rte_event_dev_get_dev_id(eventdev_name);
3007                 if (evdev < 0) {
3008                         printf("Error finding newly created eventdev\n");
3009                         return -1;
3010                 }
3011         }
3012
3013         /* Only create mbuf pool once, reuse for each test run */
3014         if (!eventdev_func_mempool) {
3015                 eventdev_func_mempool = rte_pktmbuf_pool_create(
3016                                 "EVENTDEV_SW_SA_MBUF_POOL",
3017                                 (1<<12), /* 4k buffers */
3018                                 32 /*MBUF_CACHE_SIZE*/,
3019                                 0,
3020                                 512, /* use very small mbufs */
3021                                 rte_socket_id());
3022                 if (!eventdev_func_mempool) {
3023                         printf("ERROR creating mempool\n");
3024                         return -1;
3025                 }
3026         }
3027         t->mbuf_pool = eventdev_func_mempool;
3028
3029         printf("*** Running Single Directed Packet test...\n");
3030         ret = test_single_directed_packet(t);
3031         if (ret != 0) {
3032                 printf("ERROR - Single Directed Packet test FAILED.\n");
3033                 return ret;
3034         }
3035         printf("*** Running Single Load Balanced Packet test...\n");
3036         ret = single_packet(t);
3037         if (ret != 0) {
3038                 printf("ERROR - Single Load Balanced Packet test FAILED.\n");
3039                 return ret;
3040         }
3041         printf("*** Running Unordered Basic test...\n");
3042         ret = unordered_basic(t);
3043         if (ret != 0) {
3044                 printf("ERROR - Unordered Basic test FAILED.\n");
3045                 return ret;
3046         }
3047         printf("*** Running Ordered Basic test...\n");
3048         ret = ordered_basic(t);
3049         if (ret != 0) {
3050                 printf("ERROR - Ordered Basic test FAILED.\n");
3051                 return ret;
3052         }
3053         printf("*** Running Burst Packets test...\n");
3054         ret = burst_packets(t);
3055         if (ret != 0) {
3056                 printf("ERROR - Burst Packets test FAILED.\n");
3057                 return ret;
3058         }
3059         printf("*** Running Load Balancing test...\n");
3060         ret = load_balancing(t);
3061         if (ret != 0) {
3062                 printf("ERROR - Load Balancing test FAILED.\n");
3063                 return ret;
3064         }
3065         printf("*** Running Prioritized Directed test...\n");
3066         ret = test_priority_directed(t);
3067         if (ret != 0) {
3068                 printf("ERROR - Prioritized Directed test FAILED.\n");
3069                 return ret;
3070         }
3071         printf("*** Running Prioritized Atomic test...\n");
3072         ret = test_priority_atomic(t);
3073         if (ret != 0) {
3074                 printf("ERROR - Prioritized Atomic test FAILED.\n");
3075                 return ret;
3076         }
3077
3078         printf("*** Running Prioritized Ordered test...\n");
3079         ret = test_priority_ordered(t);
3080         if (ret != 0) {
3081                 printf("ERROR - Prioritized Ordered test FAILED.\n");
3082                 return ret;
3083         }
3084         printf("*** Running Prioritized Unordered test...\n");
3085         ret = test_priority_unordered(t);
3086         if (ret != 0) {
3087                 printf("ERROR - Prioritized Unordered test FAILED.\n");
3088                 return ret;
3089         }
3090         printf("*** Running Invalid QID test...\n");
3091         ret = invalid_qid(t);
3092         if (ret != 0) {
3093                 printf("ERROR - Invalid QID test FAILED.\n");
3094                 return ret;
3095         }
3096         printf("*** Running Load Balancing History test...\n");
3097         ret = load_balancing_history(t);
3098         if (ret != 0) {
3099                 printf("ERROR - Load Balancing History test FAILED.\n");
3100                 return ret;
3101         }
3102         printf("*** Running Inflight Count test...\n");
3103         ret = inflight_counts(t);
3104         if (ret != 0) {
3105                 printf("ERROR - Inflight Count test FAILED.\n");
3106                 return ret;
3107         }
3108         printf("*** Running Abuse Inflights test...\n");
3109         ret = abuse_inflights(t);
3110         if (ret != 0) {
3111                 printf("ERROR - Abuse Inflights test FAILED.\n");
3112                 return ret;
3113         }
3114         printf("*** Running XStats test...\n");
3115         ret = xstats_tests(t);
3116         if (ret != 0) {
3117                 printf("ERROR - XStats test FAILED.\n");
3118                 return ret;
3119         }
3120         printf("*** Running XStats ID Reset test...\n");
3121         ret = xstats_id_reset_tests(t);
3122         if (ret != 0) {
3123                 printf("ERROR - XStats ID Reset test FAILED.\n");
3124                 return ret;
3125         }
3126         printf("*** Running XStats Brute Force test...\n");
3127         ret = xstats_brute_force(t);
3128         if (ret != 0) {
3129                 printf("ERROR - XStats Brute Force test FAILED.\n");
3130                 return ret;
3131         }
3132         printf("*** Running XStats ID Abuse test...\n");
3133         ret = xstats_id_abuse_tests(t);
3134         if (ret != 0) {
3135                 printf("ERROR - XStats ID Abuse test FAILED.\n");
3136                 return ret;
3137         }
3138         printf("*** Running QID Priority test...\n");
3139         ret = qid_priorities(t);
3140         if (ret != 0) {
3141                 printf("ERROR - QID Priority test FAILED.\n");
3142                 return ret;
3143         }
3144         printf("*** Running Ordered Reconfigure test...\n");
3145         ret = ordered_reconfigure(t);
3146         if (ret != 0) {
3147                 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3148                 return ret;
3149         }
3150         printf("*** Running Port LB Single Reconfig test...\n");
3151         ret = port_single_lb_reconfig(t);
3152         if (ret != 0) {
3153                 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3154                 return ret;
3155         }
3156         printf("*** Running Port Reconfig Credits test...\n");
3157         ret = port_reconfig_credits(t);
3158         if (ret != 0) {
3159                 printf("ERROR - Port Reconfig Credits test FAILED.\n");
3160                 return ret;
3161         }
3162         printf("*** Running Head-of-line-blocking test...\n");
3163         ret = holb(t);
3164         if (ret != 0) {
3165                 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3166                 return ret;
3167         }
3168         if (rte_lcore_count() >= 3) {
3169                 printf("*** Running Worker loopback test...\n");
3170                 ret = worker_loopback(t);
3171                 if (ret != 0) {
3172                         printf("ERROR - Worker loopback test FAILED.\n");
3173                         return ret;
3174                 }
3175         } else {
3176                 printf("### Not enough cores for worker loopback test.\n");
3177                 printf("### Need at least 3 cores for test.\n");
3178         }
3179         /*
3180          * Free test instance, leaving mempool initialized, and a pointer to it
3181          * in static eventdev_func_mempool, as it is re-used on re-runs
3182          */
3183         free(t);
3184
3185         return 0;
3186 }
3187
3188 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);