/*   SPDX-License-Identifier:        BSD-3-Clause
 *   Copyright 2017 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <dpaa_ethdev.h>
#include "dpaa_eventdev.h"
#include <dpaa_mempool.h>

/*
 * Clarifications
 * Eventdev = Virtual instance for the SoC
 * Eventport = Portal instance
 * Eventqueue = Channel instance
 * 1 Eventdev can have N Eventqueues
 */

#define DISABLE_INTR_MODE "disable_intr"

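/* Convert a dequeue timeout from nanoseconds to timer cycles (poll mode). */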
static int
dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	uint64_t cycles_per_second;

	cycles_per_second = rte_get_timer_hz();
	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;

	return 0;
}

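/* Convert a dequeue timeout from nanoseconds to microseconds (interrupt mode). */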
static int
dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	RTE_SET_USED(dev);

	*timeout_ticks = ns/1000;
	return 0;
}

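/* Add the given pool channel to this lcore's portal static dequeue command (SDQCR). */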
static void
dpaa_eventq_portal_add(u16 ch_id)
{
	uint32_t sdqcr;

	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
	qman_static_dequeue_add(sdqcr, NULL);
}

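/*
 * Only RTE_EVENT_OP_RELEASE is handled here: release the DQRR (atomic)
 * contexts held by previously dequeued events; all other ops are no-ops.
 */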
static uint16_t
dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
			 uint16_t nb_events)
{
	uint16_t i;
	struct rte_mbuf *mbuf;

	RTE_SET_USED(port);
	/* Release all the contexts saved previously */
	for (i = 0; i < nb_events; i++) {
		switch (ev[i].op) {
		case RTE_EVENT_OP_RELEASE:
			qman_dca_index(ev[i].impl_opaque, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
			break;
		default:
			break;
		}
	}

	return nb_events;
}

static uint16_t
dpaa_event_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa_event_enqueue_burst(port, ev, 1);
}

static void drain_4_bytes(int fd, fd_set *fdset)
{
	if (FD_ISSET(fd, fdset)) {
		/* drain 4 bytes */
		uint32_t junk;
		ssize_t sjunk = read(fd, &junk, sizeof(junk));
		if (sjunk != sizeof(junk))
			DPAA_EVENTDEV_ERR("UIO irq read error");
	}
}

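/*
 * Wait in interrupt mode until a DQRR interrupt fires or the timeout
 * (given in microseconds) expires; returns > 0 when the portal fd is readable.
 */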
static inline int
dpaa_event_dequeue_wait(uint64_t timeout_ticks)
{
	int fd_qman, nfds;
	int ret;
	fd_set readset;

	/* Go into (and back out of) IRQ mode for each select;
	 * it simplifies exit-path considerations and other
	 * potential nastiness.
	 */
	struct timeval tv = {
		.tv_sec = timeout_ticks / 1000000,
		.tv_usec = timeout_ticks % 1000000
	};

	fd_qman = qman_thread_fd();
	nfds = fd_qman + 1;
	FD_ZERO(&readset);
	FD_SET(fd_qman, &readset);

	qman_irqsource_add(QM_PIRQ_DQRI);

	ret = select(nfds, &readset, NULL, NULL, &tv);
	if (ret < 0)
		return ret;
	/* Calling irqsource_remove() prior to thread_irq()
	 * means thread_irq() will not process whatever caused
	 * the interrupts, however it does ensure that, once
	 * thread_irq() re-enables interrupts, they won't fire
	 * again immediately.
	 */
	qman_irqsource_remove(~0);
	drain_4_bytes(fd_qman, &readset);
	qman_thread_irq();

	return ret;
}

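/*
 * Poll-mode dequeue: affine the lcore to a qman portal and link the port's
 * channels on first use, release any held atomic contexts, then busy-poll
 * the portal until events arrive or the timeout (in timer cycles) expires.
 */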
static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i, irq = 0;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	wait_time_ticks += rte_get_timer_cycles();
	do {
		/* Let's dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (irq)
			irq = 0;
		if (num_frames)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}

static uint16_t
dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}

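/*
 * Interrupt-mode dequeue: same setup as the poll-mode path, but instead of
 * busy-polling it sleeps in dpaa_event_dequeue_wait() until an interrupt
 * signals new frames or the timeout expires.
 */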
static uint16_t
dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
			      uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i, irq = 0;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	do {
		/* Let's dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (irq)
			irq = 0;
		if (num_frames)
			break;
		if (wait_time_ticks) { /* wait for time */
			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
				irq = 1;
				continue;
			}
			break; /* no event after waiting */
		}
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}

static uint16_t
dpaa_event_dequeue_intr(void *port,
			struct rte_event *ev,
			uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
}

static void
dpaa_event_dev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	dev_info->driver_name = "event_dpaa";
	dev_info->min_dequeue_timeout_ns =
		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues =
		DPAA_EVENT_MAX_QUEUES;
	dev_info->max_event_queue_flows =
		DPAA_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports =
		DPAA_EVENT_MAX_EVENT_PORT;
	dev_info->max_event_port_dequeue_depth =
		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	/*
	 * TODO: Need to find out how to fetch this info
	 * from the kernel or elsewhere.
	 */
	dev_info->max_num_events =
		DPAA_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap =
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

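/*
 * Apply the device configuration: allocate one qman pool channel per event
 * queue and derive each port's default dequeue timeout (microseconds in
 * interrupt mode, timer cycles in poll mode).
 */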
static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_INIT_FUNC_TRACE();
	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	ch_id = rte_malloc("dpaa-channels",
			  sizeof(uint32_t) * priv->nb_event_queues,
			  RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		DPAA_EVENTDEV_ERR("Failed to allocate memory for dpaa channels\n");
		return -ENOMEM;
	}
	/* Create requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err=%d\n",
				 priv->nb_event_queues, ret);
		rte_free(ch_id);
		return ret;
	}
	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Prepare event ports */
	memset(&priv->ports[0], 0,
	      sizeof(struct dpaa_port) * priv->nb_event_ports);

	/* Check whether the dequeue timeout is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * The timeout value supplied with each dequeue operation
		 * is used, so invalidate the global timeout value here.
		 */
		priv->dequeue_timeout_ns = 0;

	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	for (i = 0; i < priv->nb_event_ports; i++) {
		if (priv->intr_mode) {
			priv->ports[i].timeout_us =
				priv->dequeue_timeout_ns/1000;
		} else {
			uint64_t cycles_per_second;

			cycles_per_second = rte_get_timer_hz();
			priv->ports[i].timeout_us =
				(priv->dequeue_timeout_ns * cycles_per_second)
					/ NS_PER_S;
		}
	}

	/*
	 * TODO: Currently portals are affined to threads. At most as many
	 * threads can be created as there are lcores.
	 */
	rte_free(ch_id);
	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}

static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}

static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			  struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
}

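/* Only parallel and atomic scheduling are supported; ordered queues are rejected. */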
static int
dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		       const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static int
dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		      const struct rte_event_port_conf *port_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);
	dev->data->ports[port_id] = &eventdev->ports[port_id];

	return 0;
}

static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port);
}

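/*
 * Record the queue-to-port links; the underlying channels are attached to
 * the portal lazily, on the first dequeue from this port.
 */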
static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	/* First check that the input configuration is valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}

static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	if (event_port->num_linked_evq)
		event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}

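/*
 * Native net_dpaa devices report the DPAA Rx adapter capabilities; any other
 * ethdev driver falls back to the generic SW adapter capabilities.
 */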
static int
dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
				   const struct rte_eth_dev *eth_dev,
				   uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

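/*
 * Attach ethdev Rx queue(s) to the channel backing the target event queue.
 * rx_queue_id == -1 attaches all Rx queues; on failure, the queues attached
 * so far are rolled back.
 */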
static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				DPAA_EVENTDEV_ERR(
					"Event Queue attach failed:%d\n", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
	return ret;

detach_configured_queues:

	for (i = (i - 1); i >= 0 ; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
				    const struct rte_eth_dev *eth_dev,
				    int32_t rx_queue_id)
{
	int ret, i;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_detach(eth_dev, i);
			if (ret)
				DPAA_EVENTDEV_ERR(
					"Event Queue detach failed:%d\n", ret);
		}

		return 0;
	}

	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
	return ret;
}

static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static struct rte_eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get    = dpaa_event_dev_info_get,
	.dev_configure    = dpaa_event_dev_configure,
	.dev_start        = dpaa_event_dev_start,
	.dev_stop         = dpaa_event_dev_stop,
	.dev_close        = dpaa_event_dev_close,
	.queue_def_conf   = dpaa_event_queue_def_conf,
	.queue_setup      = dpaa_event_queue_setup,
	.queue_release    = dpaa_event_queue_release,
	.port_def_conf    = dpaa_event_port_default_conf_get,
	.port_setup       = dpaa_event_port_setup,
	.port_release     = dpaa_event_port_release,
	.port_link        = dpaa_event_port_link,
	.port_unlink      = dpaa_event_port_unlink,
	.timeout_ticks    = dpaa_event_dequeue_timeout_ticks,
	.eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
	.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
};

static int flag_check_handler(__rte_unused const char *key,
		const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

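/* Return 1 when the devargs contain "disable_intr=1", 0 otherwise. */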
static int
dpaa_event_check_flags(const char *params)
{
	struct rte_kvargs *kvlist;

	if (params == NULL || params[0] == '\0')
		return 0;

	kvlist = rte_kvargs_parse(params, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* INTR MODE is disabled when the key-value pair disable_intr=1 is present */
	if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
				flag_check_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

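/*
 * Create the vdev and select the datapath: interrupt-mode dequeue by default,
 * poll-mode dequeue when the "disable_intr=1" devarg is supplied.
 */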
static int
dpaa_event_dev_create(const char *name, const char *params)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}
	priv = eventdev->data->dev_private;

	eventdev->dev_ops       = &dpaa_eventdev_ops;
	eventdev->enqueue       = dpaa_event_enqueue;
	eventdev->enqueue_burst = dpaa_event_enqueue_burst;

	if (dpaa_event_check_flags(params)) {
		eventdev->dequeue       = dpaa_event_dequeue;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
	} else {
		priv->intr_mode = 1;
		eventdev->dev_ops->timeout_ticks =
				dpaa_event_dequeue_timeout_ticks_intr;
		eventdev->dequeue       = dpaa_event_dequeue_intr;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
	}

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	const char *params;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Initializing %s", name);

	params = rte_vdev_device_args(vdev);

	return dpaa_event_dev_create(name, params);
}

static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
		DISABLE_INTR_MODE "=<int>");
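
/*
 * Example EAL usage (illustrative, not part of this file): the PMD is
 * instantiated as a vdev named by EVENTDEV_NAME_DPAA_PMD, optionally with
 * the devarg that forces poll-mode dequeue, e.g.
 *   --vdev=<EVENTDEV_NAME_DPAA_PMD>                  interrupt-mode dequeue
 *   --vdev=<EVENTDEV_NAME_DPAA_PMD>,disable_intr=1   poll-mode dequeue
 */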