/*
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of NXP nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include "dpaa2_eventdev.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications:
 * Eventdev   = SoC instance
 * Eventport  = DPIO instance
 * Eventqueue = DPCON instance
 * One eventdev can have N eventqueues.
 * A soft event flow is a DPCI instance.
 */
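
/*
 * Enqueue a burst of events. Each event is wrapped in a QBMAN frame
 * descriptor and pushed to the DPCI frame queue matching its schedule
 * type (atomic vs. parallel). For atomic events, impl_opaque carries
 * the held DQRR entry index (plus one), so the enqueue descriptor is
 * set up for DCA to release that entry on completion.
 */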
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct rte_eventdev *ev_dev =
			((struct dpaa2_io_portal_t *)port)->eventdev;
	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
	uint32_t queue_id = ev[0].queue_id;
	struct evq_info_t *evq_info = &priv->evq_info[queue_id];
	uint32_t fqid;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int ret;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_events) {
		frames_to_send = (nb_events >> 3) ?
			MAX_TX_RING_SLOTS : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->impl_opaque) {
				uint8_t dqrr_index = event->impl_opaque - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DPIO->dqrr_size--;
				DPAA2_PER_LCORE_DPIO->dqrr_held &=
					~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid the copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
					sizeof(struct rte_event), 0);
			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				PMD_DRV_LOG(ERR, "Unable to allocate memory");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}
		num_tx += frames_to_send;
		nb_events -= frames_to_send;
	}

	return num_tx;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}
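
/*
 * Block on the portal's epoll fd until the DQRR interrupt signals that
 * dequeue results are available, or until the timeout expires.
 */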
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;
	int ret, i = 0;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

RETRY:
	ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
			 &epoll_ev, 1, timeout_ticks);
	if (ret < 1) {
		/* epoll_wait() can fail with errno set to EINTR when it is
		 * interrupted by a spurious signal, so retry in that case
		 * instead of treating it as an error.
		 */
		if (errno == EINTR) {
			PMD_DRV_LOG(DEBUG, "epoll_wait interrupted\n");
			if (i++ > 10)
				PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
			goto RETRY;
		}
	}
}
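
/* Rebuild the rte_event from the frame descriptor of a parallel queue;
 * the DQRR entry is consumed immediately since no atomic context is held.
 */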
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}
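
/* Rebuild the rte_event from the frame descriptor of an atomic queue.
 * The DQRR entry is held rather than consumed: its index is stashed in
 * impl_opaque and released later, either via DCA on enqueue or on the
 * next dequeue call.
 */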
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	ev->impl_opaque = dqrr_index + 1;
	DPAA2_PER_LCORE_DPIO->dqrr_size++;
	DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
}
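
/*
 * Dequeue a burst of events from the portal's DQRR, dispatching each
 * entry to the per-queue callback (parallel or atomic) registered in
 * the FQD context.
 */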
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;

	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
		if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
			dq = qbman_get_dqrr_from_idx(swp, i);
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_PER_LCORE_DPIO->dqrr_size--;
		}
		i++;
	}
	DPAA2_PER_LCORE_DPIO->dqrr_held = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}

		fd = qbman_result_DQ_fd(dq);

		rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			PMD_DRV_LOG(ERR, "Null Return VQ received\n");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	PMD_DRV_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = RTE_MAX_LCORE;
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	PMD_DRV_FUNC_TRACE();

	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
				    RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct evq_info_t *evq_info =
		&priv->evq_info[queue_id];

	PMD_DRV_FUNC_TRACE();

	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;

	return 0;
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port);
}
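
/*
 * An event port maps 1:1 to a DPIO portal: acquire the portal on first
 * setup and remember the owning eventdev so the enqueue/dequeue fast
 * paths can reach the device private data through the port pointer.
 */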
static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	if (!dpaa2_io_portal[port_id].dpio_dev) {
		dpaa2_io_portal[port_id].dpio_dev =
				dpaa2_get_qbman_swp(port_id);
		if (!dpaa2_io_portal[port_id].dpio_dev)
			return -1;
		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
	}

	dpaa2_io_portal[port_id].eventdev = dev;
	dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
	return 0;
}
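
/* Unlink queues from a port: disable push dequeues on each queue's
 * channel and drop the static dequeue channel registration for its
 * DPCON.
 */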
static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	int i;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &priv->evq_info[queues[i]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
					0, dpaa2_portal->dpio_dev->token,
					evq_info->dpcon->dpcon_id);
	}

	return (int)nb_unlinks;
}
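
/* Link queues to a port: register each queue's DPCON as a static
 * dequeue channel on the port's DPIO and enable push dequeues on the
 * returned channel index. Link priorities are ignored by this PMD.
 */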
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	uint8_t channel_index;
	int ret, i, n;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];

		ret = dpio_add_static_dequeue_channel(
			dpaa2_portal->dpio_dev->dpio,
			CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
			evq_info->dpcon->dpcon_id, &channel_index);
		if (ret < 0) {
			PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
				    ret);
			goto err;
		}

		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}

	RTE_SET_USED(priorities);

	return (int)nb_links;
err:
	for (n = 0; n < i; n++) {
		evq_info = &priv->evq_info[queues[n]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
					0, dpaa2_portal->dpio_dev->token,
					evq_info->dpcon->dpcon_id);
	}
	return ret;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns * scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}
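
/* The DPAA2 ethdev can deliver packets directly to a DPCON in hardware;
 * any other ethdev falls back to the software Rx adapter capabilities.
 */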
static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}
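
/* Attach every Rx queue of the ethdev to the DPCON backing the target
 * event queue, rolling back the already-attached queues on failure.
 */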
static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
				dpcon_id, queue_conf);
		if (ret) {
			PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
				    ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
			dpcon_id, queue_conf);
	if (ret) {
		PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
				    ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get = dpaa2_eventdev_info_get,
	.dev_configure = dpaa2_eventdev_configure,
	.dev_start = dpaa2_eventdev_start,
	.dev_stop = dpaa2_eventdev_stop,
	.dev_close = dpaa2_eventdev_close,
	.queue_def_conf = dpaa2_eventdev_queue_def_conf,
	.queue_setup = dpaa2_eventdev_queue_setup,
	.queue_release = dpaa2_eventdev_queue_release,
	.port_def_conf = dpaa2_eventdev_port_def_conf,
	.port_setup = dpaa2_eventdev_port_setup,
	.port_release = dpaa2_eventdev_port_release,
	.port_link = dpaa2_eventdev_port_link,
	.port_unlink = dpaa2_eventdev_port_unlink,
	.timeout_ticks = dpaa2_eventdev_timeout_ticks,
	.dump = dpaa2_eventdev_dump,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
};
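
/* Point both DPCI Rx queues at the DPCON and register the per-queue
 * dequeue callbacks; user_ctx carries the queue pointer that comes
 * back as the FQD context in dequeue results.
 */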
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Do the settings to get the frame on a DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
		DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "set_rx_q failed with err code: %d", ret);
			return ret;
		}
	}
	return 0;
}
static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "dpci setup failed with err code: %d",
				    ret);
			return ret;
		}

		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);