New upstream version 18.08
deb_dpdk.git: lib/librte_eventdev/rte_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15
16 #include <rte_byteorder.h>
17 #include <rte_log.h>
18 #include <rte_debug.h>
19 #include <rte_dev.h>
20 #include <rte_memory.h>
21 #include <rte_memcpy.h>
22 #include <rte_memzone.h>
23 #include <rte_eal.h>
24 #include <rte_per_lcore.h>
25 #include <rte_lcore.h>
26 #include <rte_atomic.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_malloc.h>
30 #include <rte_errno.h>
31 #include <rte_ethdev.h>
32 #include <rte_cryptodev.h>
33 #include <rte_cryptodev_pmd.h>
34
35 #include "rte_eventdev.h"
36 #include "rte_eventdev_pmd.h"
37
38 struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
39
40 struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
41
42 static struct rte_eventdev_global eventdev_globals = {
43         .nb_devs                = 0
44 };
45
46 struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
47
48 /* Event dev north bound API implementation */
49
50 uint8_t
51 rte_event_dev_count(void)
52 {
53         return rte_eventdev_globals->nb_devs;
54 }
55
56 int
57 rte_event_dev_get_dev_id(const char *name)
58 {
59         int i;
60         uint8_t cmp;
61
62         if (!name)
63                 return -EINVAL;
64
65         for (i = 0; i < rte_eventdev_globals->nb_devs; i++) {
66                 cmp = (strncmp(rte_event_devices[i].data->name, name,
67                                 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
68                         (rte_event_devices[i].dev ? (strncmp(
69                                 rte_event_devices[i].dev->driver->name, name,
70                                          RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
71                 if (cmp && (rte_event_devices[i].attached ==
72                                         RTE_EVENTDEV_ATTACHED))
73                         return i;
74         }
75         return -ENODEV;
76 }
77
78 int
79 rte_event_dev_socket_id(uint8_t dev_id)
80 {
81         struct rte_eventdev *dev;
82
83         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
84         dev = &rte_eventdevs[dev_id];
85
86         return dev->data->socket_id;
87 }
88
89 int
90 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
91 {
92         struct rte_eventdev *dev;
93
94         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
95         dev = &rte_eventdevs[dev_id];
96
97         if (dev_info == NULL)
98                 return -EINVAL;
99
100         memset(dev_info, 0, sizeof(struct rte_event_dev_info));
101
102         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
103         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
104
105         dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
106
107         dev_info->dev = dev->dev;
108         return 0;
109 }
110
111 int
112 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
113                                 uint32_t *caps)
114 {
115         struct rte_eventdev *dev;
116
117         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
118         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
119
120         dev = &rte_eventdevs[dev_id];
121
122         if (caps == NULL)
123                 return -EINVAL;
124         *caps = 0;
125
126         return dev->dev_ops->eth_rx_adapter_caps_get ?
127                                 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
128                                                 &rte_eth_devices[eth_port_id],
129                                                 caps)
130                                 : 0;
131 }
132
133 int __rte_experimental
134 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
135 {
136         struct rte_eventdev *dev;
137         const struct rte_event_timer_adapter_ops *ops;
138
139         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
140
141         dev = &rte_eventdevs[dev_id];
142
143         if (caps == NULL)
144                 return -EINVAL;
145         *caps = 0;
146
147         return dev->dev_ops->timer_adapter_caps_get ?
148                                 (*dev->dev_ops->timer_adapter_caps_get)(dev,
149                                                                         0,
150                                                                         caps,
151                                                                         &ops)
152                                 : 0;
153 }
154
155 int __rte_experimental
156 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
157                                   uint32_t *caps)
158 {
159         struct rte_eventdev *dev;
160         struct rte_cryptodev *cdev;
161
162         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
163         if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
164                 return -EINVAL;
165
166         dev = &rte_eventdevs[dev_id];
167         cdev = rte_cryptodev_pmd_get_dev(cdev_id);
168
169         if (caps == NULL)
170                 return -EINVAL;
171         *caps = 0;
172
173         return dev->dev_ops->crypto_adapter_caps_get ?
174                 (*dev->dev_ops->crypto_adapter_caps_get)
175                 (dev, cdev, caps) : -ENOTSUP;
176 }
177
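/*
 * Internal helper: size dev->data->queues_cfg to hold nb_queues entries.
 * Covers first-time allocation, reallocation on re-configure (releasing any
 * queues beyond the new count) and teardown when nb_queues is zero.
 */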
178 static inline int
179 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
180 {
181         uint8_t old_nb_queues = dev->data->nb_queues;
182         struct rte_event_queue_conf *queues_cfg;
183         unsigned int i;
184
185         RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
186                          dev->data->dev_id);
187
188         /* First time configuration */
189         if (dev->data->queues_cfg == NULL && nb_queues != 0) {
190                 /* Allocate memory to store queue configuration */
191                 dev->data->queues_cfg = rte_zmalloc_socket(
192                                 "eventdev->data->queues_cfg",
193                                 sizeof(dev->data->queues_cfg[0]) * nb_queues,
194                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
195                 if (dev->data->queues_cfg == NULL) {
196                         dev->data->nb_queues = 0;
197                         RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
198                                         " nb_queues %u", nb_queues);
199                         return -(ENOMEM);
200                 }
201         /* Re-configure */
202         } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
203                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
204
205                 for (i = nb_queues; i < old_nb_queues; i++)
206                         (*dev->dev_ops->queue_release)(dev, i);
207
208                 /* Re allocate memory to store queue configuration */
209                 queues_cfg = dev->data->queues_cfg;
210                 queues_cfg = rte_realloc(queues_cfg,
211                                 sizeof(queues_cfg[0]) * nb_queues,
212                                 RTE_CACHE_LINE_SIZE);
213                 if (queues_cfg == NULL) {
214                         RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
215                                                 " nb_queues %u", nb_queues);
216                         return -(ENOMEM);
217                 }
218                 dev->data->queues_cfg = queues_cfg;
219
220                 if (nb_queues > old_nb_queues) {
221                         uint8_t new_qs = nb_queues - old_nb_queues;
222
223                         memset(queues_cfg + old_nb_queues, 0,
224                                 sizeof(queues_cfg[0]) * new_qs);
225                 }
226         } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
227                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
228
229                 for (i = nb_queues; i < old_nb_queues; i++)
230                         (*dev->dev_ops->queue_release)(dev, i);
231         }
232
233         dev->data->nb_queues = nb_queues;
234         return 0;
235 }
236
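/* Sentinel stored in links_map[] for queue slots not linked to a port. */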
237 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
238
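/*
 * Internal helper: size the per-port arrays (ports, ports_cfg and links_map)
 * for nb_ports entries, releasing any ports beyond the new count and marking
 * newly added links_map slots as unlinked.
 */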
239 static inline int
240 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
241 {
242         uint8_t old_nb_ports = dev->data->nb_ports;
243         void **ports;
244         uint16_t *links_map;
245         struct rte_event_port_conf *ports_cfg;
246         unsigned int i;
247
248         RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
249                          dev->data->dev_id);
250
251         /* First time configuration */
252         if (dev->data->ports == NULL && nb_ports != 0) {
253                 dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
254                                 sizeof(dev->data->ports[0]) * nb_ports,
255                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
256                 if (dev->data->ports == NULL) {
257                         dev->data->nb_ports = 0;
258                         RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
259                                         " nb_ports %u", nb_ports);
260                         return -(ENOMEM);
261                 }
262
263                 /* Allocate memory to store port configurations */
264                 dev->data->ports_cfg =
265                         rte_zmalloc_socket("eventdev->ports_cfg",
266                         sizeof(dev->data->ports_cfg[0]) * nb_ports,
267                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
268                 if (dev->data->ports_cfg == NULL) {
269                         dev->data->nb_ports = 0;
270                         RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
271                                         " nb_ports %u", nb_ports);
272                         return -(ENOMEM);
273                 }
274
275                 /* Allocate memory to store queue to port link connection */
276                 dev->data->links_map =
277                         rte_zmalloc_socket("eventdev->links_map",
278                         sizeof(dev->data->links_map[0]) * nb_ports *
279                         RTE_EVENT_MAX_QUEUES_PER_DEV,
280                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
281                 if (dev->data->links_map == NULL) {
282                         dev->data->nb_ports = 0;
283                         RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
284                                         " nb_ports %u", nb_ports);
285                         return -(ENOMEM);
286                 }
287                 for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
288                         dev->data->links_map[i] =
289                                 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
290         } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
291                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
292
293                 ports = dev->data->ports;
294                 ports_cfg = dev->data->ports_cfg;
295                 links_map = dev->data->links_map;
296
297                 for (i = nb_ports; i < old_nb_ports; i++)
298                         (*dev->dev_ops->port_release)(ports[i]);
299
300                 /* Realloc memory for ports */
301                 ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
302                                 RTE_CACHE_LINE_SIZE);
303                 if (ports == NULL) {
304                         RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
305                                                 " nb_ports %u", nb_ports);
306                         return -(ENOMEM);
307                 }
308
309                 /* Realloc memory for ports_cfg */
310                 ports_cfg = rte_realloc(ports_cfg,
311                         sizeof(ports_cfg[0]) * nb_ports,
312                         RTE_CACHE_LINE_SIZE);
313                 if (ports_cfg == NULL) {
314                         RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
315                                                 " nb_ports %u", nb_ports);
316                         return -(ENOMEM);
317                 }
318
319                 /* Realloc memory to store queue to port link connection */
320                 links_map = rte_realloc(links_map,
321                         sizeof(dev->data->links_map[0]) * nb_ports *
322                         RTE_EVENT_MAX_QUEUES_PER_DEV,
323                         RTE_CACHE_LINE_SIZE);
324                 if (links_map == NULL) {
325                         dev->data->nb_ports = 0;
326                         RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
327                                         " nb_ports %u", nb_ports);
328                         return -(ENOMEM);
329                 }
330
331                 if (nb_ports > old_nb_ports) {
332                         uint8_t new_ps = nb_ports - old_nb_ports;
333                         unsigned int old_links_map_end =
334                                 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
335                         unsigned int links_map_end =
336                                 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
337
338                         memset(ports + old_nb_ports, 0,
339                                 sizeof(ports[0]) * new_ps);
340                         memset(ports_cfg + old_nb_ports, 0,
341                                 sizeof(ports_cfg[0]) * new_ps);
342                         for (i = old_links_map_end; i < links_map_end; i++)
343                                 links_map[i] =
344                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
345                 }
346
347                 dev->data->ports = ports;
348                 dev->data->ports_cfg = ports_cfg;
349                 dev->data->links_map = links_map;
350         } else if (dev->data->ports != NULL && nb_ports == 0) {
351                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
352
353                 ports = dev->data->ports;
354                 for (i = nb_ports; i < old_nb_ports; i++)
355                         (*dev->dev_ops->port_release)(ports[i]);
356         }
357
358         dev->data->nb_ports = nb_ports;
359         return 0;
360 }
361
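/*
 * Validate dev_conf against the limits reported by the driver through
 * dev_infos_get, resize the queue and port arrays accordingly, then pass the
 * configuration to the driver via dev_configure.
 */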
362 int
363 rte_event_dev_configure(uint8_t dev_id,
364                         const struct rte_event_dev_config *dev_conf)
365 {
366         struct rte_eventdev *dev;
367         struct rte_event_dev_info info;
368         int diag;
369
370         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
371         dev = &rte_eventdevs[dev_id];
372
373         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
374         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
375
376         if (dev->data->dev_started) {
377                 RTE_EDEV_LOG_ERR(
378                     "device %d must be stopped to allow configuration", dev_id);
379                 return -EBUSY;
380         }
381
382         if (dev_conf == NULL)
383                 return -EINVAL;
384
385         (*dev->dev_ops->dev_infos_get)(dev, &info);
386
387         /* Check dequeue_timeout_ns value is in limit */
388         if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
389                 if (dev_conf->dequeue_timeout_ns &&
390                     (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
391                         || dev_conf->dequeue_timeout_ns >
392                                  info.max_dequeue_timeout_ns)) {
393                         RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
394                         " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
395                         dev_id, dev_conf->dequeue_timeout_ns,
396                         info.min_dequeue_timeout_ns,
397                         info.max_dequeue_timeout_ns);
398                         return -EINVAL;
399                 }
400         }
401
402         /* Check nb_events_limit is in limit */
403         if (dev_conf->nb_events_limit > info.max_num_events) {
404                 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
405                 dev_id, dev_conf->nb_events_limit, info.max_num_events);
406                 return -EINVAL;
407         }
408
409         /* Check nb_event_queues is in limit */
410         if (!dev_conf->nb_event_queues) {
411                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
412                                         dev_id);
413                 return -EINVAL;
414         }
415         if (dev_conf->nb_event_queues > info.max_event_queues) {
416                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
417                 dev_id, dev_conf->nb_event_queues, info.max_event_queues);
418                 return -EINVAL;
419         }
420
421         /* Check nb_event_ports is in limit */
422         if (!dev_conf->nb_event_ports) {
423                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
424                 return -EINVAL;
425         }
426         if (dev_conf->nb_event_ports > info.max_event_ports) {
427                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
428                 dev_id, dev_conf->nb_event_ports, info.max_event_ports);
429                 return -EINVAL;
430         }
431
432         /* Check nb_event_queue_flows is in limit */
433         if (!dev_conf->nb_event_queue_flows) {
434                 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
435                 return -EINVAL;
436         }
437         if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
438                 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
439                 dev_id, dev_conf->nb_event_queue_flows,
440                 info.max_event_queue_flows);
441                 return -EINVAL;
442         }
443
444         /* Check nb_event_port_dequeue_depth is in limit */
445         if (!dev_conf->nb_event_port_dequeue_depth) {
446                 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
447                                         dev_id);
448                 return -EINVAL;
449         }
450         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
451                  (dev_conf->nb_event_port_dequeue_depth >
452                          info.max_event_port_dequeue_depth)) {
453                 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
454                 dev_id, dev_conf->nb_event_port_dequeue_depth,
455                 info.max_event_port_dequeue_depth);
456                 return -EINVAL;
457         }
458
459         /* Check nb_event_port_enqueue_depth is in limit */
460         if (!dev_conf->nb_event_port_enqueue_depth) {
461                 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
462                                         dev_id);
463                 return -EINVAL;
464         }
465         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
466                 (dev_conf->nb_event_port_enqueue_depth >
467                          info.max_event_port_enqueue_depth)) {
468                 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
469                 dev_id, dev_conf->nb_event_port_enqueue_depth,
470                 info.max_event_port_enqueue_depth);
471                 return -EINVAL;
472         }
473
474         /* Copy the dev_conf parameter into the dev structure */
475         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
476
477         /* Setup new number of queues and reconfigure device. */
478         diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
479         if (diag != 0) {
480                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
481                                 dev_id, diag);
482                 return diag;
483         }
484
485         /* Setup new number of ports and reconfigure device. */
486         diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
487         if (diag != 0) {
488                 rte_event_dev_queue_config(dev, 0);
489                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
490                                 dev_id, diag);
491                 return diag;
492         }
493
494         /* Configure the device */
495         diag = (*dev->dev_ops->dev_configure)(dev);
496         if (diag != 0) {
497                 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
498                 rte_event_dev_queue_config(dev, 0);
499                 rte_event_dev_port_config(dev, 0);
500         }
501
502         dev->data->event_dev_cap = info.event_dev_cap;
503         return diag;
504 }
505
506 static inline int
507 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
508 {
509         if (queue_id < dev->data->nb_queues && queue_id <
510                                 RTE_EVENT_MAX_QUEUES_PER_DEV)
511                 return 1;
512         else
513                 return 0;
514 }
515
516 int
517 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
518                                  struct rte_event_queue_conf *queue_conf)
519 {
520         struct rte_eventdev *dev;
521
522         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
523         dev = &rte_eventdevs[dev_id];
524
525         if (queue_conf == NULL)
526                 return -EINVAL;
527
528         if (!is_valid_queue(dev, queue_id)) {
529                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
530                 return -EINVAL;
531         }
532
533         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
534         memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
535         (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
536         return 0;
537 }
538
539 static inline int
540 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
541 {
542         if (queue_conf &&
543                 !(queue_conf->event_queue_cfg &
544                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
545                 ((queue_conf->event_queue_cfg &
546                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
547                 (queue_conf->schedule_type
548                         == RTE_SCHED_TYPE_ATOMIC)
549                 ))
550                 return 1;
551         else
552                 return 0;
553 }
554
555 static inline int
556 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
557 {
558         if (queue_conf &&
559                 !(queue_conf->event_queue_cfg &
560                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
561                 ((queue_conf->event_queue_cfg &
562                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
563                 (queue_conf->schedule_type
564                         == RTE_SCHED_TYPE_ORDERED)
565                 ))
566                 return 1;
567         else
568                 return 0;
569 }
570
571
572 int
573 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
574                       const struct rte_event_queue_conf *queue_conf)
575 {
576         struct rte_eventdev *dev;
577         struct rte_event_queue_conf def_conf;
578
579         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
580         dev = &rte_eventdevs[dev_id];
581
582         if (!is_valid_queue(dev, queue_id)) {
583                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
584                 return -EINVAL;
585         }
586
587         /* Check nb_atomic_flows limit */
588         if (is_valid_atomic_queue_conf(queue_conf)) {
589                 if (queue_conf->nb_atomic_flows == 0 ||
590                     queue_conf->nb_atomic_flows >
591                         dev->data->dev_conf.nb_event_queue_flows) {
592                         RTE_EDEV_LOG_ERR(
593                 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
594                         dev_id, queue_id, queue_conf->nb_atomic_flows,
595                         dev->data->dev_conf.nb_event_queue_flows);
596                         return -EINVAL;
597                 }
598         }
599
600         /* Check nb_atomic_order_sequences limit */
601         if (is_valid_ordered_queue_conf(queue_conf)) {
602                 if (queue_conf->nb_atomic_order_sequences == 0 ||
603                     queue_conf->nb_atomic_order_sequences >
604                         dev->data->dev_conf.nb_event_queue_flows) {
605                         RTE_EDEV_LOG_ERR(
606                 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
607                         dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
608                         dev->data->dev_conf.nb_event_queue_flows);
609                         return -EINVAL;
610                 }
611         }
612
613         if (dev->data->dev_started) {
614                 RTE_EDEV_LOG_ERR(
615                     "device %d must be stopped to allow queue setup", dev_id);
616                 return -EBUSY;
617         }
618
619         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
620
621         if (queue_conf == NULL) {
622                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
623                                         -ENOTSUP);
624                 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
625                 queue_conf = &def_conf;
626         }
627
628         dev->data->queues_cfg[queue_id] = *queue_conf;
629         return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
630 }
631
632 static inline int
633 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
634 {
635         if (port_id < dev->data->nb_ports)
636                 return 1;
637         else
638                 return 0;
639 }
640
641 int
642 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
643                                  struct rte_event_port_conf *port_conf)
644 {
645         struct rte_eventdev *dev;
646
647         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
648         dev = &rte_eventdevs[dev_id];
649
650         if (port_conf == NULL)
651                 return -EINVAL;
652
653         if (!is_valid_port(dev, port_id)) {
654                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
655                 return -EINVAL;
656         }
657
658         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
659         memset(port_conf, 0, sizeof(struct rte_event_port_conf));
660         (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
661         return 0;
662 }
663
664 int
665 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
666                      const struct rte_event_port_conf *port_conf)
667 {
668         struct rte_eventdev *dev;
669         struct rte_event_port_conf def_conf;
670         int diag;
671
672         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
673         dev = &rte_eventdevs[dev_id];
674
675         if (!is_valid_port(dev, port_id)) {
676                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
677                 return -EINVAL;
678         }
679
680         /* Check new_event_threshold limit */
681         if ((port_conf && !port_conf->new_event_threshold) ||
682                         (port_conf && port_conf->new_event_threshold >
683                                  dev->data->dev_conf.nb_events_limit)) {
684                 RTE_EDEV_LOG_ERR(
685                    "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
686                         dev_id, port_id, port_conf->new_event_threshold,
687                         dev->data->dev_conf.nb_events_limit);
688                 return -EINVAL;
689         }
690
691         /* Check dequeue_depth limit */
692         if ((port_conf && !port_conf->dequeue_depth) ||
693                         (port_conf && port_conf->dequeue_depth >
694                 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
695                 RTE_EDEV_LOG_ERR(
696                    "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
697                         dev_id, port_id, port_conf->dequeue_depth,
698                         dev->data->dev_conf.nb_event_port_dequeue_depth);
699                 return -EINVAL;
700         }
701
702         /* Check enqueue_depth limit */
703         if ((port_conf && !port_conf->enqueue_depth) ||
704                         (port_conf && port_conf->enqueue_depth >
705                 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
706                 RTE_EDEV_LOG_ERR(
707                    "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
708                         dev_id, port_id, port_conf->enqueue_depth,
709                         dev->data->dev_conf.nb_event_port_enqueue_depth);
710                 return -EINVAL;
711         }
712
713         if (port_conf && port_conf->disable_implicit_release &&
714             !(dev->data->event_dev_cap &
715               RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
716                 RTE_EDEV_LOG_ERR(
717                    "dev%d port%d Implicit release disable not supported",
718                         dev_id, port_id);
719                 return -EINVAL;
720         }
721
722         if (dev->data->dev_started) {
723                 RTE_EDEV_LOG_ERR(
724                     "device %d must be stopped to allow port setup", dev_id);
725                 return -EBUSY;
726         }
727
728         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
729
730         if (port_conf == NULL) {
731                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
732                                         -ENOTSUP);
733                 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
734                 port_conf = &def_conf;
735         }
736
737         dev->data->ports_cfg[port_id] = *port_conf;
738
739         diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
740
741         /* Unlink all the queues from this port (default state after setup) */
742         if (!diag)
743                 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
744
745         if (diag < 0)
746                 return diag;
747
748         return 0;
749 }
750
751 int
752 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
753                        uint32_t *attr_value)
754 {
755         struct rte_eventdev *dev;
756
757         if (!attr_value)
758                 return -EINVAL;
759         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
760         dev = &rte_eventdevs[dev_id];
761
762         switch (attr_id) {
763         case RTE_EVENT_DEV_ATTR_PORT_COUNT:
764                 *attr_value = dev->data->nb_ports;
765                 break;
766         case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
767                 *attr_value = dev->data->nb_queues;
768                 break;
769         case RTE_EVENT_DEV_ATTR_STARTED:
770                 *attr_value = dev->data->dev_started;
771                 break;
772         default:
773                 return -EINVAL;
774         }
775
776         return 0;
777 }
778
779 int
780 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
781                         uint32_t *attr_value)
782 {
783         struct rte_eventdev *dev;
784
785         if (!attr_value)
786                 return -EINVAL;
787
788         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
789         dev = &rte_eventdevs[dev_id];
790         if (!is_valid_port(dev, port_id)) {
791                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
792                 return -EINVAL;
793         }
794
795         switch (attr_id) {
796         case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
797                 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
798                 break;
799         case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
800                 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
801                 break;
802         case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
803                 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
804                 break;
805         default:
806                 return -EINVAL;
807         }
808         return 0;
809 }
810
811 int
812 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
813                         uint32_t *attr_value)
814 {
815         struct rte_event_queue_conf *conf;
816         struct rte_eventdev *dev;
817
818         if (!attr_value)
819                 return -EINVAL;
820
821         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
822         dev = &rte_eventdevs[dev_id];
823         if (!is_valid_queue(dev, queue_id)) {
824                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
825                 return -EINVAL;
826         }
827
828         conf = &dev->data->queues_cfg[queue_id];
829
830         switch (attr_id) {
831         case RTE_EVENT_QUEUE_ATTR_PRIORITY:
832                 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
833                 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
834                         *attr_value = conf->priority;
835                 break;
836         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
837                 *attr_value = conf->nb_atomic_flows;
838                 break;
839         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
840                 *attr_value = conf->nb_atomic_order_sequences;
841                 break;
842         case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
843                 *attr_value = conf->event_queue_cfg;
844                 break;
845         case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
846                 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
847                         return -EOVERFLOW;
848
849                 *attr_value = conf->schedule_type;
850                 break;
851         default:
852                 return -EINVAL;
853         }
854         return 0;
855 }
856
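/*
 * Link queues to an event port. On success the library's shadow copy of the
 * links (dev->data->links_map, read back by rte_event_port_links_get) is
 * updated with the priority of each established link.
 */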
857 int
858 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
859                     const uint8_t queues[], const uint8_t priorities[],
860                     uint16_t nb_links)
861 {
862         struct rte_eventdev *dev;
863         uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
864         uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
865         uint16_t *links_map;
866         int i, diag;
867
868         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
869         dev = &rte_eventdevs[dev_id];
870
871         if (*dev->dev_ops->port_link == NULL) {
872                 RTE_PMD_DEBUG_TRACE("Function not supported\n");
873                 rte_errno = -ENOTSUP;
874                 return 0;
875         }
876
877         if (!is_valid_port(dev, port_id)) {
878                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
879                 rte_errno = -EINVAL;
880                 return 0;
881         }
882
883         if (queues == NULL) {
884                 for (i = 0; i < dev->data->nb_queues; i++)
885                         queues_list[i] = i;
886
887                 queues = queues_list;
888                 nb_links = dev->data->nb_queues;
889         }
890
891         if (priorities == NULL) {
892                 for (i = 0; i < nb_links; i++)
893                         priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
894
895                 priorities = priorities_list;
896         }
897
898         for (i = 0; i < nb_links; i++)
899                 if (queues[i] >= dev->data->nb_queues) {
900                         rte_errno = -EINVAL;
901                         return 0;
902                 }
903
904         diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
905                                                 queues, priorities, nb_links);
906         if (diag < 0)
907                 return diag;
908
909         links_map = dev->data->links_map;
910         /* Point links_map to this port specific area */
911         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
912         for (i = 0; i < diag; i++)
913                 links_map[queues[i]] = (uint8_t)priorities[i];
914
915         return diag;
916 }
917
918 int
919 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
920                       uint8_t queues[], uint16_t nb_unlinks)
921 {
922         struct rte_eventdev *dev;
923         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
924         int i, diag, j;
925         uint16_t *links_map;
926
927         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
928         dev = &rte_eventdevs[dev_id];
929
930         if (*dev->dev_ops->port_unlink == NULL) {
931                 RTE_PMD_DEBUG_TRACE("Function not supported\n");
932                 rte_errno = -ENOTSUP;
933                 return 0;
934         }
935
936         if (!is_valid_port(dev, port_id)) {
937                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
938                 rte_errno = -EINVAL;
939                 return 0;
940         }
941
942         links_map = dev->data->links_map;
943         /* Point links_map to this port specific area */
944         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
945
946         if (queues == NULL) {
947                 j = 0;
948                 for (i = 0; i < dev->data->nb_queues; i++) {
949                         if (links_map[i] !=
950                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
951                                 all_queues[j] = i;
952                                 j++;
953                         }
954                 }
955                 queues = all_queues;
956         } else {
957                 for (j = 0; j < nb_unlinks; j++) {
958                         if (links_map[queues[j]] ==
959                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
960                                 break;
961                 }
962         }
963
964         nb_unlinks = j;
965         for (i = 0; i < nb_unlinks; i++)
966                 if (queues[i] >= dev->data->nb_queues) {
967                         rte_errno = -EINVAL;
968                         return 0;
969                 }
970
971         diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
972                                         queues, nb_unlinks);
973
974         if (diag < 0)
975                 return diag;
976
977         for (i = 0; i < diag; i++)
978                 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
979
980         return diag;
981 }
982
983 int
984 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
985                          uint8_t queues[], uint8_t priorities[])
986 {
987         struct rte_eventdev *dev;
988         uint16_t *links_map;
989         int i, count = 0;
990
991         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
992         dev = &rte_eventdevs[dev_id];
993         if (!is_valid_port(dev, port_id)) {
994                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
995                 return -EINVAL;
996         }
997
998         links_map = dev->data->links_map;
999         /* Point links_map to this port specific area */
1000         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1001         for (i = 0; i < dev->data->nb_queues; i++) {
1002                 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1003                         queues[count] = i;
1004                         priorities[count] = (uint8_t)links_map[i];
1005                         ++count;
1006                 }
1007         }
1008         return count;
1009 }
1010
1011 int
1012 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1013                                  uint64_t *timeout_ticks)
1014 {
1015         struct rte_eventdev *dev;
1016
1017         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1018         dev = &rte_eventdevs[dev_id];
1019         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1020
1021         if (timeout_ticks == NULL)
1022                 return -EINVAL;
1023
1024         return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1025 }
1026
1027 int
1028 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1029 {
1030         struct rte_eventdev *dev;
1031
1032         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1033         dev = &rte_eventdevs[dev_id];
1034
1035         if (service_id == NULL)
1036                 return -EINVAL;
1037
1038         if (dev->data->service_inited)
1039                 *service_id = dev->data->service_id;
1040
1041         return dev->data->service_inited ? 0 : -ESRCH;
1042 }
1043
1044 int
1045 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1046 {
1047         struct rte_eventdev *dev;
1048
1049         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1050         dev = &rte_eventdevs[dev_id];
1051         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1052
1053         (*dev->dev_ops->dump)(dev, f);
1054         return 0;
1055
1056 }
1057
1058 static int
1059 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1060                 uint8_t queue_port_id)
1061 {
1062         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1063         if (dev->dev_ops->xstats_get_names != NULL)
1064                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1065                                                         queue_port_id,
1066                                                         NULL, NULL, 0);
1067         return 0;
1068 }
1069
1070 int
1071 rte_event_dev_xstats_names_get(uint8_t dev_id,
1072                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1073                 struct rte_event_dev_xstats_name *xstats_names,
1074                 unsigned int *ids, unsigned int size)
1075 {
1076         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1077         const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1078                                                           queue_port_id);
1079         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1080                         (int)size < cnt_expected_entries)
1081                 return cnt_expected_entries;
1082
1083         /* dev_id checked above */
1084         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1085
1086         if (dev->dev_ops->xstats_get_names != NULL)
1087                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1088                                 queue_port_id, xstats_names, ids, size);
1089
1090         return -ENOTSUP;
1091 }
1092
1093 /* retrieve eventdev extended statistics */
1094 int
1095 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1096                 uint8_t queue_port_id, const unsigned int ids[],
1097                 uint64_t values[], unsigned int n)
1098 {
1099         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1100         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1101
1102         /* implemented by the driver */
1103         if (dev->dev_ops->xstats_get != NULL)
1104                 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1105                                 ids, values, n);
1106         return -ENOTSUP;
1107 }
1108
1109 uint64_t
1110 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1111                 unsigned int *id)
1112 {
1113         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1114         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1115         unsigned int temp = -1;
1116
1117         if (id != NULL)
1118                 *id = (unsigned int)-1;
1119         else
1120                 id = &temp; /* ensure driver never gets a NULL value */
1121
1122         /* implemented by driver */
1123         if (dev->dev_ops->xstats_get_by_name != NULL)
1124                 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1125         return -ENOTSUP;
1126 }
1127
1128 int rte_event_dev_xstats_reset(uint8_t dev_id,
1129                 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1130                 const uint32_t ids[], uint32_t nb_ids)
1131 {
1132         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1133         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1134
1135         if (dev->dev_ops->xstats_reset != NULL)
1136                 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1137                                                         ids, nb_ids);
1138         return -ENOTSUP;
1139 }
1140
1141 int rte_event_dev_selftest(uint8_t dev_id)
1142 {
1143         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1144         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1145
1146         if (dev->dev_ops->dev_selftest != NULL)
1147                 return (*dev->dev_ops->dev_selftest)();
1148         return -ENOTSUP;
1149 }
1150
1151 int
1152 rte_event_dev_start(uint8_t dev_id)
1153 {
1154         struct rte_eventdev *dev;
1155         int diag;
1156
1157         RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1158
1159         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1160         dev = &rte_eventdevs[dev_id];
1161         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1162
1163         if (dev->data->dev_started != 0) {
1164                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1165                         dev_id);
1166                 return 0;
1167         }
1168
1169         diag = (*dev->dev_ops->dev_start)(dev);
1170         if (diag == 0)
1171                 dev->data->dev_started = 1;
1172         else
1173                 return diag;
1174
1175         return 0;
1176 }
1177
1178 int
1179 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1180                 eventdev_stop_flush_t callback, void *userdata)
1181 {
1182         struct rte_eventdev *dev;
1183
1184         RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1185
1186         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1187         dev = &rte_eventdevs[dev_id];
1188
1189         dev->dev_ops->dev_stop_flush = callback;
1190         dev->data->dev_stop_flush_arg = userdata;
1191
1192         return 0;
1193 }
1194
1195 void
1196 rte_event_dev_stop(uint8_t dev_id)
1197 {
1198         struct rte_eventdev *dev;
1199
1200         RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1201
1202         RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1203         dev = &rte_eventdevs[dev_id];
1204         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1205
1206         if (dev->data->dev_started == 0) {
1207                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1208                         dev_id);
1209                 return;
1210         }
1211
1212         dev->data->dev_started = 0;
1213         (*dev->dev_ops->dev_stop)(dev);
1214 }
1215
1216 int
1217 rte_event_dev_close(uint8_t dev_id)
1218 {
1219         struct rte_eventdev *dev;
1220
1221         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1222         dev = &rte_eventdevs[dev_id];
1223         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1224
1225         /* Device must be stopped before it can be closed */
1226         if (dev->data->dev_started == 1) {
1227                 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1228                                 dev_id);
1229                 return -EBUSY;
1230         }
1231
1232         return (*dev->dev_ops->dev_close)(dev);
1233 }
1234
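/*
 * Reserve (primary process) or look up (secondary process) the memzone that
 * backs the shared rte_eventdev_data for this device id.
 */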
1235 static inline int
1236 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1237                 int socket_id)
1238 {
1239         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1240         const struct rte_memzone *mz;
1241         int n;
1242
1243         /* Generate memzone name */
1244         n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1245         if (n >= (int)sizeof(mz_name))
1246                 return -EINVAL;
1247
1248         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1249                 mz = rte_memzone_reserve(mz_name,
1250                                 sizeof(struct rte_eventdev_data),
1251                                 socket_id, 0);
1252         } else
1253                 mz = rte_memzone_lookup(mz_name);
1254
1255         if (mz == NULL)
1256                 return -ENOMEM;
1257
1258         *data = mz->addr;
1259         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1260                 memset(*data, 0, sizeof(struct rte_eventdev_data));
1261
1262         return 0;
1263 }
1264
1265 static inline uint8_t
1266 rte_eventdev_find_free_device_index(void)
1267 {
1268         uint8_t dev_id;
1269
1270         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1271                 if (rte_eventdevs[dev_id].attached ==
1272                                 RTE_EVENTDEV_DETACHED)
1273                         return dev_id;
1274         }
1275         return RTE_EVENT_MAX_DEVS;
1276 }
1277
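/*
 * Find a free device slot for a new PMD instance, allocate its shared data
 * area and mark the device as attached.
 */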
1278 struct rte_eventdev *
1279 rte_event_pmd_allocate(const char *name, int socket_id)
1280 {
1281         struct rte_eventdev *eventdev;
1282         uint8_t dev_id;
1283
1284         if (rte_event_pmd_get_named_dev(name) != NULL) {
1285                 RTE_EDEV_LOG_ERR("Event device with name %s already "
1286                                 "allocated!", name);
1287                 return NULL;
1288         }
1289
1290         dev_id = rte_eventdev_find_free_device_index();
1291         if (dev_id == RTE_EVENT_MAX_DEVS) {
1292                 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1293                 return NULL;
1294         }
1295
1296         eventdev = &rte_eventdevs[dev_id];
1297
1298         if (eventdev->data == NULL) {
1299                 struct rte_eventdev_data *eventdev_data = NULL;
1300
1301                 int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1302                                 socket_id);
1303
1304                 if (retval < 0 || eventdev_data == NULL)
1305                         return NULL;
1306
1307                 eventdev->data = eventdev_data;
1308
1309                 snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
1310                                 "%s", name);
1311
1312                 eventdev->data->dev_id = dev_id;
1313                 eventdev->data->socket_id = socket_id;
1314                 eventdev->data->dev_started = 0;
1315
1316                 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1317
1318                 eventdev_globals.nb_devs++;
1319         }
1320
1321         return eventdev;
1322 }
1323
1324 int
1325 rte_event_pmd_release(struct rte_eventdev *eventdev)
1326 {
1327         int ret;
1328         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1329         const struct rte_memzone *mz;
1330
1331         if (eventdev == NULL)
1332                 return -EINVAL;
1333
1334         eventdev->attached = RTE_EVENTDEV_DETACHED;
1335         eventdev_globals.nb_devs--;
1336
1337         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1338                 rte_free(eventdev->data->dev_private);
1339
1340                 /* Generate memzone name */
1341                 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1342                                 eventdev->data->dev_id);
1343                 if (ret >= (int)sizeof(mz_name))
1344                         return -EINVAL;
1345
1346                 mz = rte_memzone_lookup(mz_name);
1347                 if (mz == NULL)
1348                         return -ENOMEM;
1349
1350                 ret = rte_memzone_free(mz);
1351                 if (ret)
1352                         return ret;
1353         }
1354
1355         eventdev->data = NULL;
1356         return 0;
1357 }