1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15
16 #include <rte_byteorder.h>
17 #include <rte_log.h>
18 #include <rte_debug.h>
19 #include <rte_dev.h>
20 #include <rte_memory.h>
21 #include <rte_memcpy.h>
22 #include <rte_memzone.h>
23 #include <rte_eal.h>
24 #include <rte_per_lcore.h>
25 #include <rte_lcore.h>
26 #include <rte_atomic.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_malloc.h>
30 #include <rte_errno.h>
31 #include <rte_ethdev.h>
32
33 #include "rte_eventdev.h"
34 #include "rte_eventdev_pmd.h"
35
36 struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
37
38 struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
39
40 static struct rte_eventdev_global eventdev_globals = {
41         .nb_devs                = 0
42 };
43
44 struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
45
46 /* Event dev north bound API implementation */
47
48 uint8_t
49 rte_event_dev_count(void)
50 {
51         return rte_eventdev_globals->nb_devs;
52 }
53
54 int
55 rte_event_dev_get_dev_id(const char *name)
56 {
57         int i;
58
59         if (!name)
60                 return -EINVAL;
61
62         for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
63                 if ((strcmp(rte_event_devices[i].data->name, name)
64                                 == 0) &&
65                                 (rte_event_devices[i].attached ==
66                                                 RTE_EVENTDEV_ATTACHED))
67                         return i;
68         return -ENODEV;
69 }
70
71 int
72 rte_event_dev_socket_id(uint8_t dev_id)
73 {
74         struct rte_eventdev *dev;
75
76         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
77         dev = &rte_eventdevs[dev_id];
78
79         return dev->data->socket_id;
80 }
81
82 int
83 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
84 {
85         struct rte_eventdev *dev;
86
87         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
88         dev = &rte_eventdevs[dev_id];
89
90         if (dev_info == NULL)
91                 return -EINVAL;
92
93         memset(dev_info, 0, sizeof(struct rte_event_dev_info));
94
95         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
96         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
97
98         dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
99
100         dev_info->dev = dev->dev;
101         return 0;
102 }
103
104 int
105 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
106                                 uint32_t *caps)
107 {
108         struct rte_eventdev *dev;
109
110         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
111         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
112
113         dev = &rte_eventdevs[dev_id];
114
115         if (caps == NULL)
116                 return -EINVAL;
117         *caps = 0;
118
119         return dev->dev_ops->eth_rx_adapter_caps_get ?
120                                 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
121                                                 &rte_eth_devices[eth_port_id],
122                                                 caps)
123                                 : 0;
124 }
125
126 static inline int
127 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
128 {
129         uint8_t old_nb_queues = dev->data->nb_queues;
130         struct rte_event_queue_conf *queues_cfg;
131         unsigned int i;
132
133         RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
134                          dev->data->dev_id);
135
136         /* First time configuration */
137         if (dev->data->queues_cfg == NULL && nb_queues != 0) {
138                 /* Allocate memory to store queue configuration */
139                 dev->data->queues_cfg = rte_zmalloc_socket(
140                                 "eventdev->data->queues_cfg",
141                                 sizeof(dev->data->queues_cfg[0]) * nb_queues,
142                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
143                 if (dev->data->queues_cfg == NULL) {
144                         dev->data->nb_queues = 0;
145                         RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
146                                         " nb_queues %u", nb_queues);
147                         return -(ENOMEM);
148                 }
149         /* Re-configure */
150         } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
151                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
152
153                 for (i = nb_queues; i < old_nb_queues; i++)
154                         (*dev->dev_ops->queue_release)(dev, i);
155
156                 /* Re-allocate memory to store queue configuration */
157                 queues_cfg = dev->data->queues_cfg;
158                 queues_cfg = rte_realloc(queues_cfg,
159                                 sizeof(queues_cfg[0]) * nb_queues,
160                                 RTE_CACHE_LINE_SIZE);
161                 if (queues_cfg == NULL) {
162                         RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
163                                                 " nb_queues %u", nb_queues);
164                         return -(ENOMEM);
165                 }
166                 dev->data->queues_cfg = queues_cfg;
167
168                 if (nb_queues > old_nb_queues) {
169                         uint8_t new_qs = nb_queues - old_nb_queues;
170
171                         memset(queues_cfg + old_nb_queues, 0,
172                                 sizeof(queues_cfg[0]) * new_qs);
173                 }
174         } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
175                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
176
177                 for (i = nb_queues; i < old_nb_queues; i++)
178                         (*dev->dev_ops->queue_release)(dev, i);
179         }
180
181         dev->data->nb_queues = nb_queues;
182         return 0;
183 }
184
185 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
186
187 static inline int
188 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
189 {
190         uint8_t old_nb_ports = dev->data->nb_ports;
191         void **ports;
192         uint16_t *links_map;
193         struct rte_event_port_conf *ports_cfg;
194         unsigned int i;
195
196         RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
197                          dev->data->dev_id);
198
199         /* First time configuration */
200         if (dev->data->ports == NULL && nb_ports != 0) {
201                 dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
202                                 sizeof(dev->data->ports[0]) * nb_ports,
203                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
204                 if (dev->data->ports == NULL) {
205                         dev->data->nb_ports = 0;
206                 RTE_EDEV_LOG_ERR("failed to get mem for port metadata,"
207                                         " nb_ports %u", nb_ports);
208                         return -(ENOMEM);
209                 }
210
211                 /* Allocate memory to store port configurations */
212                 dev->data->ports_cfg =
213                         rte_zmalloc_socket("eventdev->ports_cfg",
214                         sizeof(dev->data->ports_cfg[0]) * nb_ports,
215                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
216                 if (dev->data->ports_cfg == NULL) {
217                         dev->data->nb_ports = 0;
218                         RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
219                                         " nb_ports %u", nb_ports);
220                         return -(ENOMEM);
221                 }
222
223                 /* Allocate memory to store queue to port link connection */
224                 dev->data->links_map =
225                         rte_zmalloc_socket("eventdev->links_map",
226                         sizeof(dev->data->links_map[0]) * nb_ports *
227                         RTE_EVENT_MAX_QUEUES_PER_DEV,
228                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
229                 if (dev->data->links_map == NULL) {
230                         dev->data->nb_ports = 0;
231                         RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
232                                         " nb_ports %u", nb_ports);
233                         return -(ENOMEM);
234                 }
235                 for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
236                         dev->data->links_map[i] =
237                                 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
238         } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
239                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
240
241                 ports = dev->data->ports;
242                 ports_cfg = dev->data->ports_cfg;
243                 links_map = dev->data->links_map;
244
245                 for (i = nb_ports; i < old_nb_ports; i++)
246                         (*dev->dev_ops->port_release)(ports[i]);
247
248                 /* Realloc memory for ports */
249                 ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
250                                 RTE_CACHE_LINE_SIZE);
251                 if (ports == NULL) {
252                         RTE_EDEV_LOG_ERR("failed to realloc port metadata,"
253                                                 " nb_ports %u", nb_ports);
254                         return -(ENOMEM);
255                 }
256
257                 /* Realloc memory for ports_cfg */
258                 ports_cfg = rte_realloc(ports_cfg,
259                         sizeof(ports_cfg[0]) * nb_ports,
260                         RTE_CACHE_LINE_SIZE);
261                 if (ports_cfg == NULL) {
262                         RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
263                                                 " nb_ports %u", nb_ports);
264                         return -(ENOMEM);
265                 }
266
267                 /* Realloc memory to store queue to port link connection */
268                 links_map = rte_realloc(links_map,
269                         sizeof(dev->data->links_map[0]) * nb_ports *
270                         RTE_EVENT_MAX_QUEUES_PER_DEV,
271                         RTE_CACHE_LINE_SIZE);
272                 if (links_map == NULL) {
273                         dev->data->nb_ports = 0;
274                         RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
275                                         " nb_ports %u", nb_ports);
276                         return -(ENOMEM);
277                 }
278
279                 if (nb_ports > old_nb_ports) {
280                         uint8_t new_ps = nb_ports - old_nb_ports;
281                         unsigned int old_links_map_end =
282                                 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
283                         unsigned int links_map_end =
284                                 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
285
286                         memset(ports + old_nb_ports, 0,
287                                 sizeof(ports[0]) * new_ps);
288                         memset(ports_cfg + old_nb_ports, 0,
289                                 sizeof(ports_cfg[0]) * new_ps);
290                         for (i = old_links_map_end; i < links_map_end; i++)
291                                 links_map[i] =
292                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
293                 }
294
295                 dev->data->ports = ports;
296                 dev->data->ports_cfg = ports_cfg;
297                 dev->data->links_map = links_map;
298         } else if (dev->data->ports != NULL && nb_ports == 0) {
299                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
300
301                 ports = dev->data->ports;
302                 for (i = nb_ports; i < old_nb_ports; i++)
303                         (*dev->dev_ops->port_release)(ports[i]);
304         }
305
306         dev->data->nb_ports = nb_ports;
307         return 0;
308 }
309
310 int
311 rte_event_dev_configure(uint8_t dev_id,
312                         const struct rte_event_dev_config *dev_conf)
313 {
314         struct rte_eventdev *dev;
315         struct rte_event_dev_info info;
316         int diag;
317
318         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
319         dev = &rte_eventdevs[dev_id];
320
321         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
322         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
323
324         if (dev->data->dev_started) {
325                 RTE_EDEV_LOG_ERR(
326                     "device %d must be stopped to allow configuration", dev_id);
327                 return -EBUSY;
328         }
329
330         if (dev_conf == NULL)
331                 return -EINVAL;
332
333         (*dev->dev_ops->dev_infos_get)(dev, &info);
334
335         /* Check dequeue_timeout_ns value is in limit */
336         if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
337                 if (dev_conf->dequeue_timeout_ns &&
338                     (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
339                         || dev_conf->dequeue_timeout_ns >
340                                  info.max_dequeue_timeout_ns)) {
341                         RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
342                         " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
343                         dev_id, dev_conf->dequeue_timeout_ns,
344                         info.min_dequeue_timeout_ns,
345                         info.max_dequeue_timeout_ns);
346                         return -EINVAL;
347                 }
348         }
349
350         /* Check nb_events_limit is in limit */
351         if (dev_conf->nb_events_limit > info.max_num_events) {
352                 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
353                 dev_id, dev_conf->nb_events_limit, info.max_num_events);
354                 return -EINVAL;
355         }
356
357         /* Check nb_event_queues is in limit */
358         if (!dev_conf->nb_event_queues) {
359                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
360                                         dev_id);
361                 return -EINVAL;
362         }
363         if (dev_conf->nb_event_queues > info.max_event_queues) {
364                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
365                 dev_id, dev_conf->nb_event_queues, info.max_event_queues);
366                 return -EINVAL;
367         }
368
369         /* Check nb_event_ports is in limit */
370         if (!dev_conf->nb_event_ports) {
371                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
372                 return -EINVAL;
373         }
374         if (dev_conf->nb_event_ports > info.max_event_ports) {
375                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
376                 dev_id, dev_conf->nb_event_ports, info.max_event_ports);
377                 return -EINVAL;
378         }
379
380         /* Check nb_event_queue_flows is in limit */
381         if (!dev_conf->nb_event_queue_flows) {
382                 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
383                 return -EINVAL;
384         }
385         if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
386                 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
387                 dev_id, dev_conf->nb_event_queue_flows,
388                 info.max_event_queue_flows);
389                 return -EINVAL;
390         }
391
392         /* Check nb_event_port_dequeue_depth is in limit */
393         if (!dev_conf->nb_event_port_dequeue_depth) {
394                 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
395                                         dev_id);
396                 return -EINVAL;
397         }
398         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
399                  (dev_conf->nb_event_port_dequeue_depth >
400                          info.max_event_port_dequeue_depth)) {
401                 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
402                 dev_id, dev_conf->nb_event_port_dequeue_depth,
403                 info.max_event_port_dequeue_depth);
404                 return -EINVAL;
405         }
406
407         /* Check nb_event_port_enqueue_depth is in limit */
408         if (!dev_conf->nb_event_port_enqueue_depth) {
409                 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
410                                         dev_id);
411                 return -EINVAL;
412         }
413         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
414                 (dev_conf->nb_event_port_enqueue_depth >
415                          info.max_event_port_enqueue_depth)) {
416                 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
417                 dev_id, dev_conf->nb_event_port_enqueue_depth,
418                 info.max_event_port_enqueue_depth);
419                 return -EINVAL;
420         }
421
422         /* Copy the dev_conf parameter into the dev structure */
423         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
424
425         /* Setup new number of queues and reconfigure device. */
426         diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
427         if (diag != 0) {
428                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
429                                 dev_id, diag);
430                 return diag;
431         }
432
433         /* Setup new number of ports and reconfigure device. */
434         diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
435         if (diag != 0) {
436                 rte_event_dev_queue_config(dev, 0);
437                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
438                                 dev_id, diag);
439                 return diag;
440         }
441
442         /* Configure the device */
443         diag = (*dev->dev_ops->dev_configure)(dev);
444         if (diag != 0) {
445                 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
446                 rte_event_dev_queue_config(dev, 0);
447                 rte_event_dev_port_config(dev, 0);
448         }
449
450         dev->data->event_dev_cap = info.event_dev_cap;
451         return diag;
452 }
453
454 static inline int
455 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
456 {
457         if (queue_id < dev->data->nb_queues && queue_id <
458                                 RTE_EVENT_MAX_QUEUES_PER_DEV)
459                 return 1;
460         else
461                 return 0;
462 }
463
464 int
465 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
466                                  struct rte_event_queue_conf *queue_conf)
467 {
468         struct rte_eventdev *dev;
469
470         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
471         dev = &rte_eventdevs[dev_id];
472
473         if (queue_conf == NULL)
474                 return -EINVAL;
475
476         if (!is_valid_queue(dev, queue_id)) {
477                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
478                 return -EINVAL;
479         }
480
481         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
482         memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
483         (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
484         return 0;
485 }
486
487 static inline int
488 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
489 {
490         if (queue_conf &&
491                 !(queue_conf->event_queue_cfg &
492                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
493                 ((queue_conf->event_queue_cfg &
494                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
495                 (queue_conf->schedule_type
496                         == RTE_SCHED_TYPE_ATOMIC)
497                 ))
498                 return 1;
499         else
500                 return 0;
501 }
502
503 static inline int
504 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
505 {
506         if (queue_conf &&
507                 !(queue_conf->event_queue_cfg &
508                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
509                 ((queue_conf->event_queue_cfg &
510                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
511                 (queue_conf->schedule_type
512                         == RTE_SCHED_TYPE_ORDERED)
513                 ))
514                 return 1;
515         else
516                 return 0;
517 }
518
519
520 int
521 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
522                       const struct rte_event_queue_conf *queue_conf)
523 {
524         struct rte_eventdev *dev;
525         struct rte_event_queue_conf def_conf;
526
527         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
528         dev = &rte_eventdevs[dev_id];
529
530         if (!is_valid_queue(dev, queue_id)) {
531                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
532                 return -EINVAL;
533         }
534
535         /* Check nb_atomic_flows limit */
536         if (is_valid_atomic_queue_conf(queue_conf)) {
537                 if (queue_conf->nb_atomic_flows == 0 ||
538                     queue_conf->nb_atomic_flows >
539                         dev->data->dev_conf.nb_event_queue_flows) {
540                         RTE_EDEV_LOG_ERR(
541                 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
542                         dev_id, queue_id, queue_conf->nb_atomic_flows,
543                         dev->data->dev_conf.nb_event_queue_flows);
544                         return -EINVAL;
545                 }
546         }
547
548         /* Check nb_atomic_order_sequences limit */
549         if (is_valid_ordered_queue_conf(queue_conf)) {
550                 if (queue_conf->nb_atomic_order_sequences == 0 ||
551                     queue_conf->nb_atomic_order_sequences >
552                         dev->data->dev_conf.nb_event_queue_flows) {
553                         RTE_EDEV_LOG_ERR(
554                 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
555                         dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
556                         dev->data->dev_conf.nb_event_queue_flows);
557                         return -EINVAL;
558                 }
559         }
560
561         if (dev->data->dev_started) {
562                 RTE_EDEV_LOG_ERR(
563                     "device %d must be stopped to allow queue setup", dev_id);
564                 return -EBUSY;
565         }
566
567         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
568
569         if (queue_conf == NULL) {
570                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
571                                         -ENOTSUP);
572                 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
573                 queue_conf = &def_conf;
574         }
575
576         dev->data->queues_cfg[queue_id] = *queue_conf;
577         return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
578 }
579
580 static inline int
581 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
582 {
583         if (port_id < dev->data->nb_ports)
584                 return 1;
585         else
586                 return 0;
587 }
588
589 int
590 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
591                                  struct rte_event_port_conf *port_conf)
592 {
593         struct rte_eventdev *dev;
594
595         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
596         dev = &rte_eventdevs[dev_id];
597
598         if (port_conf == NULL)
599                 return -EINVAL;
600
601         if (!is_valid_port(dev, port_id)) {
602                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
603                 return -EINVAL;
604         }
605
606         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
607         memset(port_conf, 0, sizeof(struct rte_event_port_conf));
608         (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
609         return 0;
610 }
611
612 int
613 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
614                      const struct rte_event_port_conf *port_conf)
615 {
616         struct rte_eventdev *dev;
617         struct rte_event_port_conf def_conf;
618         int diag;
619
620         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
621         dev = &rte_eventdevs[dev_id];
622
623         if (!is_valid_port(dev, port_id)) {
624                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
625                 return -EINVAL;
626         }
627
628         /* Check new_event_threshold limit */
629         if ((port_conf && !port_conf->new_event_threshold) ||
630                         (port_conf && port_conf->new_event_threshold >
631                                  dev->data->dev_conf.nb_events_limit)) {
632                 RTE_EDEV_LOG_ERR(
633                    "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
634                         dev_id, port_id, port_conf->new_event_threshold,
635                         dev->data->dev_conf.nb_events_limit);
636                 return -EINVAL;
637         }
638
639         /* Check dequeue_depth limit */
640         if ((port_conf && !port_conf->dequeue_depth) ||
641                         (port_conf && port_conf->dequeue_depth >
642                 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
643                 RTE_EDEV_LOG_ERR(
644                    "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
645                         dev_id, port_id, port_conf->dequeue_depth,
646                         dev->data->dev_conf.nb_event_port_dequeue_depth);
647                 return -EINVAL;
648         }
649
650         /* Check enqueue_depth limit */
651         if ((port_conf && !port_conf->enqueue_depth) ||
652                         (port_conf && port_conf->enqueue_depth >
653                 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
654                 RTE_EDEV_LOG_ERR(
655                    "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
656                         dev_id, port_id, port_conf->enqueue_depth,
657                         dev->data->dev_conf.nb_event_port_enqueue_depth);
658                 return -EINVAL;
659         }
660
661         if (port_conf && port_conf->disable_implicit_release &&
662             !(dev->data->event_dev_cap &
663               RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
664                 RTE_EDEV_LOG_ERR(
665                    "dev%d port%d Implicit release disable not supported",
666                         dev_id, port_id);
667                 return -EINVAL;
668         }
669
670         if (dev->data->dev_started) {
671                 RTE_EDEV_LOG_ERR(
672                     "device %d must be stopped to allow port setup", dev_id);
673                 return -EBUSY;
674         }
675
676         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
677
678         if (port_conf == NULL) {
679                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
680                                         -ENOTSUP);
681                 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
682                 port_conf = &def_conf;
683         }
684
685         dev->data->ports_cfg[port_id] = *port_conf;
686
687         diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
688
689         /* Unlink all the queues from this port (default state after setup) */
690         if (!diag)
691                 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
692
693         if (diag < 0)
694                 return diag;
695
696         return 0;
697 }
698
699 int
700 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
701                        uint32_t *attr_value)
702 {
703         struct rte_eventdev *dev;
704
705         if (!attr_value)
706                 return -EINVAL;
707         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
708         dev = &rte_eventdevs[dev_id];
709
710         switch (attr_id) {
711         case RTE_EVENT_DEV_ATTR_PORT_COUNT:
712                 *attr_value = dev->data->nb_ports;
713                 break;
714         case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
715                 *attr_value = dev->data->nb_queues;
716                 break;
717         case RTE_EVENT_DEV_ATTR_STARTED:
718                 *attr_value = dev->data->dev_started;
719                 break;
720         default:
721                 return -EINVAL;
722         }
723
724         return 0;
725 }
726
727 int
728 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
729                         uint32_t *attr_value)
730 {
731         struct rte_eventdev *dev;
732
733         if (!attr_value)
734                 return -EINVAL;
735
736         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
737         dev = &rte_eventdevs[dev_id];
738         if (!is_valid_port(dev, port_id)) {
739                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
740                 return -EINVAL;
741         }
742
743         switch (attr_id) {
744         case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
745                 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
746                 break;
747         case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
748                 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
749                 break;
750         case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
751                 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
752                 break;
753         default:
754                 return -EINVAL;
755         }
756         return 0;
757 }
758
759 int
760 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
761                         uint32_t *attr_value)
762 {
763         struct rte_event_queue_conf *conf;
764         struct rte_eventdev *dev;
765
766         if (!attr_value)
767                 return -EINVAL;
768
769         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
770         dev = &rte_eventdevs[dev_id];
771         if (!is_valid_queue(dev, queue_id)) {
772                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
773                 return -EINVAL;
774         }
775
776         conf = &dev->data->queues_cfg[queue_id];
777
778         switch (attr_id) {
779         case RTE_EVENT_QUEUE_ATTR_PRIORITY:
780                 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
781                 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
782                         *attr_value = conf->priority;
783                 break;
784         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
785                 *attr_value = conf->nb_atomic_flows;
786                 break;
787         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
788                 *attr_value = conf->nb_atomic_order_sequences;
789                 break;
790         case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
791                 *attr_value = conf->event_queue_cfg;
792                 break;
793         case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
794                 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
795                         return -EOVERFLOW;
796
797                 *attr_value = conf->schedule_type;
798                 break;
799         default:
800                 return -EINVAL;
801         }
802         return 0;
803 }
804
805 int
806 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
807                     const uint8_t queues[], const uint8_t priorities[],
808                     uint16_t nb_links)
809 {
810         struct rte_eventdev *dev;
811         uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
812         uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
813         uint16_t *links_map;
814         int i, diag;
815
816         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
817         dev = &rte_eventdevs[dev_id];
818
819         if (*dev->dev_ops->port_link == NULL) {
820                 RTE_PMD_DEBUG_TRACE("Function not supported\n");
821                 rte_errno = ENOTSUP;
822                 return 0;
823         }
824
825         if (!is_valid_port(dev, port_id)) {
826                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
827                 rte_errno = EINVAL;
828                 return 0;
829         }
830
831         if (queues == NULL) {
832                 for (i = 0; i < dev->data->nb_queues; i++)
833                         queues_list[i] = i;
834
835                 queues = queues_list;
836                 nb_links = dev->data->nb_queues;
837         }
838
839         if (priorities == NULL) {
840                 for (i = 0; i < nb_links; i++)
841                         priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
842
843                 priorities = priorities_list;
844         }
845
846         for (i = 0; i < nb_links; i++)
847                 if (queues[i] >= dev->data->nb_queues) {
848                         rte_errno = EINVAL;
849                         return 0;
850                 }
851
852         diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
853                                                 queues, priorities, nb_links);
854         if (diag < 0)
855                 return diag;
856
857         links_map = dev->data->links_map;
858         /* Point links_map to this port-specific area */
859         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
860         for (i = 0; i < diag; i++)
861                 links_map[queues[i]] = (uint8_t)priorities[i];
862
863         return diag;
864 }
865
866 int
867 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
868                       uint8_t queues[], uint16_t nb_unlinks)
869 {
870         struct rte_eventdev *dev;
871         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
872         int i, diag, j;
873         uint16_t *links_map;
874
875         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
876         dev = &rte_eventdevs[dev_id];
877
878         if (*dev->dev_ops->port_unlink == NULL) {
879                 RTE_PMD_DEBUG_TRACE("Function not supported\n");
880                 rte_errno = ENOTSUP;
881                 return 0;
882         }
883
884         if (!is_valid_port(dev, port_id)) {
885                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
886                 rte_errno = EINVAL;
887                 return 0;
888         }
889
890         links_map = dev->data->links_map;
891         /* Point links_map to this port-specific area */
892         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
893
894         if (queues == NULL) {
895                 j = 0;
896                 for (i = 0; i < dev->data->nb_queues; i++) {
897                         if (links_map[i] !=
898                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
899                                 all_queues[j] = i;
900                                 j++;
901                         }
902                 }
903                 queues = all_queues;
904         } else {
905                 for (j = 0; j < nb_unlinks; j++) {
906                         if (links_map[queues[j]] ==
907                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
908                                 break;
909                 }
910         }
911
912         nb_unlinks = j;
913         for (i = 0; i < nb_unlinks; i++)
914                 if (queues[i] >= dev->data->nb_queues) {
915                         rte_errno = EINVAL;
916                         return 0;
917                 }
918
919         diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
920                                         queues, nb_unlinks);
921
922         if (diag < 0)
923                 return diag;
924
925         for (i = 0; i < diag; i++)
926                 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
927
928         return diag;
929 }
930
931 int
932 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
933                          uint8_t queues[], uint8_t priorities[])
934 {
935         struct rte_eventdev *dev;
936         uint16_t *links_map;
937         int i, count = 0;
938
939         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
940         dev = &rte_eventdevs[dev_id];
941         if (!is_valid_port(dev, port_id)) {
942                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
943                 return -EINVAL;
944         }
945
946         links_map = dev->data->links_map;
947         /* Point links_map to this port-specific area */
948         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
949         for (i = 0; i < dev->data->nb_queues; i++) {
950                 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
951                         queues[count] = i;
952                         priorities[count] = (uint8_t)links_map[i];
953                         ++count;
954                 }
955         }
956         return count;
957 }
958
959 int
960 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
961                                  uint64_t *timeout_ticks)
962 {
963         struct rte_eventdev *dev;
964
965         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
966         dev = &rte_eventdevs[dev_id];
967         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
968
969         if (timeout_ticks == NULL)
970                 return -EINVAL;
971
972         return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
973 }
974
975 int
976 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
977 {
978         struct rte_eventdev *dev;
979
980         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
981         dev = &rte_eventdevs[dev_id];
982
983         if (service_id == NULL)
984                 return -EINVAL;
985
986         if (dev->data->service_inited)
987                 *service_id = dev->data->service_id;
988
989         return dev->data->service_inited ? 0 : -ESRCH;
990 }
991
992 int
993 rte_event_dev_dump(uint8_t dev_id, FILE *f)
994 {
995         struct rte_eventdev *dev;
996
997         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
998         dev = &rte_eventdevs[dev_id];
999         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1000
1001         (*dev->dev_ops->dump)(dev, f);
1002         return 0;
1003
1004 }
1005
1006 static int
1007 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1008                 uint8_t queue_port_id)
1009 {
1010         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1011         if (dev->dev_ops->xstats_get_names != NULL)
1012                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1013                                                         queue_port_id,
1014                                                         NULL, NULL, 0);
1015         return 0;
1016 }
1017
1018 int
1019 rte_event_dev_xstats_names_get(uint8_t dev_id,
1020                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1021                 struct rte_event_dev_xstats_name *xstats_names,
1022                 unsigned int *ids, unsigned int size)
1023 {
1024         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1025         const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1026                                                           queue_port_id);
1027         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1028                         (int)size < cnt_expected_entries)
1029                 return cnt_expected_entries;
1030
1031         /* dev_id checked above */
1032         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1033
1034         if (dev->dev_ops->xstats_get_names != NULL)
1035                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1036                                 queue_port_id, xstats_names, ids, size);
1037
1038         return -ENOTSUP;
1039 }
1040
1041 /* retrieve eventdev extended statistics */
1042 int
1043 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1044                 uint8_t queue_port_id, const unsigned int ids[],
1045                 uint64_t values[], unsigned int n)
1046 {
1047         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1048         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1049
1050         /* implemented by the driver */
1051         if (dev->dev_ops->xstats_get != NULL)
1052                 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1053                                 ids, values, n);
1054         return -ENOTSUP;
1055 }
1056
1057 uint64_t
1058 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1059                 unsigned int *id)
1060 {
1061         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1062         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1063         unsigned int temp = -1;
1064
1065         if (id != NULL)
1066                 *id = (unsigned int)-1;
1067         else
1068                 id = &temp; /* ensure driver never gets a NULL value */
1069
1070         /* implemented by driver */
1071         if (dev->dev_ops->xstats_get_by_name != NULL)
1072                 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1073         return -ENOTSUP;
1074 }
1075
1076 int rte_event_dev_xstats_reset(uint8_t dev_id,
1077                 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1078                 const uint32_t ids[], uint32_t nb_ids)
1079 {
1080         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1081         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1082
1083         if (dev->dev_ops->xstats_reset != NULL)
1084                 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1085                                                         ids, nb_ids);
1086         return -ENOTSUP;
1087 }
1088
1089 int rte_event_dev_selftest(uint8_t dev_id)
1090 {
1091         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1092         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1093
1094         if (dev->dev_ops->dev_selftest != NULL)
1095                 return (*dev->dev_ops->dev_selftest)();
1096         return -ENOTSUP;
1097 }
1098
1099 int
1100 rte_event_dev_start(uint8_t dev_id)
1101 {
1102         struct rte_eventdev *dev;
1103         int diag;
1104
1105         RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1106
1107         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1108         dev = &rte_eventdevs[dev_id];
1109         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1110
1111         if (dev->data->dev_started != 0) {
1112                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1113                         dev_id);
1114                 return 0;
1115         }
1116
1117         diag = (*dev->dev_ops->dev_start)(dev);
1118         if (diag == 0)
1119                 dev->data->dev_started = 1;
1120         else
1121                 return diag;
1122
1123         return 0;
1124 }
1125
1126 void
1127 rte_event_dev_stop(uint8_t dev_id)
1128 {
1129         struct rte_eventdev *dev;
1130
1131         RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1132
1133         RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1134         dev = &rte_eventdevs[dev_id];
1135         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1136
1137         if (dev->data->dev_started == 0) {
1138                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1139                         dev_id);
1140                 return;
1141         }
1142
1143         dev->data->dev_started = 0;
1144         (*dev->dev_ops->dev_stop)(dev);
1145 }
1146
1147 int
1148 rte_event_dev_close(uint8_t dev_id)
1149 {
1150         struct rte_eventdev *dev;
1151
1152         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1153         dev = &rte_eventdevs[dev_id];
1154         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1155
1156         /* Device must be stopped before it can be closed */
1157         if (dev->data->dev_started == 1) {
1158                 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1159                                 dev_id);
1160                 return -EBUSY;
1161         }
1162
1163         return (*dev->dev_ops->dev_close)(dev);
1164 }
1165
1166 static inline int
1167 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1168                 int socket_id)
1169 {
1170         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1171         const struct rte_memzone *mz;
1172         int n;
1173
1174         /* Generate memzone name */
1175         n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1176         if (n >= (int)sizeof(mz_name))
1177                 return -EINVAL;
1178
1179         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1180                 mz = rte_memzone_reserve(mz_name,
1181                                 sizeof(struct rte_eventdev_data),
1182                                 socket_id, 0);
1183         } else
1184                 mz = rte_memzone_lookup(mz_name);
1185
1186         if (mz == NULL)
1187                 return -ENOMEM;
1188
1189         *data = mz->addr;
1190         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1191                 memset(*data, 0, sizeof(struct rte_eventdev_data));
1192
1193         return 0;
1194 }
1195
1196 static inline uint8_t
1197 rte_eventdev_find_free_device_index(void)
1198 {
1199         uint8_t dev_id;
1200
1201         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1202                 if (rte_eventdevs[dev_id].attached ==
1203                                 RTE_EVENTDEV_DETACHED)
1204                         return dev_id;
1205         }
1206         return RTE_EVENT_MAX_DEVS;
1207 }
1208
1209 struct rte_eventdev *
1210 rte_event_pmd_allocate(const char *name, int socket_id)
1211 {
1212         struct rte_eventdev *eventdev;
1213         uint8_t dev_id;
1214
1215         if (rte_event_pmd_get_named_dev(name) != NULL) {
1216                 RTE_EDEV_LOG_ERR("Event device with name %s already "
1217                                 "allocated!", name);
1218                 return NULL;
1219         }
1220
1221         dev_id = rte_eventdev_find_free_device_index();
1222         if (dev_id == RTE_EVENT_MAX_DEVS) {
1223                 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1224                 return NULL;
1225         }
1226
1227         eventdev = &rte_eventdevs[dev_id];
1228
1229         if (eventdev->data == NULL) {
1230                 struct rte_eventdev_data *eventdev_data = NULL;
1231
1232                 int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1233                                 socket_id);
1234
1235                 if (retval < 0 || eventdev_data == NULL)
1236                         return NULL;
1237
1238                 eventdev->data = eventdev_data;
1239
1240                 snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
1241                                 "%s", name);
1242
1243                 eventdev->data->dev_id = dev_id;
1244                 eventdev->data->socket_id = socket_id;
1245                 eventdev->data->dev_started = 0;
1246
1247                 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1248
1249                 eventdev_globals.nb_devs++;
1250         }
1251
1252         return eventdev;
1253 }
1254
1255 int
1256 rte_event_pmd_release(struct rte_eventdev *eventdev)
1257 {
1258         int ret;
1259         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1260         const struct rte_memzone *mz;
1261
1262         if (eventdev == NULL)
1263                 return -EINVAL;
1264
1265         eventdev->attached = RTE_EVENTDEV_DETACHED;
1266         eventdev_globals.nb_devs--;
1267
1268         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1269                 rte_free(eventdev->data->dev_private);
1270
1271                 /* Generate memzone name */
1272                 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1273                                 eventdev->data->dev_id);
1274                 if (ret >= (int)sizeof(mz_name))
1275                         return -EINVAL;
1276
1277                 mz = rte_memzone_lookup(mz_name);
1278                 if (mz == NULL)
1279                         return -ENOMEM;
1280
1281                 ret = rte_memzone_free(mz);
1282                 if (ret)
1283                         return ret;
1284         }
1285
1286         eventdev->data = NULL;
1287         return 0;
1288 }
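
/*
 * Minimal usage sketch (illustration only, not part of the library code
 * above): one possible way an application might drive the north-bound API
 * implemented in this file -- configure the device, set up one queue and
 * one port with the PMD's default configurations, link them and start the
 * device. The device id passed in, the single-queue/single-port layout and
 * the helper name setup_single_port_eventdev are assumptions made purely
 * for this example; real applications would size the configuration from
 * their own requirements and handle errors more carefully.
 *
 *     #include <rte_eventdev.h>
 *
 *     static int
 *     setup_single_port_eventdev(uint8_t dev_id)
 *     {
 *             struct rte_event_dev_info info;
 *             struct rte_event_dev_config cfg = {0};
 *             int ret;
 *
 *             ret = rte_event_dev_info_get(dev_id, &info);
 *             if (ret < 0)
 *                     return ret;
 *
 *             cfg.nb_event_queues = 1;
 *             cfg.nb_event_ports = 1;
 *             cfg.nb_events_limit = info.max_num_events;
 *             cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *             cfg.nb_event_port_dequeue_depth =
 *                             info.max_event_port_dequeue_depth;
 *             cfg.nb_event_port_enqueue_depth =
 *                             info.max_event_port_enqueue_depth;
 *             cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *
 *             ret = rte_event_dev_configure(dev_id, &cfg);
 *             if (ret < 0)
 *                     return ret;
 *
 *             ret = rte_event_queue_setup(dev_id, 0, NULL);
 *             if (ret < 0)
 *                     return ret;
 *
 *             ret = rte_event_port_setup(dev_id, 0, NULL);
 *             if (ret < 0)
 *                     return ret;
 *
 *             if (rte_event_port_link(dev_id, 0, NULL, NULL, 0) != 1)
 *                     return -1;
 *
 *             return rte_event_dev_start(dev_id);
 *     }
 *
 * Passing NULL to rte_event_queue_setup()/rte_event_port_setup() requests
 * the PMD default configuration, and a NULL queue list in
 * rte_event_port_link() links the port to every configured queue at normal
 * priority, as handled by the code above.
 */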