/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_vdev.h>

#include "skeleton_eventdev.h"

#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */

static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);

	return 0;
}

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);
	RTE_SET_USED(nb_events);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
				uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

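/*
 * The four stubs above are the fast-path entry points. The skeleton only
 * marks its arguments as used and reports zero events moved; a real PMD
 * would drive its scheduling hardware here. Applications never call them
 * directly -- they go through the inline burst wrappers. A minimal sketch
 * (dev_id, port_id and the event contents are illustrative, not part of
 * this driver):
 *
 *	struct rte_event ev = {
 *		.op = RTE_EVENT_OP_NEW,
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.event_type = RTE_EVENT_TYPE_CPU,
 *		.u64 = 0xcafe,
 *	};
 *	uint16_t nb = rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *	nb = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, timeout_ticks);
 */
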
static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
		struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_BURST_MODE |
					RTE_EVENT_DEV_CAP_EVENT_QOS;
}

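/*
 * info_get() is how the device advertises its limits and capabilities;
 * applications are expected to query it before configuring anything.
 * A minimal sketch (dev_id is illustrative):
 *
 *	struct rte_event_dev_info info;
 *	rte_event_dev_info_get(dev_id, &info);
 *	(info.max_event_queues is 64, info.max_event_ports is 32, ...)
 */
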
static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

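/*
 * The common rte_event_dev_configure() wrapper validates the requested
 * configuration against the limits from info_get() before this callback
 * runs, so the PMD only has to apply values that are already sane.
 * App side, roughly (a sketch; any values within the advertised maxima
 * would do):
 *
 *	struct rte_event_dev_config cfg = {
 *		.dequeue_timeout_ns = 25,
 *		.nb_events_limit = 4096,
 *		.nb_event_queues = 2,
 *		.nb_event_ports = 2,
 *		.nb_event_queue_flows = 1024,
 *		.nb_event_port_dequeue_depth = 16,
 *		.nb_event_port_enqueue_depth = 16,
 *	};
 *	rte_event_dev_configure(dev_id, &cfg);
 */
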
static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}

static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}

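/*
 * The usual application flow over the three queue callbacks above:
 * fetch the PMD defaults, adjust what matters, then create the queue.
 * A sketch (queue id 0 is illustrative):
 *
 *	struct rte_event_queue_conf qconf;
 *	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 *	rte_event_queue_setup(dev_id, 0, &qconf);
 */
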
static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
}

static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}

static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}

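/*
 * Note the allocation pattern in port_setup() above: any previous port
 * memory is released first, so calling rte_event_port_setup() twice for
 * the same port id reconfigures rather than leaks, and the new
 * skeleton_port comes from cache-line-aligned memory on the device's
 * NUMA socket to keep per-port state local to the cores polling it.
 */
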
static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Linked all the queues */
	return (int)nb_links;
}

static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
				 uint8_t queues[], uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Unlinked all the queues */
	return (int)nb_unlinks;
}

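/*
 * Returning nb_links/nb_unlinks tells the common layer that every
 * requested queue was (un)linked; a real PMD would program its scheduler
 * here and may return a smaller count on partial failure. On the
 * application side, the NULL shorthand links a port to all configured
 * queues at normal priority:
 *
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 */
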
static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	/* 1:1 ns-to-tick mapping; a real PMD derives scale from its clock */
	*timeout_ticks = ns * scale;

	return 0;
}

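/*
 * With scale == 1 a tick equals a nanosecond. Applications convert once
 * and reuse the value on the dequeue fast path, e.g. (a sketch):
 *
 *	uint64_t ticks;
 *	rte_event_dequeue_timeout_ticks(dev_id, 25, &ticks);
 *	rte_event_dequeue_burst(dev_id, port_id, evs, nb, ticks);
 */
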
static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}

/* Control-path operations exposed to the common eventdev layer */
static const struct rte_eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get    = skeleton_eventdev_info_get,
	.dev_configure    = skeleton_eventdev_configure,
	.dev_start        = skeleton_eventdev_start,
	.dev_stop         = skeleton_eventdev_stop,
	.dev_close        = skeleton_eventdev_close,
	.queue_def_conf   = skeleton_eventdev_queue_def_conf,
	.queue_setup      = skeleton_eventdev_queue_setup,
	.queue_release    = skeleton_eventdev_queue_release,
	.port_def_conf    = skeleton_eventdev_port_def_conf,
	.port_setup       = skeleton_eventdev_port_setup,
	.port_release     = skeleton_eventdev_port_release,
	.port_link        = skeleton_eventdev_port_link,
	.port_unlink      = skeleton_eventdev_port_unlink,
	.timeout_ticks    = skeleton_eventdev_timeout_ticks,
	.dump             = skeleton_eventdev_dump
};

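/*
 * The eventdev framework splits the driver interface in two: control-path
 * calls are dispatched through this ops table via dev->dev_ops, while the
 * enqueue/dequeue functions are installed as flat pointers directly on
 * struct rte_eventdev (see the init routines below) so the inline burst
 * wrappers can reach them without an extra indirection.
 */
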
static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->schedule      = NULL;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
		eventdev->data->dev_id, eventdev->data->socket_id,
		skel->vendor_id, skel->device_id);

fail:
	/* Also reached on success; ret is 0 unless a step above failed */
	return ret;
}

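/*
 * The fast-path pointers above are assigned before the process-type
 * check on purpose: in a multi-process deployment a secondary process
 * shares the primary's device data but still needs its own per-process
 * function pointers, so only the PCI resource setup is skipped when the
 * process is not the primary.
 */
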
/* PCI based event device */

#define EVENTDEV_SKEL_VENDOR_ID         0x177d
#define EVENTDEV_SKEL_PRODUCT_ID        0x0001

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
			 struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
}

static int
event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);

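/*
 * Once the driver is registered, EAL scans the PCI bus at startup and
 * calls event_skeleton_pci_probe() for any device matching
 * pci_id_skeleton_map (0x177d is Cavium's PCI vendor id);
 * rte_event_pmd_pci_probe() then allocates the per-device private data
 * and invokes skeleton_eventdev_init() above.
 */
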
/* VDEV based event device */

static int
skeleton_eventdev_create(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_vdev_init(name,
			sizeof(struct skeleton_eventdev), socket_id);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->schedule      = NULL;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	return 0;
fail:
	return -EFAULT;
}

static int
skeleton_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
			rte_socket_id());
	return skeleton_eventdev_create(name, rte_socket_id());
}

static int
skeleton_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
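
/*
 * The vdev flavour needs no hardware: an instance is created straight
 * from the EAL command line, e.g. --vdev=event_skeleton0, which lands in
 * skeleton_eventdev_probe() above. A minimal sketch of finding the
 * device afterwards (the instance name is illustrative):
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_skeleton0");
 *	if (dev_id < 0)
 *		rte_panic("event_skeleton0 not found\n");
 */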