/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_bus.h>
#include <rte_mbuf_pool_ops.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <of.h>
#include <netcfg.h>

int dpaa_logtype_bus;
int dpaa_logtype_mempool;
int dpaa_logtype_pmd;
int dpaa_logtype_eventdev;

struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;

/* Define a variable to hold the portal_key, once created. */
pthread_key_t dpaa_portal_key;

unsigned int dpaa_svr_family;

RTE_DEFINE_PER_LCORE(bool, dpaa_io);
RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);

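/*
 * Comparator used to keep the bus device list sorted: devices are ordered
 * first by device type, and Ethernet devices are further ordered by FMAN id
 * and then MAC id. Returns <0, 0 or >0, qsort-style.
 */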
static int
compare_dpaa_devices(struct rte_dpaa_device *dev1,
		     struct rte_dpaa_device *dev2)
{
	int comp = 0;

	/* Segregating ETH from SEC devices */
	if (dev1->device_type > dev2->device_type)
		comp = 1;
	else if (dev1->device_type < dev2->device_type)
		comp = -1;
	else
		comp = 0;

	if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
		return comp;

	if (dev1->id.fman_id > dev2->id.fman_id) {
		comp = 1;
	} else if (dev1->id.fman_id < dev2->id.fman_id) {
		comp = -1;
	} else {
		/* FMAN ids match, check for mac_id */
		if (dev1->id.mac_id > dev2->id.mac_id)
			comp = 1;
		else if (dev1->id.mac_id < dev2->id.mac_id)
			comp = -1;
		else
			comp = 0;
	}

	return comp;
}

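/*
 * Insert a newly scanned device into the bus device list, keeping the list
 * sorted according to compare_dpaa_devices(). If no smaller position is
 * found, the device is appended at the tail.
 */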
static inline void
dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
{
	int comp, inserted = 0;
	struct rte_dpaa_device *dev = NULL;
	struct rte_dpaa_device *tdev = NULL;

	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
		comp = compare_dpaa_devices(newdev, dev);
		if (comp < 0) {
			TAILQ_INSERT_BEFORE(dev, newdev, next);
			inserted = 1;
			break;
		}
	}

	if (!inserted)
		TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
}

/*
 * Looks up SEC device availability in the DTS.
 * Returns -1 if no SEC devices are available, 0 otherwise.
 */
static inline int
dpaa_sec_available(void)
{
	const struct device_node *caam_node;

	for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
		return 0;
	}

	return -1;
}

static void dpaa_clean_device_list(void);

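/*
 * Populate the bus device list: one FSL_DPAA_ETH device per network port
 * reported by netcfg, followed (when SEC support is advertised in the device
 * tree) by RTE_LIBRTE_DPAA_MAX_CRYPTODEV FSL_DPAA_CRYPTO devices. On
 * allocation failure the partially built list is torn down again.
 */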
static int
dpaa_create_device_list(void)
{
	int i;
	int ret;
	struct rte_dpaa_device *dev;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;

	/* Creating Ethernet Devices */
	for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
		dev = calloc(1, sizeof(struct rte_dpaa_device));
		if (!dev) {
			DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
			ret = -ENOMEM;
			goto cleanup;
		}

		cfg = &dpaa_netcfg->port_cfg[i];
		fman_intf = cfg->fman_if;

		/* Device identifiers */
		dev->id.fman_id = fman_intf->fman_idx + 1;
		dev->id.mac_id = fman_intf->mac_idx;
		dev->device_type = FSL_DPAA_ETH;
		dev->id.dev_id = i;

		/* Create device name */
		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
		snprintf(dev->name, RTE_ETH_NAME_MAX_LEN, "fm%d-mac%d",
			 (fman_intf->fman_idx + 1), fman_intf->mac_idx);
		DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
		dev->device.name = dev->name;

		dpaa_add_to_device_list(dev);
	}

	rte_dpaa_bus.device_count = i;

	/* Unlike the ETH case, a fixed number (RTE_LIBRTE_DPAA_MAX_CRYPTODEV)
	 * of SEC devices is created, and only if the "sec" property is found
	 * in the device tree. Logically there is no limit on the number of
	 * devices (QI interfaces) that can be created.
	 */

	if (dpaa_sec_available()) {
		DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
		return 0;
	}

	/* Creating SEC Devices */
	for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
		dev = calloc(1, sizeof(struct rte_dpaa_device));
		if (!dev) {
			DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
			ret = -ENOMEM;
			goto cleanup;
		}

		dev->device_type = FSL_DPAA_CRYPTO;
		dev->id.dev_id = rte_dpaa_bus.device_count + i;

		/* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid length
		 * for a crypto PMD name, RTE_ETH_NAME_MAX_LEN is used here as
		 * that is the size allocated for dev->name.
		 */
		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
		snprintf(dev->name, RTE_ETH_NAME_MAX_LEN, "dpaa-sec%d", i);
		DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);

		dpaa_add_to_device_list(dev);
	}

	rte_dpaa_bus.device_count += i;

	return 0;

cleanup:
	dpaa_clean_device_list();
	return ret;
}

static void
dpaa_clean_device_list(void)
{
	struct rte_dpaa_device *dev = NULL;
	struct rte_dpaa_device *tdev = NULL;

	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
		TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
		free(dev);
		dev = NULL;
	}
}

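/*
 * Affine the calling thread to a CPU and open QMAN/BMAN software portals
 * for it. The portal indices and thread id are stashed in thread-local
 * storage under dpaa_portal_key so that dpaa_portal_finish() can tear them
 * down when the thread exits. Passing arg == (void *)1, or calling from a
 * non-EAL thread (LCORE_ID_ANY), binds the portal to the master lcore.
 *
 * Illustrative usage only (not part of this file): a DPAA PMD datapath
 * typically guards its fast path with something like
 *
 *	if (unlikely(!RTE_PER_LCORE(dpaa_io)))
 *		if (rte_dpaa_portal_init((void *)0))
 *			return 0;
 */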
int rte_dpaa_portal_init(void *arg)
{
	cpu_set_t cpuset;
	pthread_t id;
	uint32_t cpu = rte_lcore_id();
	int ret;
	struct dpaa_portal *dpaa_io_portal;

	BUS_INIT_FUNC_TRACE();

	if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY)
		cpu = rte_get_master_lcore();
	else if (cpu >= RTE_MAX_LCORE)
		/* The core id is not supported */
		return -1;

	/* Set CPU affinity for this thread */
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	id = pthread_self();
	ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
	if (ret) {
		DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
			"core %d with ret: %d", cpu, ret);
		return ret;
	}

	/* Initialise bman thread portals */
	ret = bman_thread_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
			"core %d with ret: %d", cpu, ret);
		return ret;
	}

	DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");

	/* Initialise qman thread portals */
	ret = qman_thread_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "qman_thread_init failed on "
			"core %d with ret: %d", cpu, ret);
		bman_thread_finish();
		return ret;
	}

	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");

	dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
				    RTE_CACHE_LINE_SIZE);
	if (!dpaa_io_portal) {
		DPAA_BUS_LOG(ERR, "Unable to allocate memory");
		bman_thread_finish();
		qman_thread_finish();
		return -ENOMEM;
	}

	dpaa_io_portal->qman_idx = qman_get_portal_index();
	dpaa_io_portal->bman_idx = bman_get_portal_index();
	dpaa_io_portal->tid = syscall(SYS_gettid);

	ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
	if (ret) {
		DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
			    "core %d with ret: %d", cpu, ret);
		dpaa_portal_finish(dpaa_io_portal);

		return ret;
	}

	RTE_PER_LCORE(dpaa_io) = true;

	DPAA_BUS_LOG(DEBUG, "DPAA portal initialized");

	return 0;
}

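/*
 * Create a dedicated QMAN portal for the given frame queue and subscribe
 * that portal to the FQ's pool channel via a static dequeue command (SDQCR).
 * The calling thread's portal is initialized first if it was not already.
 */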
int
rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
{
	/* Affine the above-created portal with the channel */
	u32 sdqcr;
	struct qman_portal *qp;
	int ret;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init(arg);
		if (ret) {
			DPAA_BUS_LOG(ERR, "portal initialisation failure");
			return ret;
		}
	}

	/* Initialise qman specific portals */
	qp = fsl_qman_portal_create();
	if (!qp) {
		DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
		return -1;
	}
	fq->qp = qp;
	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
	qman_static_dequeue_add(sdqcr, qp);

	return 0;
}

int rte_dpaa_portal_fq_close(struct qman_fq *fq)
{
	return fsl_qman_portal_destroy(fq->qp);
}

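/*
 * Destructor registered against dpaa_portal_key: releases the QMAN and BMAN
 * thread portals and frees the per-thread portal bookkeeping when a
 * portal-affined thread exits (or on explicit cleanup).
 */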
void
dpaa_portal_finish(void *arg)
{
	struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;

	if (!dpaa_io_portal) {
		DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
		return;
	}

	bman_thread_finish();
	qman_thread_finish();

	pthread_setspecific(dpaa_portal_key, NULL);

	rte_free(dpaa_io_portal);
	dpaa_io_portal = NULL;

	RTE_PER_LCORE(dpaa_io) = false;
}

#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"

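/*
 * Bus scan: bail out silently if the DPAA platform nodes are absent in
 * sysfs; otherwise parse the device tree (of_init), acquire the network
 * interface configuration (netcfg_acquire), build the device list and
 * create the pthread key used for per-thread portal cleanup.
 */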
static int
rte_dpaa_bus_scan(void)
{
	int ret;

	BUS_INIT_FUNC_TRACE();

	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
		RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
		return 0;
	}

	/* Load the device-tree driver */
	ret = of_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
		return -1;
	}

	/* Get the interface configurations from device-tree */
	dpaa_netcfg = netcfg_acquire();
	if (!dpaa_netcfg) {
		DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
		return -EINVAL;
	}

	RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");

	if (!dpaa_netcfg->num_ethports) {
		DPAA_BUS_LOG(INFO, "no network interfaces available");
		/* This is not an error */
		return 0;
	}

	DPAA_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d",
		     dpaa_netcfg, dpaa_netcfg->num_ethports);

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dump_netcfg(dpaa_netcfg);
#endif

	DPAA_BUS_LOG(DEBUG, "Number of devices = %d\n",
		     dpaa_netcfg->num_ethports);
	ret = dpaa_create_device_list();
	if (ret) {
		DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
		return ret;
	}

	/* Create the key, supplying a function that will be invoked when a
	 * portal-affined thread is deleted.
	 */
	ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
	if (ret) {
		DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
		dpaa_clean_device_list();
		return ret;
	}

	DPAA_BUS_LOG(DEBUG, "dpaa_portal_key=%u, ret=%d\n",
		    (unsigned int)dpaa_portal_key, ret);

	return 0;
}

/* Register a dpaa bus based dpaa driver */
void
rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
{
	RTE_VERIFY(driver);

	BUS_INIT_FUNC_TRACE();

	TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
	/* Update Bus references */
	driver->dpaa_bus = &rte_dpaa_bus;
}

/* Unregister a dpaa bus based dpaa driver */
void
rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
{
	struct rte_dpaa_bus *dpaa_bus;

	BUS_INIT_FUNC_TRACE();

	dpaa_bus = driver->dpaa_bus;

	TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
	/* Update Bus references */
	driver->dpaa_bus = NULL;
}

static int
rte_dpaa_device_match(struct rte_dpaa_driver *drv,
		      struct rte_dpaa_device *dev)
{
	int ret = -1;

	BUS_INIT_FUNC_TRACE();

	if (!drv || !dev) {
		DPAA_BUS_DEBUG("Invalid drv or dev received.");
		return ret;
	}

	if (drv->drv_type == dev->device_type) {
		DPAA_BUS_INFO("Device: %s matches for driver: %s",
			      dev->name, drv->driver.name);
		ret = 0; /* Found a match */
	}

	return ret;
}

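/*
 * Bus probe: walk every (device, driver) pair, call the driver probe hook
 * on type matches, select the DPAA mempool ops as the platform default when
 * at least one DPAA device exists, and cache the SoC (SVR) family read from
 * DPAA_SOC_ID_FILE for use by the PMDs.
 */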
static int
rte_dpaa_bus_probe(void)
{
	int ret = -1;
	struct rte_dpaa_device *dev;
	struct rte_dpaa_driver *drv;
	FILE *svr_file = NULL;
	unsigned int svr_ver;

	BUS_INIT_FUNC_TRACE();

	/* For each registered driver, and device, call the driver->probe */
	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
		TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
			ret = rte_dpaa_device_match(drv, dev);
			if (ret)
				continue;

			if (!drv->probe)
				continue;

			ret = drv->probe(drv, dev);
			if (ret)
				DPAA_BUS_ERR("Unable to probe.\n");

			break;
		}
	}

	/* Register DPAA mempool ops only if any DPAA device has
	 * been detected.
	 */
	if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
		rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (svr_file) {
		if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
			dpaa_svr_family = svr_ver & SVR_MASK;
		fclose(svr_file);
	}

	return 0;
}

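/*
 * Iterator used by the EAL to locate a device on this bus: walks the device
 * list, optionally resuming after 'start', and returns the first device for
 * which the supplied comparison callback reports a match.
 */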
static struct rte_device *
rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		     const void *data)
{
	struct rte_dpaa_device *dev;

	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
		if (start && &dev->device == start) {
			start = NULL;  /* starting point found */
			continue;
		}

		if (cmp(&dev->device, data) == 0)
			return &dev->device;
	}

	return NULL;
}

/*
 * Get the IOMMU class of DPAA devices on the bus.
 */
static enum rte_iova_mode
rte_dpaa_get_iommu_class(void)
{
	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
		return RTE_IOVA_DC;
	}
	return RTE_IOVA_PA;
}

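/*
 * Bus operations exposed to the EAL; the bus itself is registered below via
 * RTE_REGISTER_BUS, and the per-subsystem log types are set up at constructor
 * time by dpaa_init_log().
 */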
struct rte_dpaa_bus rte_dpaa_bus = {
	.bus = {
		.scan = rte_dpaa_bus_scan,
		.probe = rte_dpaa_bus_probe,
		.find_device = rte_dpaa_find_device,
		.get_iommu_class = rte_dpaa_get_iommu_class,
	},
	.device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
	.device_count = 0,
};

RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);

RTE_INIT(dpaa_init_log);
static void
dpaa_init_log(void)
{
	dpaa_logtype_bus = rte_log_register("bus.dpaa");
	if (dpaa_logtype_bus >= 0)
		rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);

	dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
	if (dpaa_logtype_mempool >= 0)
		rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);

	dpaa_logtype_pmd = rte_log_register("pmd.dpaa");
	if (dpaa_logtype_pmd >= 0)
		rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);

	dpaa_logtype_eventdev = rte_log_register("eventdev.dpaa");
	if (dpaa_logtype_eventdev >= 0)
		rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
}