[deb_dpdk.git] drivers/raw/dpaa2_qdma/dpaa2_qdma.c (upstream version 18.11.2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2019 NXP
3  */
4
5 #include <string.h>
6
7 #include <rte_eal.h>
8 #include <rte_fslmc.h>
9 #include <rte_atomic.h>
10 #include <rte_lcore.h>
11 #include <rte_rawdev.h>
12 #include <rte_rawdev_pmd.h>
13 #include <rte_malloc.h>
14 #include <rte_ring.h>
15 #include <rte_mempool.h>
16
17 #include <mc/fsl_dpdmai.h>
18 #include <portal/dpaa2_hw_pvt.h>
19 #include <portal/dpaa2_hw_dpio.h>
20
21 #include "dpaa2_qdma.h"
22 #include "dpaa2_qdma_logs.h"
23 #include "rte_pmd_dpaa2_qdma.h"
24
25 /* Dynamic log type identifier */
26 int dpaa2_qdma_logtype;
27
28 /* QDMA device */
29 static struct qdma_device qdma_dev;
30
31 /* QDMA H/W queues list */
32 TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
33 static struct qdma_hw_queue_list qdma_queue_list
34         = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
35
36 /* QDMA Virtual Queues */
37 static struct qdma_virt_queue *qdma_vqs;
38
39 /* QDMA per core data */
40 static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
41
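/* Reserve the first unused H/W queue from the global list for the given
 * lcore; returns NULL when every queue already has a user.
 */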
42 static struct qdma_hw_queue *
43 alloc_hw_queue(uint32_t lcore_id)
44 {
45         struct qdma_hw_queue *queue = NULL;
46
47         DPAA2_QDMA_FUNC_TRACE();
48
49         /* Get a free queue from the list */
50         TAILQ_FOREACH(queue, &qdma_queue_list, next) {
51                 if (queue->num_users == 0) {
52                         queue->lcore_id = lcore_id;
53                         queue->num_users++;
54                         break;
55                 }
56         }
57
58         return queue;
59 }
60
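/* Drop one user reference from a H/W queue taken via alloc_hw_queue(). */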
61 static void
62 free_hw_queue(struct qdma_hw_queue *queue)
63 {
64         DPAA2_QDMA_FUNC_TRACE();
65
66         queue->num_users--;
67 }
68
69
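/* Get a H/W queue for the given lcore: try to allocate a fresh queue while
 * the core is below its per-core limit, otherwise share the least loaded
 * queue already attached to the core.
 */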
70 static struct qdma_hw_queue *
71 get_hw_queue(uint32_t lcore_id)
72 {
73         struct qdma_per_core_info *core_info;
74         struct qdma_hw_queue *queue, *temp;
75         uint32_t least_num_users;
76         int num_hw_queues, i;
77
78         DPAA2_QDMA_FUNC_TRACE();
79
80         core_info = &qdma_core_info[lcore_id];
81         num_hw_queues = core_info->num_hw_queues;
82
83         /*
84          * Allocate a HW queue if the core currently holds fewer queues
85          * than the configured per-core maximum
86          */
87         if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
88                 queue = alloc_hw_queue(lcore_id);
89                 if (queue) {
90                         core_info->hw_queues[num_hw_queues] = queue;
91                         core_info->num_hw_queues++;
92                         return queue;
93                 }
94         }
95
96         queue = core_info->hw_queues[0];
97         /* In case there is no queue associated with the core return NULL */
98         if (!queue)
99                 return NULL;
100
101         /* Fetch the least loaded H/W queue */
102         least_num_users = core_info->hw_queues[0]->num_users;
103         for (i = 0; i < num_hw_queues; i++) {
104                 temp = core_info->hw_queues[i];
105                 if (temp->num_users < least_num_users) {
106                         least_num_users = temp->num_users;
                            queue = temp;
                    }
107         }
108
109         if (queue)
110                 queue->num_users++;
111
112         return queue;
113 }
114
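/* Release a H/W queue obtained through get_hw_queue(); the queue is freed
 * and detached from the core when its last user goes away.
 */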
115 static void
116 put_hw_queue(struct qdma_hw_queue *queue)
117 {
118         struct qdma_per_core_info *core_info;
119         int lcore_id, num_hw_queues, i;
120
121         DPAA2_QDMA_FUNC_TRACE();
122
123         /*
124          * If this is the last user of the queue, free it.
125          * Also remove it from QDMA core info.
126          */
127         if (queue->num_users == 1) {
128                 free_hw_queue(queue);
129
130                 /* Remove the physical queue from core info */
131                 lcore_id = queue->lcore_id;
132                 core_info = &qdma_core_info[lcore_id];
133                 num_hw_queues = core_info->num_hw_queues;
134                 for (i = 0; i < num_hw_queues; i++) {
135                         if (queue == core_info->hw_queues[i])
136                                 break;
137                 }
138                 for (; i < num_hw_queues - 1; i++)
139                         core_info->hw_queues[i] = core_info->hw_queues[i + 1];
140                 core_info->hw_queues[i] = NULL;
                    core_info->num_hw_queues--;
141         } else {
142                 queue->num_users--;
143         }
144 }
145
146 int __rte_experimental
147 rte_qdma_init(void)
148 {
149         DPAA2_QDMA_FUNC_TRACE();
150
151         rte_spinlock_init(&qdma_dev.lock);
152
153         return 0;
154 }
155
156 void __rte_experimental
157 rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
158 {
159         DPAA2_QDMA_FUNC_TRACE();
160
161         qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
162 }
163
164 int __rte_experimental
165 rte_qdma_reset(void)
166 {
167         struct qdma_hw_queue *queue;
168         int i;
169
170         DPAA2_QDMA_FUNC_TRACE();
171
172         /* In case QDMA device is not in stopped state, return -EBUSY */
173         if (qdma_dev.state == 1) {
174                 DPAA2_QDMA_ERR(
175                         "Device is in running state. Stop before reset.");
176                 return -EBUSY;
177         }
178
179         /* In case there are pending jobs on any VQ, return -EBUSY */
180         for (i = 0; i < qdma_dev.max_vqs; i++) {
181                 if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
182                     qdma_vqs[i].num_dequeues)) {
183                         DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
184                         return -EBUSY;
                    }
185         }
186
187         /* Reset HW queues */
188         TAILQ_FOREACH(queue, &qdma_queue_list, next)
189                 queue->num_users = 0;
190
191         /* Reset and free virtual queues */
192         for (i = 0; i < qdma_dev.max_vqs; i++) {
193                 if (qdma_vqs[i].status_ring)
194                         rte_ring_free(qdma_vqs[i].status_ring);
195         }
196         if (qdma_vqs)
197                 rte_free(qdma_vqs);
198         qdma_vqs = NULL;
199
200         /* Reset per core info */
201         memset(&qdma_core_info, 0,
202                 sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
203
204         /* Free the FLE pool */
205         if (qdma_dev.fle_pool)
206                 rte_mempool_free(qdma_dev.fle_pool);
207
208         /* Reset QDMA device structure */
209         qdma_dev.mode = RTE_QDMA_MODE_HW;
210         qdma_dev.max_hw_queues_per_core = 0;
211         qdma_dev.fle_pool = NULL;
212         qdma_dev.fle_pool_count = 0;
213         qdma_dev.max_vqs = 0;
214
215         return 0;
216 }
217
218 int __rte_experimental
219 rte_qdma_configure(struct rte_qdma_config *qdma_config)
220 {
221         int ret;
222         char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
223
224         DPAA2_QDMA_FUNC_TRACE();
225
226         /* In case QDMA device is not in stopped state, return -EBUSY */
227         if (qdma_dev.state == 1) {
228                 DPAA2_QDMA_ERR(
229                         "Device is in running state. Stop before config.");
230                 return -EBUSY;
231         }
232
233         /* Reset the QDMA device */
234         ret = rte_qdma_reset();
235         if (ret) {
236                 DPAA2_QDMA_ERR("Resetting QDMA failed");
237                 return ret;
238         }
239
240         /* Set mode */
241         qdma_dev.mode = qdma_config->mode;
242
243         /* Set max HW queue per core */
244         if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
245                 DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
246                                MAX_HW_QUEUE_PER_CORE);
247                 return -EINVAL;
248         }
249         qdma_dev.max_hw_queues_per_core =
250                 qdma_config->max_hw_queues_per_core;
251
252         /* Allocate Virtual Queues */
253         qdma_vqs = rte_zmalloc("qdma_virtual_queues",
254                         (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
255                         RTE_CACHE_LINE_SIZE);
256         if (!qdma_vqs) {
257                 DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
258                 return -ENOMEM;
259         }
260         qdma_dev.max_vqs = qdma_config->max_vqs;
261
262         /* Allocate FLE pool; just append PID so that in case of
263          * multiprocess, the pools don't collide.
264          */
265         snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
266                  getpid());
267         qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
268                         qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
269                         QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
270                         NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
271         if (!qdma_dev.fle_pool) {
272                 DPAA2_QDMA_ERR("qdma_fle_pool create failed");
273                 rte_free(qdma_vqs);
274                 qdma_vqs = NULL;
275                 return -ENOMEM;
276         }
277         qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
278
279         return 0;
280 }
281
282 int __rte_experimental
283 rte_qdma_start(void)
284 {
285         DPAA2_QDMA_FUNC_TRACE();
286
287         qdma_dev.state = 1;
288
289         return 0;
290 }
291
292 int __rte_experimental
293 rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
294 {
295         char ring_name[32];
296         int i;
297
298         DPAA2_QDMA_FUNC_TRACE();
299
300         rte_spinlock_lock(&qdma_dev.lock);
301
302         /* Get a free Virtual Queue */
303         for (i = 0; i < qdma_dev.max_vqs; i++) {
304                 if (qdma_vqs[i].in_use == 0)
305                         break;
306         }
307
308         /* Return in case no VQ is free */
309         if (i == qdma_dev.max_vqs) {
310                 rte_spinlock_unlock(&qdma_dev.lock);
311                 DPAA2_QDMA_ERR("No free virtual queue available");
312                 return -ENODEV;
313         }
314
315         if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
316                         (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
317                 /* Allocate HW queue for a VQ */
318                 qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
319                 qdma_vqs[i].exclusive_hw_queue = 1;
320         } else {
321                 /* Allocate a Ring for Virtual Queue in VQ mode */
322                 snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
323                 qdma_vqs[i].status_ring = rte_ring_create(ring_name,
324                         qdma_dev.fle_pool_count, rte_socket_id(), 0);
325                 if (!qdma_vqs[i].status_ring) {
326                         DPAA2_QDMA_ERR("Status ring creation failed for vq");
327                         rte_spinlock_unlock(&qdma_dev.lock);
328                         return -rte_errno;
329                 }
330
331                 /* Get a HW queue (shared) for a VQ */
332                 qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
333                 qdma_vqs[i].exclusive_hw_queue = 0;
334         }
335
336         if (qdma_vqs[i].hw_queue == NULL) {
337                 DPAA2_QDMA_ERR("No H/W queue available for VQ");
338                 if (qdma_vqs[i].status_ring)
339                         rte_ring_free(qdma_vqs[i].status_ring);
340                 qdma_vqs[i].status_ring = NULL;
341                 rte_spinlock_unlock(&qdma_dev.lock);
342                 return -ENODEV;
343         }
344
345         qdma_vqs[i].in_use = 1;
346         qdma_vqs[i].lcore_id = lcore_id;
347
348         rte_spinlock_unlock(&qdma_dev.lock);
349
350         return i;
351 }
352
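/* Fill the compound frame for one copy job: FLE[0] points to the source and
 * destination descriptors (SDDs), FLE[1] to the source buffer and FLE[2] to
 * the destination buffer.
 */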
353 static void
354 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
355                         uint64_t src, uint64_t dest,
356                         size_t len, uint32_t flags)
357 {
358         struct qdma_sdd *sdd;
359
360         DPAA2_QDMA_FUNC_TRACE();
361
362         sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
363                 (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
364
365         /* first frame list to source descriptor */
366         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
367         DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
368
369         /* source and destination descriptor */
370         DPAA2_SET_SDD_RD_COHERENT(sdd); /* source descriptor CMD */
371         sdd++;
372         DPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */
373
374         fle++;
375         /* source frame list to source buffer */
376         if (flags & RTE_QDMA_JOB_SRC_PHY) {
377                 DPAA2_SET_FLE_ADDR(fle, src);
378                 DPAA2_SET_FLE_BMT(fle);
379         } else {
380                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
381         }
382         DPAA2_SET_FLE_LEN(fle, len);
383
384         fle++;
385         /* destination frame list to destination buffer */
386         if (flags & RTE_QDMA_JOB_DEST_PHY) {
387                 DPAA2_SET_FLE_BMT(fle);
388                 DPAA2_SET_FLE_ADDR(fle, dest);
389         } else {
390                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
391         }
392         DPAA2_SET_FLE_LEN(fle, len);
393
394         /* Final bit: 1, for last frame list */
395         DPAA2_SET_FLE_FIN(fle);
396 }
397
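/* Build a frame descriptor for the job and enqueue it on the DPDMAI Tx queue
 * through the QBMAN software portal of the calling lcore.
 */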
398 static int
399 dpdmai_dev_enqueue(struct dpaa2_dpdmai_dev *dpdmai_dev,
400                    uint16_t txq_id,
401                    uint16_t vq_id,
402                    struct rte_qdma_job *job)
403 {
404         struct qdma_io_meta *io_meta;
405         struct qbman_fd fd;
406         struct dpaa2_queue *txq;
407         struct qbman_fle *fle;
408         struct qbman_eq_desc eqdesc;
409         struct qbman_swp *swp;
410         int ret;
411
412         DPAA2_QDMA_FUNC_TRACE();
413
414         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
415                 ret = dpaa2_affine_qbman_swp();
416                 if (ret) {
417                         DPAA2_QDMA_ERR("Failure in affining portal");
418                         return ret;
419                 }
420         }
421         swp = DPAA2_PER_LCORE_PORTAL;
422
423         txq = &(dpdmai_dev->tx_queue[txq_id]);
424
425         /* Prepare enqueue descriptor */
426         qbman_eq_desc_clear(&eqdesc);
427         qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
428         qbman_eq_desc_set_no_orp(&eqdesc, 0);
429         qbman_eq_desc_set_response(&eqdesc, 0, 0);
430
431         /*
432          * Get an FLE/SDD from FLE pool.
433          * Note: IO metadata is before the FLE and SDD memory.
434          */
435         ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
436         if (ret) {
437                 DPAA2_QDMA_DP_WARN("Memory alloc failed for FLE");
438                 return ret;
439         }
440
441         /* Set the metadata */
442         io_meta->cnxt = (size_t)job;
443         io_meta->id = vq_id;
444
445         fle = (struct qbman_fle *)(io_meta + 1);
446
447         /* populate Frame descriptor */
448         memset(&fd, 0, sizeof(struct qbman_fd));
449         DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(fle));
450         DPAA2_SET_FD_COMPOUND_FMT(&fd);
451         DPAA2_SET_FD_FRC(&fd, QDMA_SER_CTX);
452
453         /* Populate FLE */
454         memset(fle, 0, QDMA_FLE_POOL_SIZE);
455         dpaa2_qdma_populate_fle(fle, job->src, job->dest, job->len, job->flags);
456
457         /* Enqueue the packet to the QBMAN */
458         do {
459                 ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
460                 if (ret < 0 && ret != -EBUSY)
461                         DPAA2_QDMA_ERR("Transmit failure with err: %d", ret);
462         } while (ret == -EBUSY);
463
464         DPAA2_QDMA_DP_DEBUG("Successfully transmitted a packet");
465
466         return ret;
467 }
468
469 int __rte_experimental
470 rte_qdma_vq_enqueue_multi(uint16_t vq_id,
471                           struct rte_qdma_job **job,
472                           uint16_t nb_jobs)
473 {
474         int i, ret;
475
476         DPAA2_QDMA_FUNC_TRACE();
477
478         for (i = 0; i < nb_jobs; i++) {
479                 ret = rte_qdma_vq_enqueue(vq_id, job[i]);
480                 if (ret < 0)
481                         break;
482         }
483
484         return i;
485 }
486
487 int __rte_experimental
488 rte_qdma_vq_enqueue(uint16_t vq_id,
489                     struct rte_qdma_job *job)
490 {
491         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
492         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
493         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
494         int ret;
495
496         DPAA2_QDMA_FUNC_TRACE();
497
498         /* Return error in case of wrong lcore_id */
499         if (rte_lcore_id() != qdma_vq->lcore_id) {
500                 DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
501                                 vq_id);
502                 return -EINVAL;
503         }
504
505         ret = dpdmai_dev_enqueue(dpdmai_dev, qdma_pq->queue_id, vq_id, job);
506         if (ret < 0) {
507                 DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
508                 return ret;
509         }
510
511         qdma_vq->num_enqueues++;
512
513         return 1;
514 }
515
516 /* Function to receive a QDMA job for a given device and queue */
517 static int
518 dpdmai_dev_dequeue(struct dpaa2_dpdmai_dev *dpdmai_dev,
519                    uint16_t rxq_id,
520                    uint16_t *vq_id,
521                    struct rte_qdma_job **job)
522 {
523         struct qdma_io_meta *io_meta;
524         struct dpaa2_queue *rxq;
525         struct qbman_result *dq_storage;
526         struct qbman_pull_desc pulldesc;
527         const struct qbman_fd *fd;
528         struct qbman_swp *swp;
529         struct qbman_fle *fle;
530         uint32_t fqid;
531         uint8_t status;
532         int ret;
533
534         DPAA2_QDMA_FUNC_TRACE();
535
536         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
537                 ret = dpaa2_affine_qbman_swp();
538                 if (ret) {
539                         DPAA2_QDMA_ERR("Failure in affining portal");
540                         return 0;
541                 }
542         }
543         swp = DPAA2_PER_LCORE_PORTAL;
544
545         rxq = &(dpdmai_dev->rx_queue[rxq_id]);
546         dq_storage = rxq->q_storage->dq_storage[0];
547         fqid = rxq->fqid;
548
549         /* Prepare dequeue descriptor */
550         qbman_pull_desc_clear(&pulldesc);
551         qbman_pull_desc_set_fq(&pulldesc, fqid);
552         qbman_pull_desc_set_storage(&pulldesc, dq_storage,
553                 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
554         qbman_pull_desc_set_numframes(&pulldesc, 1);
555
556         while (1) {
557                 if (qbman_swp_pull(swp, &pulldesc)) {
558                         DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
559                         continue;
560                 }
561                 break;
562         }
563
564         /* Check if previous issued command is completed. */
565         while (!qbman_check_command_complete(dq_storage))
566                 ;
567         /* Loop until dq_storage is updated with new token by QBMAN */
568         while (!qbman_check_new_result(dq_storage))
569                 ;
570
571         /* Check for valid frame. */
572         status = qbman_result_DQ_flags(dq_storage);
573         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
574                 DPAA2_QDMA_DP_DEBUG("No frame is delivered");
575                 return 0;
576         }
577
578         /* Get the FD */
579         fd = qbman_result_DQ_fd(dq_storage);
580
581         /*
582          * Fetch metadata from FLE. job and vq_id were set
583          * in metadata in the enqueue operation.
584          */
585         fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
586         io_meta = (struct qdma_io_meta *)(fle) - 1;
587         if (vq_id)
588                 *vq_id = io_meta->id;
589
590         *job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
591         (*job)->status = DPAA2_GET_FD_ERR(fd);
592
593         /* Free FLE to the pool */
594         rte_mempool_put(qdma_dev.fle_pool, io_meta);
595
596         DPAA2_QDMA_DP_DEBUG("packet received");
597
598         return 1;
599 }
600
601 int __rte_experimental
602 rte_qdma_vq_dequeue_multi(uint16_t vq_id,
603                           struct rte_qdma_job **job,
604                           uint16_t nb_jobs)
605 {
606         int i;
607
608         DPAA2_QDMA_FUNC_TRACE();
609
610         for (i = 0; i < nb_jobs; i++) {
611                 job[i] = rte_qdma_vq_dequeue(vq_id);
612                 if (!job[i])
613                         break;
614         }
615
616         return i;
617 }
618
619 struct rte_qdma_job * __rte_experimental
620 rte_qdma_vq_dequeue(uint16_t vq_id)
621 {
622         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
623         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
624         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
625         struct rte_qdma_job *job = NULL;
626         struct qdma_virt_queue *temp_qdma_vq;
627         int dequeue_budget = QDMA_DEQUEUE_BUDGET;
628         int ring_count, ret, i;
629         uint16_t temp_vq_id;
630
631         DPAA2_QDMA_FUNC_TRACE();
632
633         /* Return error in case of wrong lcore_id */
634         if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
635                 DPAA2_QDMA_ERR("QDMA dequeue for vqid %d on wrong core",
636                                 vq_id);
637                 return NULL;
638         }
639
640         /* Only dequeue when there are pending jobs on VQ */
641         if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
642                 return NULL;
643
644         if (qdma_vq->exclusive_hw_queue) {
645                 /* In case of exclusive queue directly fetch from HW queue */
646                 ret = dpdmai_dev_dequeue(dpdmai_dev, qdma_pq->queue_id,
647                                          NULL, &job);
648                 if (ret < 0) {
649                         DPAA2_QDMA_ERR(
650                                 "Dequeue from DPDMAI device failed: %d", ret);
651                         return NULL;
652                 }
653         } else {
654                 /*
655                  * Get the QDMA completed jobs from the software ring.
656                  * In case they are not available on the ring poke the HW
657                  * to fetch completed jobs from corresponding HW queues
658                  */
659                 ring_count = rte_ring_count(qdma_vq->status_ring);
660                 if (ring_count == 0) {
661                         /* TODO - How to have right budget */
662                         for (i = 0; i < dequeue_budget; i++) {
663                                 ret = dpdmai_dev_dequeue(dpdmai_dev,
664                                         qdma_pq->queue_id, &temp_vq_id, &job);
665                                 if (ret == 0)
666                                         break;
667                                 temp_qdma_vq = &qdma_vqs[temp_vq_id];
668                                 rte_ring_enqueue(temp_qdma_vq->status_ring,
669                                         (void *)(job));
670                                 ring_count = rte_ring_count(
671                                         qdma_vq->status_ring);
672                                 if (ring_count)
673                                         break;
674                         }
675                 }
676
677                 /* Dequeue job from the software ring to provide to the user */
678                 rte_ring_dequeue(qdma_vq->status_ring, (void **)&job);
679                 if (job)
680                         qdma_vq->num_dequeues++;
681         }
682
683         return job;
684 }
685
686 void __rte_experimental
687 rte_qdma_vq_stats(uint16_t vq_id,
688                   struct rte_qdma_vq_stats *vq_status)
689 {
690         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
691
692         DPAA2_QDMA_FUNC_TRACE();
693
694         if (qdma_vq->in_use) {
695                 vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
696                 vq_status->lcore_id = qdma_vq->lcore_id;
697                 vq_status->num_enqueues = qdma_vq->num_enqueues;
698                 vq_status->num_dequeues = qdma_vq->num_dequeues;
699                 vq_status->num_pending_jobs = vq_status->num_enqueues -
700                                 vq_status->num_dequeues;
701         }
702 }
703
704 int __rte_experimental
705 rte_qdma_vq_destroy(uint16_t vq_id)
706 {
707         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
708
709         DPAA2_QDMA_FUNC_TRACE();
710
711         /* In case there are pending jobs on any VQ, return -EBUSY */
712         if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
713                 return -EBUSY;
714
715         rte_spinlock_lock(&qdma_dev.lock);
716
717         if (qdma_vq->exclusive_hw_queue)
718                 free_hw_queue(qdma_vq->hw_queue);
719         else {
720                 if (qdma_vq->status_ring)
721                         rte_ring_free(qdma_vq->status_ring);
722
723                 put_hw_queue(qdma_vq->hw_queue);
724         }
725
726         memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
727
728         rte_spinlock_unlock(&qdma_dev.lock);
729
730         return 0;
731 }
732
733 void __rte_experimental
734 rte_qdma_stop(void)
735 {
736         DPAA2_QDMA_FUNC_TRACE();
737
738         qdma_dev.state = 0;
739 }
740
741 void __rte_experimental
742 rte_qdma_destroy(void)
743 {
744         DPAA2_QDMA_FUNC_TRACE();
745
746         rte_qdma_reset();
747 }
748
749 static const struct rte_rawdev_ops dpaa2_qdma_ops;
750
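/* Create a qdma_hw_queue entry for each queue of the DPDMAI object and
 * append it to the global H/W queue list.
 */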
751 static int
752 add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
753 {
754         struct qdma_hw_queue *queue;
755         int i;
756
757         DPAA2_QDMA_FUNC_TRACE();
758
759         for (i = 0; i < dpdmai_dev->num_queues; i++) {
760                 queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
761                 if (!queue) {
762                         DPAA2_QDMA_ERR(
763                                 "Memory allocation failed for QDMA queue");
764                         return -ENOMEM;
765                 }
766
767                 queue->dpdmai_dev = dpdmai_dev;
768                 queue->queue_id = i;
769
770                 TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
771                 qdma_dev.num_hw_queues++;
772         }
773
774         return 0;
775 }
776
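/* Remove and free every H/W queue entry that belongs to the given DPDMAI
 * object.
 */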
777 static void
778 remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
779 {
780         struct qdma_hw_queue *queue = NULL;
781         struct qdma_hw_queue *tqueue = NULL;
782
783         DPAA2_QDMA_FUNC_TRACE();
784
785         TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
786                 if (queue->dpdmai_dev == dpdmai_dev) {
787                         TAILQ_REMOVE(&qdma_queue_list, queue, next);
788                         rte_free(queue);
789                         queue = NULL;
790                 }
791         }
792 }
793
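/* Tear down a DPDMAI object: drop its H/W queues from the global list,
 * disable it, release the Rx DQ storage and close the MC object.
 */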
794 static int
795 dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
796 {
797         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
798         int ret, i;
799
800         DPAA2_QDMA_FUNC_TRACE();
801
802         /* Remove HW queues from global list */
803         remove_hw_queues_from_list(dpdmai_dev);
804
805         ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
806                              dpdmai_dev->token);
807         if (ret)
808                 DPAA2_QDMA_ERR("dpdmai disable failed");
809
810         /* Free the DQ storage allocated for the Rx queues */
811         for (i = 0; i < dpdmai_dev->num_queues; i++) {
812                 struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
813
814                 if (rxq->q_storage) {
815                         dpaa2_free_dq_storage(rxq->q_storage);
816                         rte_free(rxq->q_storage);
817                 }
818         }
819
820         /* Close the device at underlying layer */
821         ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
822         if (ret)
823                 DPAA2_QDMA_ERR("Failure closing dpdmai device");
824
825         return 0;
826 }
827
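/* Open and configure a DPDMAI object: set up its Rx/Tx queues, fetch their
 * FQIDs, enable the device and publish its H/W queues in the global list.
 */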
828 static int
829 dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
830 {
831         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
832         struct dpdmai_rx_queue_cfg rx_queue_cfg;
833         struct dpdmai_attr attr;
834         struct dpdmai_rx_queue_attr rx_attr;
835         struct dpdmai_tx_queue_attr tx_attr;
836         int ret, i;
837
838         DPAA2_QDMA_FUNC_TRACE();
839
840         /* Open DPDMAI device */
841         dpdmai_dev->dpdmai_id = dpdmai_id;
842         dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
843         ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
844                           dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
845         if (ret) {
846                 DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
847                 return ret;
848         }
849
850         /* Get DPDMAI attributes */
851         ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
852                                     dpdmai_dev->token, &attr);
853         if (ret) {
854                 DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
855                                ret);
856                 goto init_err;
857         }
858         dpdmai_dev->num_queues = attr.num_of_queues;
859
860         /* Set up Rx Queues */
861         for (i = 0; i < dpdmai_dev->num_queues; i++) {
862                 struct dpaa2_queue *rxq;
863
864                 memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
865                 ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
866                                           CMD_PRI_LOW,
867                                           dpdmai_dev->token,
868                                           i, 0, &rx_queue_cfg);
869                 if (ret) {
870                         DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
871                                        ret);
872                         goto init_err;
873                 }
874
875                 /* Allocate DQ storage for the DPDMAI Rx queues */
876                 rxq = &(dpdmai_dev->rx_queue[i]);
877                 rxq->q_storage = rte_malloc("dq_storage",
878                                             sizeof(struct queue_storage_info_t),
879                                             RTE_CACHE_LINE_SIZE);
880                 if (!rxq->q_storage) {
881                         DPAA2_QDMA_ERR("q_storage allocation failed");
882                         ret = -ENOMEM;
883                         goto init_err;
884                 }
885
886                 memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
887                 ret = dpaa2_alloc_dq_storage(rxq->q_storage);
888                 if (ret) {
889                         DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
890                         goto init_err;
891                 }
892         }
893
894         /* Get Rx and Tx queues FQIDs */
895         for (i = 0; i < dpdmai_dev->num_queues; i++) {
896                 ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
897                                           dpdmai_dev->token, i, 0, &rx_attr);
898                 if (ret) {
899                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
900                                        ret);
901                         goto init_err;
902                 }
903                 dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
904
905                 ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
906                                           dpdmai_dev->token, i, 0, &tx_attr);
907                 if (ret) {
908                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
909                                        ret);
910                         goto init_err;
911                 }
912                 dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
913         }
914
915         /* Enable the device */
916         ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
917                             dpdmai_dev->token);
918         if (ret) {
919                 DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
920                 goto init_err;
921         }
922
923         /* Add the HW queues to the global list */
924         ret = add_hw_queues_to_list(dpdmai_dev);
925         if (ret) {
926                 DPAA2_QDMA_ERR("Adding H/W queue to list failed");
927                 goto init_err;
928         }
929         DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
930
931         return 0;
932 init_err:
933         dpaa2_dpdmai_dev_uninit(rawdev);
934         return ret;
935 }
936
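/* fslmc bus probe callback: allocate a rawdev for the DPDMAI object and run
 * the device initialization.
 */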
937 static int
938 rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
939                      struct rte_dpaa2_device *dpaa2_dev)
940 {
941         struct rte_rawdev *rawdev;
942         int ret;
943
944         DPAA2_QDMA_FUNC_TRACE();
945
946         rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
947                         sizeof(struct dpaa2_dpdmai_dev),
948                         rte_socket_id());
949         if (!rawdev) {
950                 DPAA2_QDMA_ERR("Unable to allocate rawdevice");
951                 return -EINVAL;
952         }
953
954         dpaa2_dev->rawdev = rawdev;
955         rawdev->dev_ops = &dpaa2_qdma_ops;
956         rawdev->device = &dpaa2_dev->device;
957         rawdev->driver_name = dpaa2_drv->driver.name;
958
959         /* Invoke PMD device initialization function */
960         ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
961         if (ret) {
962                 rte_rawdev_pmd_release(rawdev);
963                 return ret;
964         }
965
966         return 0;
967 }
968
969 static int
970 rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
971 {
972         struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
973         int ret;
974
975         DPAA2_QDMA_FUNC_TRACE();
976
977         dpaa2_dpdmai_dev_uninit(rawdev);
978
979         ret = rte_rawdev_pmd_release(rawdev);
980         if (ret)
981                 DPAA2_QDMA_ERR("Device cleanup failed");
982
983         return 0;
984 }
985
986 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
987         .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
988         .drv_type = DPAA2_QDMA,
989         .probe = rte_dpaa2_qdma_probe,
990         .remove = rte_dpaa2_qdma_remove,
991 };
992
993 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
994
995 RTE_INIT(dpaa2_qdma_init_log)
996 {
997         dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
998         if (dpaa2_qdma_logtype >= 0)
999                 rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
1000 }