New upstream version 18.02
[deb_dpdk.git] / drivers / mempool / dpaa2 / dpaa2_hw_mempool.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016 NXP
5  *
6  */
7
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <sys/types.h>
11 #include <string.h>
12 #include <stdlib.h>
13 #include <fcntl.h>
14 #include <errno.h>
15
16 #include <rte_mbuf.h>
17 #include <rte_ethdev_driver.h>
18 #include <rte_malloc.h>
19 #include <rte_memcpy.h>
20 #include <rte_string_fns.h>
21 #include <rte_cycles.h>
22 #include <rte_kvargs.h>
23 #include <rte_dev.h>
24
25 #include <fslmc_logs.h>
26 #include <mc/fsl_dpbp.h>
27 #include <portal/dpaa2_hw_pvt.h>
28 #include <portal/dpaa2_hw_dpio.h>
29 #include "dpaa2_hw_mempool.h"
30
/* Per-BPID lookup table: maps a hardware buffer pool id to its metadata
 * (mbuf header size and owning bp_list). Filled in rte_hw_mbuf_create_pool.
 */
struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
/* Head of the singly-linked list of all configured DPAA2 buffer pool lists. */
static struct dpaa2_bp_list *h_bp_list;
33
34 static int
35 rte_hw_mbuf_create_pool(struct rte_mempool *mp)
36 {
37         struct dpaa2_bp_list *bp_list;
38         struct dpaa2_dpbp_dev *avail_dpbp;
39         struct dpaa2_bp_info *bp_info;
40         struct dpbp_attr dpbp_attr;
41         uint32_t bpid;
42         int ret;
43
44         avail_dpbp = dpaa2_alloc_dpbp_dev();
45
46         if (!avail_dpbp) {
47                 PMD_DRV_LOG(ERR, "DPAA2 resources not available");
48                 return -ENOENT;
49         }
50
51         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
52                 ret = dpaa2_affine_qbman_swp();
53                 if (ret) {
54                         RTE_LOG(ERR, PMD, "Failure in affining portal\n");
55                         goto err1;
56                 }
57         }
58
59         ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
60         if (ret != 0) {
61                 PMD_INIT_LOG(ERR, "Resource enable failure with"
62                         " err code: %d\n", ret);
63                 goto err1;
64         }
65
66         ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
67                                   avail_dpbp->token, &dpbp_attr);
68         if (ret != 0) {
69                 PMD_INIT_LOG(ERR, "Resource read failure with"
70                              " err code: %d\n", ret);
71                 goto err2;
72         }
73
74         bp_info = rte_malloc(NULL,
75                              sizeof(struct dpaa2_bp_info),
76                              RTE_CACHE_LINE_SIZE);
77         if (!bp_info) {
78                 PMD_INIT_LOG(ERR, "No heap memory available for bp_info");
79                 ret = -ENOMEM;
80                 goto err2;
81         }
82
83         /* Allocate the bp_list which will be added into global_bp_list */
84         bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
85                              RTE_CACHE_LINE_SIZE);
86         if (!bp_list) {
87                 PMD_INIT_LOG(ERR, "No heap memory available");
88                 ret = -ENOMEM;
89                 goto err3;
90         }
91
92         /* Set parameters of buffer pool list */
93         bp_list->buf_pool.num_bufs = mp->size;
94         bp_list->buf_pool.size = mp->elt_size
95                         - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
96         bp_list->buf_pool.bpid = dpbp_attr.bpid;
97         bp_list->buf_pool.h_bpool_mem = NULL;
98         bp_list->buf_pool.dpbp_node = avail_dpbp;
99         /* Identification for our offloaded pool_data structure */
100         bp_list->dpaa2_ops_index = mp->ops_index;
101         bp_list->next = h_bp_list;
102         bp_list->mp = mp;
103
104         bpid = dpbp_attr.bpid;
105
106         rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
107                                 + rte_pktmbuf_priv_size(mp);
108         rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
109         rte_dpaa2_bpid_info[bpid].bpid = bpid;
110
111         rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
112                    sizeof(struct dpaa2_bp_info));
113         mp->pool_data = (void *)bp_info;
114
115         PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
116
117         h_bp_list = bp_list;
118         return 0;
119 err3:
120         rte_free(bp_info);
121 err2:
122         dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
123 err1:
124         dpaa2_free_dpbp_dev(avail_dpbp);
125
126         return ret;
127 }
128
129 static void
130 rte_hw_mbuf_free_pool(struct rte_mempool *mp)
131 {
132         struct dpaa2_bp_info *bpinfo;
133         struct dpaa2_bp_list *bp;
134         struct dpaa2_dpbp_dev *dpbp_node;
135
136         if (!mp->pool_data) {
137                 PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool");
138                 return;
139         }
140
141         bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
142         bp = bpinfo->bp_list;
143         dpbp_node = bp->buf_pool.dpbp_node;
144
145         dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);
146
147         if (h_bp_list == bp) {
148                 h_bp_list = h_bp_list->next;
149         } else { /* if it is not the first node */
150                 struct dpaa2_bp_list *prev = h_bp_list, *temp;
151                 temp = h_bp_list->next;
152                 while (temp) {
153                         if (temp == bp) {
154                                 prev->next = temp->next;
155                                 rte_free(bp);
156                                 break;
157                         }
158                         prev = temp;
159                         temp = temp->next;
160                 }
161         }
162
163         rte_free(mp->pool_data);
164         dpaa2_free_dpbp_dev(dpbp_node);
165 }
166
/* Release @count mbufs from @obj_table back into QBMAN buffer pool @bpid.
 *
 * Each hardware buffer address is the mbuf's virtual (or IOVA, when
 * RTE_LIBRTE_DPAA2_USE_PHYS_IOVA is set) address offset by
 * @meta_data_size, i.e. past the rte_mbuf header and private area.
 * Releases happen in chunks of DPAA2_MBUF_MAX_ACQ_REL: the non-aligned
 * remainder (count % DPAA2_MBUF_MAX_ACQ_REL) is released first, then
 * full-sized chunks. qbman_swp_release() is retried while it reports
 * -EBUSY. On portal-affinement failure the buffers are silently dropped
 * (function returns void).
 */
static void
rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
			void * const *obj_table,
			uint32_t bpid,
			uint32_t meta_data_size,
			int count)
{
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp;
	int ret;
	int i, n;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];

	/* Need a QBMAN software portal affined to this lcore. */
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* Create a release descriptor required for releasing
	 * buffers into QBMAN
	 */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, bpid);

	/* Handle the non-chunk-aligned remainder first so the loop below
	 * can always release full DPAA2_MBUF_MAX_ACQ_REL-sized chunks.
	 */
	n = count % DPAA2_MBUF_MAX_ACQ_REL;
	if (unlikely(!n))
		goto aligned;

	/* convert mbuf to buffers for the remainder */
	for (i = 0; i < n ; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
				+ meta_data_size;
#else
		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
#endif
	}

	/* feed them to bman */
	do {
		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
	} while (ret == -EBUSY);	/* portal busy: spin until accepted */

aligned:
	/* if there are more buffers to free */
	while (n < count) {
		/* convert mbuf to buffers */
		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			bufs[i] = (uint64_t)
				  rte_mempool_virt2iova(obj_table[n + i])
				  + meta_data_size;
#else
			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
#endif
		}

		do {
			ret = qbman_swp_release(swp, &releasedesc, bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} while (ret == -EBUSY);
		n += DPAA2_MBUF_MAX_ACQ_REL;
	}
}
235
/* Mempool 'dequeue' op: acquire exactly @count buffers from the DPAA2
 * hardware pool backing @pool and store the resulting mbuf pointers in
 * @obj_table.
 *
 * Buffers come back from QBMAN as hardware addresses; each is converted
 * to an mbuf pointer by translating IOVA->VA and subtracting
 * meta_data_size (mbuf header + private area). Acquire is performed in
 * chunks of at most DPAA2_MBUF_MAX_ACQ_REL. The mempool API is
 * all-or-nothing: if the pool cannot supply the full @count, every
 * buffer acquired so far is released back and -ENOBUFS is returned.
 *
 * Returns 0 on success; -ENOENT if the pool is unconfigured, a portal
 * affinement error, or -ENOBUFS on shortfall.
 */
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
			  void **obj_table, unsigned int count)
{
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	static int alloc;	/* running total of buffers handed out (debug) */
#endif
	struct qbman_swp *swp;
	uint16_t bpid;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
	int i, ret;
	unsigned int n = 0;	/* number of buffers acquired so far */
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);

	if (!(bp_info->bp_list)) {
		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
		return -ENOENT;
	}

	bpid = bp_info->bpid;

	/* Need a QBMAN software portal affined to this lcore. */
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
			return ret;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (n < count) {
		/* Acquire is all-or-nothing, so we drain in 7s,
		 * then the remainder.
		 */
		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
			ret = qbman_swp_acquire(swp, bpid, bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} else {
			ret = qbman_swp_acquire(swp, bpid, bufs,
						count - n);
		}
		/* In case of less than requested number of buffers available
		 * in pool, qbman_swp_acquire returns 0
		 */
		if (ret <= 0) {
			PMD_TX_LOG(ERR, "Buffer acquire failed with"
				   " err code: %d", ret);
			/* The API expect the exact number of requested bufs */
			/* Releasing all buffers allocated */
			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
					   bp_info->meta_data_size, n);
			return -ENOBUFS;
		}
		/* assigning mbuf from the acquired objects */
		for (i = 0; (i < ret) && bufs[i]; i++) {
			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
			obj_table[n] = (struct rte_mbuf *)
				       (bufs[i] - bp_info->meta_data_size);
			PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
				   (void *)bufs[i], (void *)obj_table[n]);
			n++;
		}
	}

#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	alloc += n;
	PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d",
		   alloc, count, n);
#endif
	return 0;
}
309
310 static int
311 rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
312                   void * const *obj_table, unsigned int n)
313 {
314         struct dpaa2_bp_info *bp_info;
315
316         bp_info = mempool_to_bpinfo(pool);
317         if (!(bp_info->bp_list)) {
318                 RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
319                 return -ENOENT;
320         }
321         rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
322                            bp_info->meta_data_size, n);
323
324         return 0;
325 }
326
327 static unsigned int
328 rte_hw_mbuf_get_count(const struct rte_mempool *mp)
329 {
330         int ret;
331         unsigned int num_of_bufs = 0;
332         struct dpaa2_bp_info *bp_info;
333         struct dpaa2_dpbp_dev *dpbp_node;
334
335         if (!mp || !mp->pool_data) {
336                 RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
337                 return 0;
338         }
339
340         bp_info = (struct dpaa2_bp_info *)mp->pool_data;
341         dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;
342
343         ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
344                                      dpbp_node->token, &num_of_bufs);
345         if (ret) {
346                 RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
347                         ret);
348                 return 0;
349         }
350
351         RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);
352
353         return num_of_bufs;
354 }
355
/* Mempool ops table exposing this DPAA2 hardware pool manager to the
 * rte_mempool library under DPAA2_MEMPOOL_OPS_NAME.
 */
struct rte_mempool_ops dpaa2_mpool_ops = {
	.name = DPAA2_MEMPOOL_OPS_NAME,
	.alloc = rte_hw_mbuf_create_pool,	/* attach a DPBP to the pool */
	.free = rte_hw_mbuf_free_pool,		/* tear down the DPBP */
	.enqueue = rte_hw_mbuf_free_bulk,	/* release bufs to hardware */
	.dequeue = rte_dpaa2_mbuf_alloc_bulk,	/* acquire bufs from hardware */
	.get_count = rte_hw_mbuf_get_count,	/* hardware free-buffer count */
};

/* Register the ops table with the mempool library at load time. */
MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);