drivers/mempool/dpaa/dpaa_mempool.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017 NXP
 *
 */

/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <dpaa_mempool.h>

/* List of all the memseg information locally maintained in the dpaa driver.
 * This is used to optimize PA-to-VA searches until a better mechanism
 * (algorithm) is available.
 */
struct dpaa_memseg_list rte_dpaa_memsegs
        = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);

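/* Illustrative sketch (not part of the driver): a consumer of the list
 * above, such as a PA-to-VA helper, can resolve a physical address by
 * walking the pinned memseg ranges. The helper name below is hypothetical.
 */
static __rte_unused void *
dpaa_example_paddr_to_vaddr(rte_iova_t paddr)
{
        struct dpaa_memseg *ms;

        TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
                if (paddr >= ms->iova && paddr < ms->iova + ms->len)
                        return RTE_PTR_ADD(ms->vaddr, paddr - ms->iova);
        }
        /* Not found: callers fall back to the generic DPDK memseg lookup. */
        return NULL;
}
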
struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];

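/* Per-BPID bookkeeping: dpaa_mbuf_create_pool() fills the slot of the
 * dynamically allocated BPID in the table above and also stores a heap copy
 * in mp->pool_data, so the pool info is reachable both by BPID and through
 * the mempool pointer.
 */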
static int
dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
        struct bman_pool *bp;
        struct bm_buffer bufs[8];
        struct dpaa_bp_info *bp_info;
        uint8_t bpid;
        int num_bufs = 0, ret = 0;
        struct bman_pool_params params = {
                .flags = BMAN_POOL_FLAG_DYNAMIC_BPID
        };

        MEMPOOL_INIT_FUNC_TRACE();

        bp = bman_new_pool(&params);
        if (!bp) {
                DPAA_MEMPOOL_ERR("bman_new_pool() failed");
                return -ENODEV;
        }
        bpid = bman_get_params(bp)->bpid;

        /* Drain the pool of anything already in it. */
        do {
                /* Acquire is all-or-nothing, so we drain in 8s,
                 * then in 1s for the remainder.
                 */
                if (ret != 1)
                        ret = bman_acquire(bp, bufs, 8, 0);
                if (ret < 8)
                        ret = bman_acquire(bp, bufs, 1, 0);
                if (ret > 0)
                        num_bufs += ret;
        } while (ret > 0);
        if (num_bufs)
                DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
                                  num_bufs, bpid);

        rte_dpaa_bpid_info[bpid].mp = mp;
        rte_dpaa_bpid_info[bpid].bpid = bpid;
        rte_dpaa_bpid_info[bpid].size = mp->elt_size;
        rte_dpaa_bpid_info[bpid].bp = bp;
        rte_dpaa_bpid_info[bpid].meta_data_size =
                sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
        rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
        rte_dpaa_bpid_info[bpid].ptov_off = 0;
        rte_dpaa_bpid_info[bpid].flags = 0;

        bp_info = rte_malloc(NULL,
                             sizeof(struct dpaa_bp_info),
                             RTE_CACHE_LINE_SIZE);
        if (!bp_info) {
                DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
                bman_free_pool(bp);
                return -ENOMEM;
        }

        rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
                   sizeof(struct dpaa_bp_info));
        mp->pool_data = (void *)bp_info;

        DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
        return 0;
}

static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
        struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

        MEMPOOL_INIT_FUNC_TRACE();

        if (bp_info) {
                bman_free_pool(bp_info->bp);
                DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
                                  bp_info->bpid);
                rte_free(mp->pool_data);
                mp->pool_data = NULL;
        }
}

static void
dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
        struct bm_buffer buf;
        int ret;

        DPAA_MEMPOOL_DEBUG("Free 0x%" PRIx64 " to bpid: %d",
                           addr, bp_info->bpid);

        bm_buffer_set64(&buf, addr);
retry:
        ret = bman_release(bp_info->bp, &buf, 1, 0);
        if (ret) {
                DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
                cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
                goto retry;
        }
}

static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
                    void *const *obj_table,
                    unsigned int n)
{
        struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
        int ret;
        unsigned int i = 0;

        DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
                             n, bp_info->bpid);

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
                                         ret);
                        return 0;
                }
        }

        while (i < n) {
                uint64_t phy = rte_mempool_virt2iova(obj_table[i]);

                if (unlikely(!bp_info->ptov_off)) {
                        /* When all buffers come from a single memory segment,
                         * the VA-to-PA offset is constant and can be cached
                         * for later address conversions.
                         */
                        if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
                                bp_info->ptov_off = (size_t)obj_table[i] - phy;
                                rte_dpaa_bpid_info[bp_info->bpid].ptov_off
                                                = bp_info->ptov_off;
                        }
                }

                dpaa_buf_free(bp_info,
                              (uint64_t)phy + bp_info->meta_data_size);
                i = i + 1;
        }

        DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
                             n, bp_info->bpid);

        return 0;
}

static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
                     void **obj_table,
                     unsigned int count)
{
        struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
        struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
        struct dpaa_bp_info *bp_info;
        void *bufaddr;
        int i, ret;
        unsigned int n = 0;

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

        DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
                             count, bp_info->bpid);

        if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
                DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
                                 count);
                return -1;
        }

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
                                         ret);
                        return -1;
                }
        }

        while (n < count) {
                /* Acquire is all-or-nothing, so we acquire in batches of
                 * DPAA_MBUF_MAX_ACQ_REL, then the remainder.
                 */
                if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
                        ret = bman_acquire(bp_info->bp, bufs,
                                           DPAA_MBUF_MAX_ACQ_REL, 0);
                } else {
                        ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
                }
                /* If fewer buffers than requested are available in the pool,
                 * the acquire fails and no buffers are returned.
                 */
                if (ret <= 0) {
                        DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
                                             ret);
                        /* The API expects the exact number of requested
                         * buffers, so release everything acquired so far.
                         */
                        dpaa_mbuf_free_bulk(pool, obj_table, n);
                        return -ENOBUFS;
                }
                /* Assign mbufs from the acquired objects. */
                for (i = 0; (i < ret) && bufs[i].addr; i++) {
                        /* TODO - errata: observed that bufs may be NULL,
                         * i.e. the first buffer is valid while the remaining
                         * 6 buffers may be NULL.
                         */
                        bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
                        m[n] = (struct rte_mbuf *)((char *)bufaddr
                                                - bp_info->meta_data_size);
                        DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
                                             (void *)bufaddr, (void *)m[n]);
                        n++;
                }
        }

        DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
                             n, bp_info->bpid);
        return 0;
}

static unsigned int
dpaa_mbuf_get_count(const struct rte_mempool *mp)
{
        struct dpaa_bp_info *bp_info;

        MEMPOOL_INIT_FUNC_TRACE();

        if (!mp || !mp->pool_data) {
                DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
                return 0;
        }

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

        return bman_query_free_buffers(bp_info->bp);
}

static int
dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
              void *vaddr, rte_iova_t paddr, size_t len,
              rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
        struct dpaa_bp_info *bp_info;
        struct dpaa_memseg *ms;
        unsigned int total_elt_sz;

        MEMPOOL_INIT_FUNC_TRACE();

        if (!mp || !mp->pool_data) {
                DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
                return 0;
        }

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

        DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
                           (uint64_t)len, total_elt_sz * mp->size);

        /* Check whether this memory area has sufficient space for all
         * elements of the pool.
         */
        if (len >= total_elt_sz * mp->size)
                bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;

        /* For each memory chunk pinned to the mempool, a node describing the
         * contained memory is added to a linked list, which is searched when
         * PA-to-VA conversion is required.
         */
        ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
        if (!ms) {
                DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
                DPAA_MEMPOOL_WARN("Fast physical-to-virtual address translation will not be available.");
                /* If the element is not added, lookups for it simply fail and
                 * the logic falls back to the traditional DPDK memseg
                 * traversal code. This is therefore not a blocking error;
                 * the messages above are logged and population continues.
                 */
                return 0;
        }

        ms->vaddr = vaddr;
        ms->iova = paddr;
        ms->len = len;
        /* Head insertion is generally faster than tail insertion, as the
         * buffers pinned are picked from the rear end.
         */
        TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);

        return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
                                               obj_cb, obj_cb_arg);
}

struct rte_mempool_ops dpaa_mpool_ops = {
        .name = DPAA_MEMPOOL_OPS_NAME,
        .alloc = dpaa_mbuf_create_pool,
        .free = dpaa_mbuf_free_pool,
        .enqueue = dpaa_mbuf_free_bulk,
        .dequeue = dpaa_mbuf_alloc_bulk,
        .get_count = dpaa_mbuf_get_count,
        .populate = dpaa_populate,
};

MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
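
/* Usage sketch (illustrative only, not part of the driver): an application
 * on a DPAA platform typically picks up these ops through the build-time
 * default mempool ops setting, but it can also request them explicitly by
 * name. The function name and pool sizing values below are hypothetical.
 */
static __rte_unused struct rte_mempool *
dpaa_example_pktmbuf_pool(void)
{
        /* Attach dpaa_mpool_ops (registered above) by its ops name. */
        return rte_pktmbuf_pool_create_by_ops("ex_dpaa_pool", 4096, 256, 0,
                                              RTE_MBUF_DEFAULT_BUF_SIZE,
                                              SOCKET_ID_ANY,
                                              DPAA_MEMPOOL_OPS_NAME);
}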