/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP
 *
 */

#include "bman.h"
#include <rte_branch_prediction.h>

/* Compilation constants */
#define RCR_THRESH      2       /* reread h/w CI when running out of space */
#define IRQNAME         "BMan portal %d"
#define MAX_IRQNAME     16      /* big enough for "BMan portal %d" */

struct bman_portal {
        struct bm_portal p;
        /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
        struct bman_depletion *pools;
        int thresh_set;
        unsigned long irq_sources;
        u32 slowpoll;   /* only used when interrupts are off */
        /* When the cpu-affine portal is activated, this is non-NULL */
        const struct bm_portal_config *config;
        char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static RTE_DEFINE_PER_LCORE(struct bman_portal, bman_affine_portal);

static inline struct bman_portal *get_affine_portal(void)
{
        return &RTE_PER_LCORE(bman_affine_portal);
}

/*
 * This object type refers to a pool; it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, e.g. if different users of
 * the pool are operating via different portals.
 */
struct bman_pool {
        struct bman_pool_params params;
        /* Used for hash-table admin when using depletion notifications. */
        struct bman_portal *portal;
        struct bman_pool *next;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        atomic_t in_use;
#endif
};

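/*
 * Initialise the software portal state on top of the register regions mapped
 * in @c: set up the release command ring (RCR) and management-command
 * interface, allocate the depletion mask/snapshot pair, disable all BPID
 * state-change notifications, clear stale interrupt state and register the
 * portal IRQ. Returns the portal on success, or NULL with everything torn
 * down again.
 */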
static inline
struct bman_portal *bman_create_portal(struct bman_portal *portal,
                                       const struct bm_portal_config *c)
{
        struct bm_portal *p;
        const struct bman_depletion *pools = &c->mask;
        int ret;
        u8 bpid = 0;

        p = &portal->p;
        /*
         * Prep the low-level portal struct with the mapped addresses from the
         * config; everything that follows depends on it, and "config" is kept
         * mostly for (de)reference.
         */
        p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
        p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
        if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
                pr_err("Bman RCR initialisation failed\n");
                return NULL;
        }
        if (bm_mc_init(p)) {
                pr_err("Bman MC initialisation failed\n");
                goto fail_mc;
        }
        portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
        if (!portal->pools)
                goto fail_pools;
        portal->pools[0] = *pools;
        bman_depletion_init(portal->pools + 1);
        while (bpid < bman_pool_max) {
                /*
                 * Default to all BPIDs disabled; we enable as required at
                 * run-time.
                 */
                bm_isr_bscn_mask(p, bpid, 0);
                bpid++;
        }
        portal->slowpoll = 0;
        /* Write-to-clear any stale interrupt status bits */
        bm_isr_disable_write(p, 0xffffffff);
        portal->irq_sources = 0;
        bm_isr_enable_write(p, portal->irq_sources);
        bm_isr_status_clear(p, 0xffffffff);
        snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
        if (request_irq(c->irq, NULL, 0, portal->irqname,
                        portal)) {
                pr_err("request_irq() failed\n");
                goto fail_irq;
        }

        /* Need RCR to be empty before continuing */
        ret = bm_rcr_get_fill(p);
        if (ret) {
                pr_err("Bman RCR unclean\n");
                goto fail_rcr_empty;
        }
        /* Success */
        portal->config = c;

        bm_isr_disable_write(p, 0);
        bm_isr_uninhibit(p);
        return portal;
fail_rcr_empty:
        free_irq(c->irq, portal);
fail_irq:
        kfree(portal->pools);
fail_pools:
        bm_mc_finish(p);
fail_mc:
        bm_rcr_finish(p);
        return NULL;
}

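/*
 * Create the portal that is affine to the current lcore and record its CPU
 * in the global affine mask.
 */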
struct bman_portal *
bman_create_affine_portal(const struct bm_portal_config *c)
{
        struct bman_portal *portal = get_affine_portal();

        /*
         * This function is called from a context that is already affine to a
         * CPU; in other words, it is non-migratable to other CPUs.
         */
        portal = bman_create_portal(portal, c);
        if (portal) {
                spin_lock(&affine_mask_lock);
                CPU_SET(c->cpu, &affine_mask);
                spin_unlock(&affine_mask_lock);
        }
        return portal;
}

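/*
 * Tear down a portal: re-read the RCR consumer index to quiesce it, release
 * the IRQ, free the depletion mask/snapshot pair and finish the RCR and
 * management-command sub-blocks.
 */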
static inline
void bman_destroy_portal(struct bman_portal *bm)
{
        const struct bm_portal_config *pcfg;

        pcfg = bm->config;
        bm_rcr_cce_update(&bm->p);
        bm_rcr_cce_update(&bm->p);

        free_irq(pcfg->irq, bm);

        kfree(bm->pools);
        bm_mc_finish(&bm->p);
        bm_rcr_finish(&bm->p);
        bm->config = NULL;
}

const struct
bm_portal_config *bman_destroy_affine_portal(void)
{
        struct bman_portal *bm = get_affine_portal();
        const struct bm_portal_config *pcfg;

        pcfg = bm->config;
        bman_destroy_portal(bm);
        spin_lock(&affine_mask_lock);
        CPU_CLR(pcfg->cpu, &affine_mask);
        spin_unlock(&affine_mask_lock);
        return pcfg;
}

int
bman_get_portal_index(void)
{
        struct bman_portal *p = get_affine_portal();
        return p->config->index;
}

static const u32 zero_thresholds[4] = {0, 0, 0, 0};

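/*
 * Allocate and initialise a pool object for @params, optionally allocating a
 * dynamic BPID and programming the depletion thresholds. On failure any BPID
 * allocation and threshold programming is rolled back and NULL is returned.
 */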
struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
{
        struct bman_pool *pool = NULL;
        u32 bpid;

        if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
                int ret = bman_alloc_bpid(&bpid);

                if (ret)
                        return NULL;
        } else {
                if (params->bpid >= bman_pool_max)
                        return NULL;
                bpid = params->bpid;
        }
        if (params->flags & BMAN_POOL_FLAG_THRESH) {
                int ret = bm_pool_set(bpid, params->thresholds);

                if (ret)
                        goto err;
        }

        pool = kmalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto err;
        pool->params = *params;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        atomic_set(&pool->in_use, 1);
#endif
        if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
                pool->params.bpid = bpid;

        return pool;
err:
        if (params->flags & BMAN_POOL_FLAG_THRESH)
                bm_pool_set(bpid, zero_thresholds);

        if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
                bman_release_bpid(bpid);
        kfree(pool);

        return NULL;
}

void bman_free_pool(struct bman_pool *pool)
{
        if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
                bm_pool_set(pool->params.bpid, zero_thresholds);
        if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
                bman_release_bpid(pool->params.bpid);
        kfree(pool);
}

const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
{
        return &pool->params;
}

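/*
 * Re-read the hardware RCR consumer index: prefetch it if entries are still
 * available, otherwise update the cached copy immediately.
 */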
static void update_rcr_ci(struct bman_portal *p, int avail)
{
        if (avail)
                bm_rcr_cce_prefetch(&p->p);
        else
                bm_rcr_cce_update(&p->p);
}

#define BMAN_BUF_MASK 0x0000fffffffffffful
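/*
 * Release up to 8 buffers to the pool's BPID through the affine portal's
 * release command ring (RCR). The first entry carries the BPID in its upper
 * 16 bits; returns 0 on success or -EBUSY if no RCR entry is available.
 */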
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
                 u32 flags __maybe_unused)
{
        struct bman_portal *p;
        struct bm_rcr_entry *r;
        u32 i = num - 1;
        u8 avail;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        if (!num || (num > 8))
                return -EINVAL;
        if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
                return -EINVAL;
#endif

        p = get_affine_portal();
        avail = bm_rcr_get_avail(&p->p);
        if (avail < 2)
                update_rcr_ci(p, avail);
        r = bm_rcr_start(&p->p);
        if (unlikely(!r))
                return -EBUSY;

        /*
         * We can copy all but the first entry as-is; writing the first entry
         * wholesale can trigger badness with the valid-bit, so it is composed
         * separately with the BPID folded into its upper bits.
         */
        r->bufs[0].opaque =
                cpu_to_be64(((u64)pool->params.bpid << 48) |
                            (bufs[0].opaque & BMAN_BUF_MASK));
        if (i) {
                for (i = 1; i < num; i++)
                        r->bufs[i].opaque =
                                cpu_to_be64(bufs[i].opaque & BMAN_BUF_MASK);
        }

        bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
                          (num & BM_RCR_VERB_BUFCOUNT_MASK));

        return 0;
}

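/*
 * Acquire up to 8 buffers from the pool via a management command on the
 * affine portal, busy-waiting for the result. Returns the number of buffers
 * acquired, or -ENOMEM if fewer than requested were available.
 */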
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
                 u32 flags __maybe_unused)
{
        struct bman_portal *p = get_affine_portal();
        struct bm_mc_command *mcc;
        struct bm_mc_result *mcr;
        int ret, i;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        if (!num || (num > 8))
                return -EINVAL;
        if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
                return -EINVAL;
#endif

        mcc = bm_mc_start(&p->p);
        mcc->acquire.bpid = pool->params.bpid;
        bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
                        (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
        while (!(mcr = bm_mc_result(&p->p)))
                cpu_relax();
        ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
        if (bufs) {
                for (i = 0; i < num; i++)
                        bufs[i].opaque =
                                be64_to_cpu(mcr->acquire.bufs[i].opaque);
        }
        if (ret != num)
                ret = -ENOMEM;
        return ret;
}

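/*
 * Query the availability and depletion state of all pools via a management
 * command on the affine portal, converting the state bitmaps to CPU byte
 * order.
 */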
int bman_query_pools(struct bm_pool_state *state)
{
        struct bman_portal *p = get_affine_portal();
        struct bm_mc_result *mcr;

        bm_mc_start(&p->p);
        bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
        while (!(mcr = bm_mc_result(&p->p)))
                cpu_relax();
        DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
                    BM_MCR_VERB_CMD_QUERY);
        *state = mcr->query;
        state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
        state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
        state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
        state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
        return 0;
}

u32 bman_query_free_buffers(struct bman_pool *pool)
{
        return bm_pool_free_buffers(pool->params.bpid);
}

int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
{
        u32 bpid;

        bpid = bman_get_params(pool)->bpid;

        return bm_pool_set(bpid, thresholds);
}

int bman_shutdown_pool(u32 bpid)
{
        struct bman_portal *p = get_affine_portal();
        return bm_shutdown_pool(&p->p, bpid);
}