2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright 2008-2016 Freescale Semiconductor Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * * Neither the name of the above-listed copyright holders nor the
18 * names of any contributors may be used to endorse or promote products
19 * derived from this software without specific prior written permission.
23 * ALTERNATIVELY, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") as published by the Free Software
25 * Foundation, either version 2 of that License or (at your option) any
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
42 #include <rte_branch_prediction.h>
44 /* Compilation constants */
45 #define RCR_THRESH 2 /* reread h/w CI when running out of space */
/* printf-style template; formatted with the portal's cpu in bman_create_portal() */
46 #define IRQNAME "BMan portal %d"
47 #define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
51 /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
52 struct bman_depletion *pools;
/* Bitmask of enabled interrupt sources; mirrored to h/w via bm_isr_enable_write() */
54 unsigned long irq_sources;
55 u32 slowpoll; /* only used when interrupts are off */
56 /* When the cpu-affine portal is activated, this is non-NULL */
57 const struct bm_portal_config *config;
/* IRQ name registered via request_irq(); built from the IRQNAME template */
58 char irqname[MAX_IRQNAME];
/* Set of CPUs that currently own an affine portal; guarded by affine_mask_lock */
61 static cpumask_t affine_mask;
62 static DEFINE_SPINLOCK(affine_mask_lock);
/* One bman_portal instance per lcore, returned by get_affine_portal() */
63 static RTE_DEFINE_PER_LCORE(struct bman_portal, bman_affine_portal);
/* Return the calling lcore's per-lcore BMan portal object. */
65 static inline struct bman_portal *get_affine_portal(void)
67 return &RTE_PER_LCORE(bman_affine_portal);
71 * This object type refers to a pool, it isn't *the* pool. There may be
72 * more than one such object per BMan buffer pool, eg. if different users of
73 * the pool are operating via different portals.
/* Creation parameters; see bman_new_pool() (BPID rewritten if dynamic) */
76 struct bman_pool_params params;
77 /* Used for hash-table admin when using depletion notifications. */
78 struct bman_portal *portal;
/* NOTE(review): presumably chains pool objects (per portal?) -- confirm at call sites */
79 struct bman_pool *next;
80 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
/*
 * Initialise a portal from its config: map the cache-enabled/inhibited
 * addresses, bring up the RCR and MC sub-interfaces, allocate the depletion
 * mask/snapshot pair, quiesce all interrupt state and register the portal
 * IRQ.  Later lines unwind the IRQ registration and pools allocation on
 * failure.
 */
86 struct bman_portal *bman_create_portal(struct bman_portal *portal,
87 const struct bm_portal_config *c)
90 const struct bman_depletion *pools = &c->mask;
96 * prep the low-level portal struct with the mapped addresses from the
97 * config, everything that follows depends on it and "config" is more
98 * for (de)reference...
100 p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
101 p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
102 if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
103 pr_err("Bman RCR initialisation failed\n");
107 pr_err("Bman MC initialisation failed\n");
/* pools[0] = mask copied from the config, pools[1] = fresh snapshot */
110 portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
113 portal->pools[0] = *pools;
114 bman_depletion_init(portal->pools + 1);
115 while (bpid < bman_pool_max) {
117 * Default to all BPIDs disabled, we enable as required at
120 bm_isr_bscn_mask(p, bpid, 0);
123 portal->slowpoll = 0;
124 /* Write-to-clear any stale interrupt status bits */
125 bm_isr_disable_write(p, 0xffffffff);
126 portal->irq_sources = 0;
127 bm_isr_enable_write(p, portal->irq_sources);
128 bm_isr_status_clear(p, 0xffffffff);
129 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
130 if (request_irq(c->irq, NULL, 0, portal->irqname,
132 pr_err("request_irq() failed\n");
136 /* Need RCR to be empty before continuing */
137 ret = bm_rcr_get_fill(p);
139 pr_err("Bman RCR unclean\n");
/* success path: clear the ISR disable mask set above (0 = none disabled) */
145 bm_isr_disable_write(p, 0);
/* --- error unwind: release IRQ, then the pools array --- */
149 free_irq(c->irq, portal);
151 kfree(portal->pools);
/*
 * Create the affine portal for the current lcore and record its CPU in
 * affine_mask (under affine_mask_lock).
 */
160 bman_create_affine_portal(const struct bm_portal_config *c)
162 struct bman_portal *portal = get_affine_portal();
164 /* This function is called from a context which is already affine to a
165 * CPU; in other words the caller is non-migratable to other CPUs.
167 portal = bman_create_portal(portal, c);
/* Mark this CPU as owning an affine portal */
169 spin_lock(&affine_mask_lock);
170 CPU_SET(c->cpu, &affine_mask);
171 spin_unlock(&affine_mask_lock);
/*
 * Tear down a portal: acknowledge outstanding RCR completions, free the
 * IRQ, then finish the MC and RCR sub-interfaces.
 */
177 void bman_destroy_portal(struct bman_portal *bm)
179 const struct bm_portal_config *pcfg;
/* Done twice -- presumably to pick up completions that land during the
 * first update; TODO confirm against bm_rcr_cce_update() semantics. */
182 bm_rcr_cce_update(&bm->p);
183 bm_rcr_cce_update(&bm->p);
185 free_irq(pcfg->irq, bm);
188 bm_mc_finish(&bm->p);
189 bm_rcr_finish(&bm->p);
/*
 * Destroy the calling lcore's affine portal and clear its CPU from
 * affine_mask.  Presumably returns the portal's config for the caller to
 * reuse -- return statement not visible here; confirm.
 */
194 bm_portal_config *bman_destroy_affine_portal(void)
196 struct bman_portal *bm = get_affine_portal();
197 const struct bm_portal_config *pcfg;
200 bman_destroy_portal(bm);
/* Remove this CPU from the set of CPUs owning affine portals */
201 spin_lock(&affine_mask_lock);
202 CPU_CLR(pcfg->cpu, &affine_mask);
203 spin_unlock(&affine_mask_lock);
/* Return the configured index of the calling lcore's affine portal. */
208 bman_get_portal_index(void)
210 struct bman_portal *p = get_affine_portal();
211 return p->config->index;
/* All-zero threshold set, used to reset a pool's depletion thresholds */
214 static const u32 zero_thresholds[4] = {0, 0, 0, 0};
/*
 * Allocate and initialise a pool object.  Optionally allocates a dynamic
 * BPID (BMAN_POOL_FLAG_DYNAMIC_BPID) and programs depletion thresholds
 * (BMAN_POOL_FLAG_THRESH); later lines unwind both on failure.
 */
216 struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
218 struct bman_pool *pool = NULL;
/* Dynamic BPID requested: take one from the global allocator */
221 if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
222 int ret = bman_alloc_bpid(&bpid);
/* Static BPID: must be within the platform's pool count */
227 if (params->bpid >= bman_pool_max)
231 if (params->flags & BMAN_POOL_FLAG_THRESH) {
232 int ret = bm_pool_set(bpid, params->thresholds);
238 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
241 pool->params = *params;
242 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
243 atomic_set(&pool->in_use, 1);
/* Record the dynamically-allocated BPID in the stored params */
245 if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
246 pool->params.bpid = bpid;
/* --- error unwind: reset thresholds, then release a dynamic BPID --- */
250 if (params->flags & BMAN_POOL_FLAG_THRESH)
251 bm_pool_set(bpid, zero_thresholds);
253 if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
254 bman_release_bpid(bpid);
/*
 * Release a pool object: zero any thresholds programmed at create time and
 * return a dynamically-allocated BPID to the allocator.
 */
260 void bman_free_pool(struct bman_pool *pool)
262 if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
263 bm_pool_set(pool->params.bpid, zero_thresholds);
264 if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
265 bman_release_bpid(pool->params.bpid);
/* Accessor: the parameters this pool object was created with. */
269 const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
271 return &pool->params;
/*
 * Refresh the cached RCR consumer index from h/w (prefetch, then update);
 * invoked from bman_release() when the producer view is low on space.
 */
274 static void update_rcr_ci(struct bman_portal *p, int avail)
277 bm_rcr_cce_prefetch(&p->p);
279 bm_rcr_cce_update(&p->p);
/* Mask selecting the low 48 bits (buffer address/token) of a bm_buffer word */
282 #define BMAN_BUF_MASK 0x0000fffffffffffful
/*
 * Release 1..8 buffers to the pool via the affine portal's RCR ring.  The
 * first entry word carries the BPID in bits 63..48; all words are written
 * big-endian.
 */
283 int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
284 u32 flags __maybe_unused)
286 struct bman_portal *p;
287 struct bm_rcr_entry *r;
/* Debug-build sanity checks: 1..8 buffers, pool must permit releases */
291 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
292 if (!num || (num > 8))
294 if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
298 p = get_affine_portal();
299 avail = bm_rcr_get_avail(&p->p);
/* Ring looks low on space: re-read the h/w consumer index */
301 update_rcr_ci(p, avail);
302 r = bm_rcr_start(&p->p);
307 * we can copy all but the first entry, as this can trigger badness
/* First word: BPID in bits 63..48, buffer token in bits 47..0 */
311 cpu_to_be64(((u64)pool->params.bpid << 48) |
312 (bufs[0].opaque & BMAN_BUF_MASK));
314 for (i = 1; i < num; i++)
316 cpu_to_be64(bufs[i].opaque & BMAN_BUF_MASK);
/* Commit with the buffer count encoded into the verb */
319 bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
320 (num & BM_RCR_VERB_BUFCOUNT_MASK));
/*
 * Acquire up to 8 buffers from the pool via the portal's management-command
 * (MC) interface; busy-polls for the result.  The count actually supplied
 * by h/w is read from the result verb.
 */
325 int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
326 u32 flags __maybe_unused)
328 struct bman_portal *p = get_affine_portal();
329 struct bm_mc_command *mcc;
330 struct bm_mc_result *mcr;
/* Debug-build sanity checks: 1..8 buffers, pool must permit acquires */
333 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
334 if (!num || (num > 8))
336 if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
340 mcc = bm_mc_start(&p->p);
341 mcc->acquire.bpid = pool->params.bpid;
342 bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
343 (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT))


/* Busy-wait until the MC result is available */
344 while (!(mcr = bm_mc_result(&p->p)))
/* Buffer count actually returned by h/w */
346 ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
/* Copy out, converting each 64-bit buffer word from big-endian */
348 for (i = 0; i < num; i++)
350 be64_to_cpu(mcr->acquire.bufs[i].opaque);
/*
 * Query the state of all pools via the MC interface; busy-polls for the
 * result, then byte-swaps the state words in place.
 */
357 int bman_query_pools(struct bm_pool_state *state)
359 struct bman_portal *p = get_affine_portal();
360 struct bm_mc_result *mcr;
363 bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
364 while (!(mcr = bm_mc_result(&p->p)))
365 cpu_relax()


366 DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
367 BM_MCR_VERB_CMD_QUERY);
/* Convert the "as"/"ds" state words to CPU byte order -- presumably
 * availability state and depletion state respectively; TODO confirm. */
369 state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
370 state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
371 state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
372 state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
/* Return the current free-buffer count for the pool's BPID. */
376 u32 bman_query_free_buffers(struct bman_pool *pool)
378 return bm_pool_free_buffers(pool->params.bpid);
/* Reprogram the pool's depletion thresholds (4-word set, cf. zero_thresholds). */
381 int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
385 bpid = bman_get_params(pool)->bpid;
387 return bm_pool_set(bpid, thresholds);
/* Shut down the given BPID via the calling lcore's affine portal. */
390 int bman_shutdown_pool(u32 bpid)
392 struct bman_portal *p = get_affine_portal();
393 return bm_shutdown_pool(&p->p, bpid);