/*-
 *   BSD LICENSE
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_DQPI    0xa00
#define QBMAN_CINH_SWP_DCAP    0xac0
#define QBMAN_CINH_SWP_SDQCR   0xb00
#define QBMAN_CINH_SWP_RAR     0xcc0
#define QBMAN_CINH_SWP_ISR     0xe00
#define QBMAN_CINH_SWP_IER     0xe40
#define QBMAN_CINH_SWP_ISDR    0xe80
#define QBMAN_CINH_SWP_IIR     0xec0
#define QBMAN_CINH_SWP_DQRR_ITR    0xa80
#define QBMAN_CINH_SWP_ITPR    0xf40

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

#define QBMAN_RESPONSE_VERB_MASK   0x7f

/*************************/
/* SDQCR attribute codes */
/*************************/
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

/* We need to keep track of which SWP triggered a pull command
 * so keep an array of portal IDs and use the token field to
 * be able to find the proper portal.
 */
#define MAX_QBMAN_PORTALS  64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];

/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-work-around boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        int ret;
        uint32_t eqcr_pi;
        struct qbman_swp *p = malloc(sizeof(*p));

        if (!p)
                return NULL;
        p->desc = *d;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq = 0;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;

        atomic_set(&p->vdq.busy, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.next_idx = 0;
        p->dqrr.valid_bit = QB_VALID_BIT;
        qman_version = p->desc.qman_version;
        if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
        if (ret) {
                free(p);
                pr_err("qbman_swp_sys_init() failed %d\n", ret);
                return NULL;
        }
        /* SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from or else the QMan HW will indicate an
         * error.  The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
        eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & 0xF;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
        p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
                                                p->eqcr.ci, p->eqcr.pi);

        portal_idx_map[p->desc.idx] = p;
        return p;
}
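
/* Illustrative portal lifecycle (a sketch, not driver code; the descriptor
 * 'd' is assumed to come from the platform layer, e.g. the DPAA2 DPIO
 * driver, which maps the CENA/CINH regions before calling in):
 *
 *      struct qbman_swp *swp = qbman_swp_init(d);
 *
 *      if (!swp)
 *              return -ENODEV;
 *      ... enqueue/dequeue through the APIs below ...
 *      qbman_swp_finish(swp);
 */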

void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        qbman_swp_sys_finish(&p->sys);
        portal_idx_map[p->desc.idx] = NULL;
        free(p);
}

const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
        return &p->desc;
}

/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
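
/* Illustrative interrupt usage (a sketch; QBMAN_SWP_INTERRUPT_DQRI is the
 * DQRR-not-empty source bit, assumed to be defined in the portal API header):
 *
 *      qbman_swp_interrupt_set_trigger(p, QBMAN_SWP_INTERRUPT_DQRI);
 *      qbman_swp_interrupt_set_inhibit(p, 0);
 *      ...
 *      uint32_t status = qbman_swp_interrupt_read_status(p);
 *
 *      if (status & QBMAN_SWP_INTERRUPT_DQRI)
 *              ... drain entries with qbman_swp_dqrr_next() ...
 *      qbman_swp_interrupt_clear_status(p, status);
 */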

/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
        void *ret;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
        if (ret)
                p->mc.check = swp_mc_can_submit;
#endif
        return ret;
}
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
        uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
        /* TBD: "|=" is going to hurt performance. Need to move as many fields
         * out of word zero, and for those that remain, the "OR" needs to occur
         * at the caller side. This debug check helps to catch cases where the
         * caller wants to OR but has forgotten to do so.
         */
        QBMAN_BUG_ON((*v & cmd_verb) != *v);
        *v = cmd_verb | p->mc.valid_bit;
        qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
        uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
        qbman_cena_invalidate_prefetch(&p->sys,
                                       QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        /* Remove the valid-bit - command completed if the rest is non-zero */
        verb = ret[0] & ~QB_VALID_BIT;
        if (!verb)
                return NULL;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit ^= QB_VALID_BIT;
        return ret;
}
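
/* Illustrative management-command sequence (a sketch; most callers use the
 * qbman_swp_mc_complete() wrapper from qbman_portal.h, which combines the
 * submit and poll steps shown here):
 *
 *      void *cmd = qbman_swp_mc_start(p);
 *
 *      if (!cmd)
 *              return -EBUSY;
 *      ... fill in the command-specific fields ...
 *      qbman_swp_mc_submit(p, cmd, verb);
 *      do {
 *              rslt = qbman_swp_mc_result(p);
 *      } while (!rslt);        ... or poll with a bounded retry count ...
 */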

/***********/
/* Enqueue */
/***********/

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
#define QB_ENQUEUE_CMD_NLIS_SHIFT            14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
                           uint16_t opr_id, uint16_t seqnum, int incomplete)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;

        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        if (incomplete)
                d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
        else
                d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
                                dma_addr_t storage_phys,
                                int stash)
{
        d->eq.rsp_addr = storage_phys;
        d->eq.wae = stash;
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
        d->eq.rspid = token;
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->eq.tgtid = fqid;
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
                          uint16_t qd_bin, uint8_t qd_prio)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->eq.tgtid = qdid;
        d->eq.qdbin = qd_bin;
        d->eq.qpri = qd_prio;
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
        if (enable)
                d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
        else
                d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
                           uint8_t dqrr_idx, int park)
{
        if (enable) {
                d->eq.dca = dqrr_idx;
                if (park)
                        d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
                else
                        d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
                d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
        } else {
                d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
        }
}

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | EQAR_VB(eqar);
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci;
        uint8_t diff;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
        s->eqcr.pi++;
        s->eqcr.pi &= 0xF;
        s->eqcr.available--;
        if (!(s->eqcr.pi & 7))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
                      const struct qbman_fd *fd)
{
        if (s->sys.eqcr_mode == qman_eqcr_vb_array)
                return qbman_swp_enqueue_array_mode(s, d, fd);
        else    /* Use ring mode by default */
                return qbman_swp_enqueue_ring_mode(s, d, fd);
}
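
/* Illustrative single-frame enqueue (a sketch; 'fqid' and 'fd' are assumed to
 * come from the caller's queue setup and frame construction):
 *
 *      struct qbman_eq_desc d;
 *
 *      qbman_eq_desc_clear(&d);
 *      qbman_eq_desc_set_no_orp(&d, 0);        ... no enqueue response ...
 *      qbman_eq_desc_set_fq(&d, fqid);
 *      while (qbman_swp_enqueue(s, &d, fd) == -EBUSY)
 *              ;       ... EQCR full, retry ...
 */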

int qbman_swp_enqueue_multiple(struct qbman_swp *s,
                               const struct qbman_eq_desc *d,
                               const struct qbman_fd *fd,
                               int num_frames)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi;
        uint8_t diff;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                eqcr_pi &= 0xF;
                if (!(eqcr_pi & 7))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (uint64_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uint64_t *)(addr_cena +
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }
        s->eqcr.pi = eqcr_pi;

        return num_enqueued;
}
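
/* Illustrative burst enqueue (a sketch): qbman_swp_enqueue_multiple() returns
 * how many frames actually fit into EQCR, so callers typically loop until the
 * whole burst has been accepted:
 *
 *      int sent = 0;
 *
 *      while (sent < n)
 *              sent += qbman_swp_enqueue_multiple(s, &d, &fds[sent],
 *                                                 n - sent);
 */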

int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
                                    const struct qbman_eq_desc *d,
                                    const struct qbman_fd *fd,
                                    int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi;
        uint8_t diff;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                cl = qb_cl(&d[i]);
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                eqcr_pi &= 0xF;
                if (!(eqcr_pi & 7))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (uint64_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uint64_t *)(addr_cena +
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }
        s->eqcr.pi = eqcr_pi;

        return num_enqueued;
}

/*************************/
/* Static (push) dequeue */
/*************************/

void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
        uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

        QBMAN_BUG_ON(channel_idx > 15);
        *enabled = !!(src & (1 << channel_idx));
}

void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
        uint16_t dqsrc;

        QBMAN_BUG_ON(channel_idx > 15);
        if (enable)
                s->sdq |= 1 << channel_idx;
        else
                s->sdq &= ~(1 << channel_idx);

        /* Read back the complete src map.  If no channels are enabled
         * the SDQCR must be 0 or else QMan will assert errors.
         */
        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
        if (dqsrc != 0)
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
        else
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
}
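
/* Illustrative push-dequeue setup (a sketch; 'chan_idx' is the channel's
 * index within this portal's SDQCR source map):
 *
 *      qbman_swp_push_set(s, chan_idx, 1);     ... start push dequeuing ...
 *      ... frames now arrive in DQRR; poll with qbman_swp_dqrr_next() ...
 *      qbman_swp_push_set(s, chan_idx, 0);     ... stop before teardown ...
 */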

/***************************/
/* Volatile (pull) dequeue */
/***************************/

/* These should be const, eventually */
#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};

void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct qbman_result *storage,
                                 dma_addr_t storage_phys,
                                 int stash)
{
        d->pull.rsp_addr_virt = (uint64_t)storage;

        if (!storage) {
                d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
                return;
        }
        d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
        if (stash)
                d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
        else
                d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

        d->pull.rsp_addr = storage_phys;
}

void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
{
        d->pull.numf = numframes - 1;
}

void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
        d->pull.tok = token;
}

void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
        d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = fqid;
}

void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
                            enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = wqid;
}

void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
                                 enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = chid;
}

int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }

        d->pull.tok = s->sys.idx + 1;
        s->vdq.storage = (void *)d->pull.rsp_addr_virt;
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        memcpy(&p[1], &cl[1], 12);

        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);

        return 0;
}
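
/* Illustrative volatile dequeue (a sketch; 'storage' must be a DMA-able array
 * of at least 'numframes' qbman_result entries and 'storage_phys' its bus
 * address):
 *
 *      struct qbman_pull_desc pd;
 *
 *      qbman_pull_desc_clear(&pd);
 *      qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *      qbman_pull_desc_set_numframes(&pd, numframes);
 *      qbman_pull_desc_set_fq(&pd, fqid);
 *      if (qbman_swp_pull(s, &pd))
 *              return -EBUSY;
 *      while (!qbman_result_has_new_result(s, &storage[i]))
 *              ;       ... poll each response, then parse it with
 *                          qbman_result_is_DQ()/qbman_result_DQ_fd() ...
 */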

/****************/
/* Polling DQRR */
/****************/

#define QMAN_DQRR_PI_MASK              0xf

#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (unlikely(s->dqrr.reset_bug)) {
                /* We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
                             QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /* if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
                        pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                qbman_cena_invalidate_prefetch(&s->sys,
                                        QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        }
        p = qbman_cena_read_wo_shadow(&s->sys,
                                      QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        verb = p->dq.verb;

        /* If the valid-bit isn't of the expected polarity, nothing there. Note,
         * in the DQRR reset bug workaround, we shouldn't need to skip this
         * check, because we've already determined that a new entry is available
         * and we've invalidated the cacheline before reading it, so the
         * valid-bit behaviour is repaired and should tell us what we already
         * knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command
         * indicate that the vdq is no longer busy.
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & QBMAN_DQ_STAT_VOLATILE) &&
            (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);

        return p;
}

/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
                            const struct qbman_result *dq)
{
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
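
/* Illustrative DQRR polling loop (a sketch):
 *
 *      const struct qbman_result *dq;
 *
 *      while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
 *              if (qbman_result_is_DQ(dq))
 *                      ... process qbman_result_DQ_fd(dq) ...
 *              qbman_swp_dqrr_consume(s, dq);
 *      }
 */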

/*********************************/
/* Polling user-provided storage */
/*********************************/
int qbman_result_has_new_result(struct qbman_swp *s,
                                struct qbman_result *dq)
{
        if (dq->dq.tok == 0)
                return 0;

        /*
         * Set the token to 0 so we detect the hardware writing a fresh
         * (non-zero) token the next time this entry is polled. The cast is
         * there because we want users to treat the dequeue responses as
         * read-only.
         */
        ((struct qbman_result *)dq)->dq.tok = 0;

        /*
         * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
         * fact "VDQCR" shows busy doesn't mean that we hold the result that
         * makes it available. Eg. we may be looking at our 10th dequeue result,
         * having released VDQCR after the 1st result and it is now busy due to
         * some other command!
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.busy);
        }

        return 1;
}

int qbman_check_new_result(struct qbman_result *dq)
{
        if (dq->dq.tok == 0)
                return 0;

        /*
         * Set the token to 0 so we detect the hardware writing a fresh
         * (non-zero) token the next time this entry is polled. The cast is
         * there because we want users to treat the dequeue responses as
         * read-only.
         */
        ((struct qbman_result *)dq)->dq.tok = 0;

        return 1;
}

int qbman_check_command_complete(struct qbman_result *dq)
{
        struct qbman_swp *s;

        if (dq->dq.tok == 0)
                return 0;

        s = portal_idx_map[dq->dq.tok - 1];
        /*
         * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
         * fact "VDQCR" shows busy doesn't mean that we hold the result that
         * makes it available. Eg. we may be looking at our 10th dequeue result,
         * having released VDQCR after the 1st result and it is now busy due to
         * some other command!
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.busy);
        }

        return 1;
}

/********************************/
/* Categorising qbman results   */
/********************************/

static inline int __qbman_result_is_x(const struct qbman_result *dq,
                                      uint8_t x)
{
        uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;

        return (response_verb == x);
}

int qbman_result_is_DQ(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
}

int qbman_result_is_FQDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
}

int qbman_result_is_CDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
}

int qbman_result_is_CSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
                __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}

int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
}

int qbman_result_is_CGCU(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
}

int qbman_result_is_FQRN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
}

int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
}

int qbman_result_is_FQPN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
}

/*********************************/
/* Parsing frame dequeue results */
/*********************************/

/* These APIs assume qbman_result_is_DQ() is TRUE */

uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
        return dq->dq.stat;
}

uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
        return dq->dq.seqnum;
}

uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
        return dq->dq.oprid;
}

uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
        return dq->dq.fqid;
}

uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
        return dq->dq.fq_byte_cnt;
}

uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
        return dq->dq.fq_frm_cnt;
}

uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
        return dq->dq.fqd_ctx;
}

const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
        return (const struct qbman_fd *)&dq->dq.fd[0];
}

/**************************************/
/* Parsing state-change notifications */
/**************************************/
uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
        return scn->scn.state;
}

uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
        return scn->scn.rid_tok;
}

uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
        return scn->scn.ctx;
}

/*****************/
/* Parsing BPSCN */
/*****************/
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
        return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
}

int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
        return !(int)(qbman_result_SCN_state(scn) & 0x1);
}

int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
        return (int)(qbman_result_SCN_state(scn) & 0x2);
}

int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
        return (int)(qbman_result_SCN_state(scn) & 0x4);
}

uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
        return qbman_result_SCN_ctx(scn);
}

/*****************/
/* Parsing CGCU  */
/*****************/
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
        return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
}

uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
        return qbman_result_SCN_ctx(scn);
}

/******************/
/* Buffer release */
/******************/
#define QB_BR_RC_VALID_SHIFT  5
#define QB_BR_RCDI_SHIFT      6

void qbman_release_desc_clear(struct qbman_release_desc *d)
{
        memset(d, 0, sizeof(*d));
        d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
}

void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
{
        d->br.bpid = bpid;
}

void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
        if (enable)
                d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
        else
                d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
                      const uint64_t *buffers, unsigned int num_buffers)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

        pr_debug("RAR=%08x\n", rar);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;

        QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

        /* Start the release command */
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                                             QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

        /* Copy the caller's buffer pointers to the command */
        u64_to_le32_copy(&p[2], buffers, num_buffers);

        /* Set the verb byte, have to substitute in the valid-bit and the
         * number of buffers.
         */
        lwsync();
        p[0] = cl[0] | RAR_VB(rar) | num_buffers;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                            QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

        return 0;
}
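
/* Illustrative buffer release (a sketch; 'bufs' holds up to 7 buffer
 * addresses being returned to buffer pool 'bpid'):
 *
 *      struct qbman_release_desc rd;
 *
 *      qbman_release_desc_clear(&rd);
 *      qbman_release_desc_set_bpid(&rd, bpid);
 *      while (qbman_swp_release(s, &rd, bufs, num) == -EBUSY)
 *              ;       ... RCR slot busy, retry ...
 */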

/*******************/
/* Buffer acquires */
/*******************/
struct qbman_acquire_desc {
        uint8_t verb;
        uint8_t reserved;
        uint16_t bpid;
        uint8_t num;
        uint8_t reserved2[59];
};

struct qbman_acquire_rslt {
        uint8_t verb;
        uint8_t rslt;
        uint16_t reserved;
        uint8_t num;
        uint8_t reserved2[3];
        uint64_t buf[7];
};

int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
                      unsigned int num_buffers)
{
        struct qbman_acquire_desc *p;
        struct qbman_acquire_rslt *r;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        /* Start the management command */
        p = qbman_swp_mc_start(s);

        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->bpid = bpid;
        p->num = num_buffers;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
        if (unlikely(!r)) {
                pr_err("qbman: acquire from BPID %d failed, no response\n",
                       bpid);
                return -EIO;
        }

        /* Decode the outcome */
        QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
                       bpid, r->rslt);
                return -EIO;
        }

        QBMAN_BUG_ON(r->num > num_buffers);

        /* Copy the acquired buffers to the caller's array */
        u64_from_le32_copy(buffers, &r->buf[0], r->num);

        return (int)r->num;
}
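
/* Illustrative buffer acquire (a sketch): the return value is the number of
 * buffers actually obtained, which may be fewer than requested when the pool
 * runs low:
 *
 *      uint64_t bufs[7];
 *      int n = qbman_swp_acquire(s, bpid, bufs, 7);
 *
 *      if (n < 0)
 *              return n;       ... -EINVAL, -EBUSY or -EIO ...
 *      ... 'n' buffers are now owned by software ...
 */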

/*****************/
/* FQ management */
/*****************/
struct qbman_alt_fq_state_desc {
        uint8_t verb;
        uint8_t reserved[3];
        uint32_t fqid;
        uint8_t reserved2[56];
};

struct qbman_alt_fq_state_rslt {
        uint8_t verb;
        uint8_t rslt;
        uint8_t reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
                                  uint8_t alt_fq_verb)
{
        struct qbman_alt_fq_state_desc *p;
        struct qbman_alt_fq_state_rslt *r;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->fqid = fqid & ALT_FQ_FQID_MASK;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
        if (unlikely(!r)) {
                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
                       alt_fq_verb);
                return -EIO;
        }

        /* Decode the outcome */
        QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
                       fqid, alt_fq_verb, r->rslt);
                return -EIO;
        }

        return 0;
}

int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
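
/* Illustrative XOFF/XON flow control (a sketch): park a frame queue while it
 * is being reconfigured, then let it be scheduled again:
 *
 *      if (qbman_swp_fq_xoff(s, fqid))
 *              return -EIO;
 *      ... the FQ is held back from scheduling until XON ...
 *      if (qbman_swp_fq_xon(s, fqid))
 *              return -EIO;
 */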

/**********************/
/* Channel management */
/**********************/

struct qbman_cdan_ctrl_desc {
        uint8_t verb;
        uint8_t reserved;
        uint16_t ch;
        uint8_t we;
        uint8_t ctrl;
        uint16_t reserved2;
        uint64_t cdan_ctx;
        uint8_t reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
        uint8_t verb;
        uint8_t rslt;
        uint16_t ch;
        uint8_t reserved[60];
};

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
 * would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
                              uint8_t we_mask, uint8_t cdan_en,
                              uint64_t ctx)
{
        struct qbman_cdan_ctrl_desc *p;
        struct qbman_cdan_ctrl_rslt *r;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->ch = channelid;
        p->we = we_mask;
        if (cdan_en)
                p->ctrl = 1;
        else
                p->ctrl = 0;
        p->cdan_ctx = ctx;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
        if (unlikely(!r)) {
                pr_err("qbman: wqchan config failed, no response\n");
                return -EIO;
        }

        /* Decode the outcome */
        QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
                     != QBMAN_WQCHAN_CONFIGURE);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("CDAN cQID %d failed: code = 0x%02x\n",
                       channelid, r->rslt);
                return -EIO;
        }

        return 0;
}

int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
                               uint64_t ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_CTX,
                                  0, ctx);
}

int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  1, 0);
}

int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  0, 0);
}

int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
                                      uint64_t ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
                                  1, ctx);
}
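
/* Illustrative CDAN setup (a sketch): arm channel-dequeue notifications with
 * a caller-chosen context that comes back via qbman_result_SCN_ctx() when the
 * notification is dequeued:
 *
 *      if (qbman_swp_CDAN_set_context_enable(s, ch, (uint64_t)my_ctx))
 *              return -EIO;
 *      ... later, for a notification 'dq': qbman_result_is_CDAN(dq) is true
 *          and qbman_result_SCN_ctx(dq) == (uint64_t)my_ctx ...
 */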

uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
        return QBMAN_IDX_FROM_DQRR(dqrr);
}

struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
{
        struct qbman_result *dq;

        dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
        return dq;
}