/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 *
 */

#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_DQPI    0xa00
#define QBMAN_CINH_SWP_DCAP    0xac0
#define QBMAN_CINH_SWP_SDQCR   0xb00
#define QBMAN_CINH_SWP_RAR     0xcc0
#define QBMAN_CINH_SWP_ISR     0xe00
#define QBMAN_CINH_SWP_IER     0xe40
#define QBMAN_CINH_SWP_ISDR    0xe80
#define QBMAN_CINH_SWP_IIR     0xec0
#define QBMAN_CINH_SWP_DQRR_ITR    0xa80
#define QBMAN_CINH_SWP_ITPR    0xf40

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

#define QBMAN_RESPONSE_VERB_MASK   0x7f

/*************************/
/* SDQCR attribute codes */
/*************************/
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

/* We need to keep track of which SWP triggered a pull command,
 * so we keep an array of portal IDs and use the token field to
 * find the proper portal.
 */
#define MAX_QBMAN_PORTALS  64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];

/* QMan hardware version, cached at portal init; the DQRR reset-bug
 * workaround below depends on it. (Defined here because this file
 * assigns and reads it.)
 */
uint32_t qman_version;

/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-work-around boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        int ret;
        uint32_t eqcr_pi;
        struct qbman_swp *p = malloc(sizeof(*p));

        if (!p)
                return NULL;
        p->desc = *d;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq = 0;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;

        atomic_set(&p->vdq.busy, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.next_idx = 0;
        p->dqrr.valid_bit = QB_VALID_BIT;
        qman_version = p->desc.qman_version;
        if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
        if (ret) {
                free(p);
                pr_err("qbman_swp_sys_init() failed %d\n", ret);
                return NULL;
        }
        /* SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from or else the QMan HW will indicate an
         * error.  The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
        eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & 0xF;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
        p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
                                                p->eqcr.ci, p->eqcr.pi);

        portal_idx_map[p->desc.idx] = p;
        return p;
}

void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        qbman_swp_sys_finish(&p->sys);
        portal_idx_map[p->desc.idx] = NULL;
        free(p);
}

const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
        return &p->desc;
}
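
/* Usage sketch (illustrative only, not compiled): the portal lifecycle.
 * The qbman_swp_desc is normally filled in by the DPIO layer from the
 * portal's MC attributes; `pd` below stands in for whatever that layer
 * provides.
 *
 *      struct qbman_swp_desc pd;       // filled in by the DPIO/MC layer
 *      struct qbman_swp *swp = qbman_swp_init(&pd);
 *
 *      if (!swp)
 *              return -ENODEV;
 *      // ... enqueue/dequeue through the APIs below ...
 *      qbman_swp_finish(swp);
 */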

/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
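
/* Usage sketch (illustrative): an interrupt-driven flow enables the sources
 * of interest via IER, then the handler reads and clears ISR.
 * QBMAN_SWP_INTERRUPT_DQRI is assumed here to be the DQRR-interrupt bit from
 * the public portal header.
 *
 *      qbman_swp_interrupt_set_trigger(swp, QBMAN_SWP_INTERRUPT_DQRI);
 *      qbman_swp_interrupt_set_inhibit(swp, 0);
 *      ...
 *      // in the interrupt handler:
 *      uint32_t status = qbman_swp_interrupt_read_status(swp);
 *      // ... drain DQRR entries ...
 *      qbman_swp_interrupt_clear_status(swp, status);
 */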

/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
        void *ret;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
        if (!ret)
                p->mc.check = swp_mc_can_submit;
#endif
        return ret;
}

void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
        uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
        /* TBD: "|=" is going to hurt performance. Need to move as many fields
         * out of word zero, and for those that remain, the "OR" needs to occur
         * at the caller side. This debug check helps to catch cases where the
         * caller wants to OR but has forgotten to do so.
         */
        QBMAN_BUG_ON((*v & cmd_verb) != *v);
        *v = cmd_verb | p->mc.valid_bit;
        qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
        uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
        qbman_cena_invalidate_prefetch(&p->sys,
                                       QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        /* Remove the valid-bit - command completed if the rest is non-zero */
        verb = ret[0] & ~QB_VALID_BIT;
        if (!verb)
                return NULL;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit ^= QB_VALID_BIT;
        return ret;
}
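
/* The three helpers above compose into the usual synchronous pattern: start a
 * command, encode it, submit it, then poll for the response. A minimal sketch
 * of that pattern (qbman_swp_mc_complete(), called later in this file,
 * presumably wraps the same submit-and-poll sequence):
 *
 *      void *cmd = qbman_swp_mc_start(swp);
 *      void *rsp;
 *
 *      if (!cmd)
 *              return -EBUSY;
 *      // ... encode command-specific fields into cmd ...
 *      qbman_swp_mc_submit(swp, cmd, verb);
 *      do {
 *              rsp = qbman_swp_mc_result(swp);
 *      } while (!rsp); // a real caller would bound this loop
 */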

/***********/
/* Enqueue */
/***********/

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
#define QB_ENQUEUE_CMD_NLIS_SHIFT            14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
                           uint16_t opr_id, uint16_t seqnum, int incomplete)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;

        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        if (incomplete)
                d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
        else
                d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
                                dma_addr_t storage_phys,
                                int stash)
{
        d->eq.rsp_addr = storage_phys;
        d->eq.wae = stash;
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
        d->eq.rspid = token;
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->eq.tgtid = fqid;
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
                          uint16_t qd_bin, uint8_t qd_prio)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->eq.tgtid = qdid;
        d->eq.qdbin = qd_bin;
        d->eq.qpri = qd_prio;
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
        if (enable)
                d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
        else
                d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
                           uint8_t dqrr_idx, int park)
{
        if (enable) {
                d->eq.dca = dqrr_idx;
                if (park)
                        d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
                else
                        d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
                d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
        } else {
                d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
        }
}

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | EQAR_VB(eqar);
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci;
        uint8_t diff;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
        s->eqcr.pi++;
        s->eqcr.pi &= 0xF;
        s->eqcr.available--;
        if (!(s->eqcr.pi & 7))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
                      const struct qbman_fd *fd)
{
        if (s->sys.eqcr_mode == qman_eqcr_vb_array)
                return qbman_swp_enqueue_array_mode(s, d, fd);
        else    /* Use ring mode by default */
                return qbman_swp_enqueue_ring_mode(s, d, fd);
}
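
/* Usage sketch (illustrative): enqueue one frame descriptor to a frame queue,
 * retrying while the EQCR is full. `fqid` and `fd` are assumed to come from
 * the caller's configuration.
 *
 *      struct qbman_eq_desc ed;
 *      int ret;
 *
 *      qbman_eq_desc_clear(&ed);
 *      qbman_eq_desc_set_no_orp(&ed, 0);       // no ORP, rejections to FQ
 *      qbman_eq_desc_set_fq(&ed, fqid);
 *      do {
 *              ret = qbman_swp_enqueue(swp, &ed, fd);
 *      } while (ret == -EBUSY);
 */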

int qbman_swp_enqueue_multiple(struct qbman_swp *s,
                               const struct qbman_eq_desc *d,
                               const struct qbman_fd *fd,
                               uint32_t *flags,
                               int num_frames)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi;
        uint8_t diff;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                eqcr_pi &= 0xF;
                if (!(eqcr_pi & 7))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without any load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (uint64_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uint64_t *)(addr_cena +
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }
        s->eqcr.pi = eqcr_pi;

        return num_enqueued;
}

int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
                                    const struct qbman_eq_desc *d,
                                    const struct qbman_fd *fd,
                                    int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi;
        uint8_t diff;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                cl = qb_cl(&d[i]);
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                eqcr_pi &= 0xF;
                if (!(eqcr_pi & 7))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without any load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (uint64_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uint64_t *)(addr_cena +
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }
        s->eqcr.pi = eqcr_pi;

        return num_enqueued;
}
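
/* Usage sketch (illustrative): the multiple-enqueue variants return how many
 * frames were actually accepted (possibly 0 when the EQCR is full), so a
 * caller pushing a burst of `n` frames loops over the remainder:
 *
 *      int sent = 0;
 *
 *      while (sent < n)
 *              sent += qbman_swp_enqueue_multiple(swp, &ed, &fds[sent],
 *                                                 NULL, n - sent);
 */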

/*************************/
/* Static (push) dequeue */
/*************************/

void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
        uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

        QBMAN_BUG_ON(channel_idx > 15);
        /* Report whether this channel's bit is set in the SDQCR source map */
        *enabled = src & (1 << channel_idx);
}

void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
        uint16_t dqsrc;

        QBMAN_BUG_ON(channel_idx > 15);
        if (enable)
                s->sdq |= 1 << channel_idx;
        else
                s->sdq &= ~(1 << channel_idx);

        /* Read back the complete src map.  If no channels are enabled
         * the SDQCR must be 0 or else QMan will assert errors
         */
        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
        if (dqsrc != 0)
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
        else
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
}
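
/* Usage sketch (illustrative): enable push dequeue from channel index 0 of
 * this portal; entries then arrive in the DQRR ring polled further below.
 *
 *      qbman_swp_push_set(swp, 0, 1);  // enable; SDQCR is programmed
 *      ...
 *      qbman_swp_push_set(swp, 0, 0);  // disable; SDQCR drops back to 0
 */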

/***************************/
/* Volatile (pull) dequeue */
/***************************/

/* These should be const, eventually */
#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};

void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct qbman_result *storage,
                                 dma_addr_t storage_phys,
                                 int stash)
{
        d->pull.rsp_addr_virt = (uint64_t)storage;

        if (!storage) {
                d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
                return;
        }
        d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
        if (stash)
                d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
        else
                d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

        d->pull.rsp_addr = storage_phys;
}

void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
{
        d->pull.numf = numframes - 1;
}

void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
        d->pull.tok = token;
}

void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
        d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = fqid;
}

void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
                            enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = wqid;
}

void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
                                 enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = chid;
}

int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }

        d->pull.tok = s->sys.idx + 1;
        s->vdq.storage = (void *)d->pull.rsp_addr_virt;
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        memcpy(&p[1], &cl[1], 12);

        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);

        return 0;
}
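
/* Usage sketch (illustrative): pull up to 8 frames from a frame queue into
 * caller-provided storage, then poll that storage. `storage` and
 * `storage_phys` are assumed to be a DMA-able array of qbman_result and its
 * bus address.
 *
 *      struct qbman_pull_desc pd;
 *
 *      qbman_pull_desc_clear(&pd);
 *      qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *      qbman_pull_desc_set_numframes(&pd, 8);
 *      qbman_pull_desc_set_fq(&pd, fqid);
 *      while (qbman_swp_pull(swp, &pd) == -EBUSY)
 *              ;       // previous volatile dequeue still in flight
 *      while (!qbman_result_has_new_result(swp, &storage[0]))
 *              ;       // wait for the first response to land
 */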

/****************/
/* Polling DQRR */
/****************/

#define QMAN_DQRR_PI_MASK              0xf

#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

#include <rte_prefetch.h>

void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
{
        const struct qbman_result *p;

        p = qbman_cena_read_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        rte_prefetch0(p);
}

/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (unlikely(s->dqrr.reset_bug)) {
                /* We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
                             QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /* if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
                        pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                qbman_cena_invalidate_prefetch(&s->sys,
                                        QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        }
        p = qbman_cena_read_wo_shadow(&s->sys,
                                      QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        verb = p->dq.verb;

        /* If the valid-bit isn't of the expected polarity, nothing there. Note,
         * in the DQRR reset bug workaround, we shouldn't need to skip this
         * check, because we've already determined that a new entry is
         * available and we've invalidated the cacheline before reading it, so
         * the valid-bit behaviour is repaired and should tell us what we
         * already knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command
         * indicate that the vdq is no longer busy
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & QBMAN_DQ_STAT_VOLATILE) &&
            (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);

        return p;
}

/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
                            const struct qbman_result *dq)
{
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}

/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
                            uint8_t dqrr_index)
{
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
}
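
/* Usage sketch (illustrative): the canonical DQRR poll loop. Each entry is
 * returned once by qbman_swp_dqrr_next() and must be consumed to free its
 * ring slot. QBMAN_DQ_STAT_VALIDFRAME is assumed to be the "FD is valid"
 * status flag from the public portal header.
 *
 *      const struct qbman_result *dq;
 *
 *      while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *              if (qbman_result_is_DQ(dq) &&
 *                  (qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VALIDFRAME)) {
 *                      const struct qbman_fd *fd = qbman_result_DQ_fd(dq);
 *                      // ... hand fd to the application ...
 *              }
 *              qbman_swp_dqrr_consume(swp, dq);
 *      }
 */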

/*********************************/
/* Polling user-provided storage */
/*********************************/
int qbman_result_has_new_result(struct qbman_swp *s,
                                struct qbman_result *dq)
{
        if (dq->dq.tok == 0)
                return 0;

        /*
         * Set the token to 0 so we will detect the change back to 1 the next
         * time this entry is polled. Const is cast away here as we want users
         * to treat the dequeue responses as read only.
         */
        ((struct qbman_result *)dq)->dq.tok = 0;

        /*
         * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
         * fact "VDQCR" shows busy doesn't mean that we hold the result that
         * makes it available. Eg. we may be looking at our 10th dequeue result,
         * having released VDQCR after the 1st result and it is now busy due to
         * some other command!
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.busy);
        }

        return 1;
}

int qbman_check_new_result(struct qbman_result *dq)
{
        if (dq->dq.tok == 0)
                return 0;

        /*
         * Set the token to 0 so we will detect the change back to 1 the next
         * time this entry is polled. Const is cast away here as we want users
         * to treat the dequeue responses as read only.
         */
        ((struct qbman_result *)dq)->dq.tok = 0;

        return 1;
}

int qbman_check_command_complete(struct qbman_result *dq)
{
        struct qbman_swp *s;

        if (dq->dq.tok == 0)
                return 0;

        s = portal_idx_map[dq->dq.tok - 1];
        /*
         * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
         * fact "VDQCR" shows busy doesn't mean that we hold the result that
         * makes it available. Eg. we may be looking at our 10th dequeue result,
         * having released VDQCR after the 1st result and it is now busy due to
         * some other command!
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.busy);
        }

        return 1;
}

/********************************/
/* Categorising qbman results   */
/********************************/

static inline int __qbman_result_is_x(const struct qbman_result *dq,
                                      uint8_t x)
{
        uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;

        return (response_verb == x);
}

int qbman_result_is_DQ(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
}

int qbman_result_is_FQDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
}

int qbman_result_is_CDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
}

int qbman_result_is_CSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
                __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}

int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
}

int qbman_result_is_CGCU(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
}

int qbman_result_is_FQRN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
}

int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
}

int qbman_result_is_FQPN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
}

/*********************************/
/* Parsing frame dequeue results */
/*********************************/

/* These APIs assume qbman_result_is_DQ() is TRUE */

uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
        return dq->dq.stat;
}

uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
        return dq->dq.seqnum;
}

uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
        return dq->dq.oprid;
}

uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
        return dq->dq.fqid;
}

uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
        return dq->dq.fq_byte_cnt;
}

uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
        return dq->dq.fq_frm_cnt;
}

uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
        return dq->dq.fqd_ctx;
}

const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
        return (const struct qbman_fd *)&dq->dq.fd[0];
}

/**************************************/
/* Parsing state-change notifications */
/**************************************/
uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
        return scn->scn.state;
}

uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
        return scn->scn.rid_tok;
}

uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
        return scn->scn.ctx;
}

/*****************/
/* Parsing BPSCN */
/*****************/
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
        return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
}

int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
        return !(int)(qbman_result_SCN_state(scn) & 0x1);
}

int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
        return (int)(qbman_result_SCN_state(scn) & 0x2);
}

int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
        return (int)(qbman_result_SCN_state(scn) & 0x4);
}

uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
        return qbman_result_SCN_ctx(scn);
}

/*****************/
/* Parsing CGCU  */
/*****************/
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
        return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
}

uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
        return qbman_result_SCN_ctx(scn);
}

/******************/
/* Buffer release */
/******************/
#define QB_BR_RC_VALID_SHIFT  5
#define QB_BR_RCDI_SHIFT      6

void qbman_release_desc_clear(struct qbman_release_desc *d)
{
        memset(d, 0, sizeof(*d));
        d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
}

void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
{
        d->br.bpid = bpid;
}

void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
        if (enable)
                d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
        else
                d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
                      const uint64_t *buffers, unsigned int num_buffers)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

        pr_debug("RAR=%08x\n", rar);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;

        QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

        /* Start the release command */
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                                             QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

        /* Copy the caller's buffer pointers to the command */
        u64_to_le32_copy(&p[2], buffers, num_buffers);

        /* Set the verb byte, have to substitute in the valid-bit and the number
         * of buffers.
         */
        lwsync();
        p[0] = cl[0] | RAR_VB(rar) | num_buffers;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                            QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

        return 0;
}
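
/* Usage sketch (illustrative): release two buffer addresses back to buffer
 * pool `bpid`, retrying while the release command register is busy.
 *
 *      struct qbman_release_desc rd;
 *      uint64_t bufs[2] = { buf0_phys, buf1_phys };    // placeholder addrs
 *
 *      qbman_release_desc_clear(&rd);
 *      qbman_release_desc_set_bpid(&rd, bpid);
 *      while (qbman_swp_release(swp, &rd, bufs, 2) == -EBUSY)
 *              ;
 */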

/*******************/
/* Buffer acquires */
/*******************/
struct qbman_acquire_desc {
        uint8_t verb;
        uint8_t reserved;
        uint16_t bpid;
        uint8_t num;
        uint8_t reserved2[59];
};

struct qbman_acquire_rslt {
        uint8_t verb;
        uint8_t rslt;
        uint16_t reserved;
        uint8_t num;
        uint8_t reserved2[3];
        uint64_t buf[7];
};

int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
                      unsigned int num_buffers)
{
        struct qbman_acquire_desc *p;
        struct qbman_acquire_rslt *r;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        /* Start the management command */
        p = qbman_swp_mc_start(s);

        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->bpid = bpid;
        p->num = num_buffers;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
        if (unlikely(!r)) {
                pr_err("qbman: acquire from BPID %d failed, no response\n",
                       bpid);
                return -EIO;
        }

        /* Decode the outcome */
        QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
                       bpid, r->rslt);
                return -EIO;
        }

        QBMAN_BUG_ON(r->num > num_buffers);

        /* Copy the acquired buffers to the caller's array */
        u64_from_le32_copy(buffers, &r->buf[0], r->num);

        return (int)r->num;
}
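
/* Usage sketch (illustrative): acquire buffers from pool `bpid`; the return
 * value is the number actually obtained, which can be less than requested
 * when the pool is running low.
 *
 *      uint64_t bufs[7];
 *      int n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *
 *      if (n < 0)
 *              return n;       // -EINVAL, -EBUSY or -EIO
 *      // bufs[0..n-1] now hold buffer addresses from the pool
 */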

/*****************/
/* FQ management */
/*****************/
struct qbman_alt_fq_state_desc {
        uint8_t verb;
        uint8_t reserved[3];
        uint32_t fqid;
        uint8_t reserved2[56];
};

struct qbman_alt_fq_state_rslt {
        uint8_t verb;
        uint8_t rslt;
        uint8_t reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
                                  uint8_t alt_fq_verb)
{
        struct qbman_alt_fq_state_desc *p;
        struct qbman_alt_fq_state_rslt *r;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->fqid = fqid & ALT_FQ_FQID_MASK;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
        if (unlikely(!r)) {
                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
                       alt_fq_verb);
                return -EIO;
        }

        /* Decode the outcome */
        QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
                       fqid, alt_fq_verb, r->rslt);
                return -EIO;
        }

        return 0;
}

int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

/**********************/
/* Channel management */
/**********************/

struct qbman_cdan_ctrl_desc {
        uint8_t verb;
        uint8_t reserved;
        uint16_t ch;
        uint8_t we;
        uint8_t ctrl;
        uint16_t reserved2;
        uint64_t cdan_ctx;
        uint8_t reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
        uint8_t verb;
        uint8_t rslt;
        uint16_t ch;
        uint8_t reserved[60];
};

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
 * would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
                              uint8_t we_mask, uint8_t cdan_en,
                              uint64_t ctx)
{
        struct qbman_cdan_ctrl_desc *p;
        struct qbman_cdan_ctrl_rslt *r;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->ch = channelid;
        p->we = we_mask;
        if (cdan_en)
                p->ctrl = 1;
        else
                p->ctrl = 0;
        p->cdan_ctx = ctx;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
        if (unlikely(!r)) {
                pr_err("qbman: wqchan config failed, no response\n");
                return -EIO;
        }

        /* Decode the outcome */
        QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
                     != QBMAN_WQCHAN_CONFIGURE);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("CDAN cQID %d failed: code = 0x%02x\n",
                       channelid, r->rslt);
                return -EIO;
        }

        return 0;
}

int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
                               uint64_t ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_CTX,
                                  0, ctx);
}

int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  1, 0);
}

int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  0, 0);
}

int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
                                      uint64_t ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
                                  1, ctx);
}
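
/* Usage sketch (illustrative): arm channel data-availability notifications
 * for a channel, stashing a caller context that comes back through
 * qbman_result_SCN_ctx() when the CDAN fires.
 *
 *      if (qbman_swp_CDAN_set_context_enable(swp, ch_id, (uint64_t)my_ctx))
 *              return -EIO;
 *      // after servicing a CDAN, re-arm with:
 *      qbman_swp_CDAN_enable(swp, ch_id);
 */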

uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
        return QBMAN_IDX_FROM_DQRR(dqrr);
}

struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
{
        struct qbman_result *dq;

        dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
        return dq;
}