/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 */

#include "qbman_sys.h"
#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
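/* DQRR entries are 64 bytes each within a 512-byte window, so bits 8:6 of a
 * DQRR entry's address give its ring index; e.g. an entry at window offset
 * 0x0c0 maps to index (0x0c0 & 0x1ff) >> 6 = 3.
 */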

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

#define QBMAN_RESPONSE_VERB_MASK   0x7f

/*************************/
/* SDQCR attribute codes */
/*************************/
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

/* We need to keep track of which SWP triggered a pull command
 * so keep an array of portal IDs and use the token field to
 * be able to find the proper portal
 */
#define MAX_QBMAN_PORTALS  64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];

/* Internal Function declaration */
static int
qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);

static int
qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);

static int
qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);

static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);
static int
qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);

static int
qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
static int
qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);

const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int
qbman_swp_release_direct(struct qbman_swp *s,
                const struct qbman_release_desc *d,
                const uint64_t *buffers, unsigned int num_buffers);
static int
qbman_swp_release_mem_back(struct qbman_swp *s,
                const struct qbman_release_desc *d,
                const uint64_t *buffers, unsigned int num_buffers);

/* Function pointers */
static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
        = qbman_swp_enqueue_array_mode_direct;

static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
        = qbman_swp_enqueue_ring_mode_direct;

static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames)
        = qbman_swp_enqueue_multiple_direct;

static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames)
        = qbman_swp_enqueue_multiple_desc_direct;

static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
                struct qbman_pull_desc *d)
        = qbman_swp_pull_direct;

const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
                = qbman_swp_dqrr_next_direct;

static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
                        const struct qbman_release_desc *d,
                        const uint64_t *buffers, unsigned int num_buffers)
                        = qbman_swp_release_direct;

/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-workaround boolean once PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        int ret;
        uint32_t eqcr_pi;
        uint32_t mask_size;
        struct qbman_swp *p = malloc(sizeof(*p));

        if (!p)
                return NULL;

        memset(p, 0, sizeof(struct qbman_swp));

        p->desc = *d;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
                p->mr.valid_bit = QB_VALID_BIT;

        atomic_set(&p->vdq.busy, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.valid_bit = QB_VALID_BIT;
        qman_version = p->desc.qman_version;
        if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
        if (ret) {
                free(p);
                pr_err("qbman_swp_sys_init() failed %d\n", ret);
                return NULL;
        }

        /* Verify that the DQRR PI is 0 - if it is not, the portal is not in
         * its default state, which is an error.
         */
        if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
                pr_err("qbman DQRR PI is not zero, portal is not clean\n");
                free(p);
                return NULL;
        }

        /* SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from or else the QMan HW will indicate an
         * error.  The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);

        p->eqcr.pi_ring_size = 8;
        if ((qman_version & 0xFFFF0000) >= QMAN_REV_5000) {
                p->eqcr.pi_ring_size = 32;
                qbman_swp_enqueue_array_mode_ptr =
                                qbman_swp_enqueue_array_mode_mem_back;
                qbman_swp_enqueue_ring_mode_ptr =
                                qbman_swp_enqueue_ring_mode_mem_back;
                qbman_swp_enqueue_multiple_ptr =
                                qbman_swp_enqueue_multiple_mem_back;
                qbman_swp_enqueue_multiple_desc_ptr =
                                qbman_swp_enqueue_multiple_desc_mem_back;
                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
                qbman_swp_release_ptr = qbman_swp_release_mem_back;
        }

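        /* Build pi_mask one bit wider than needed to index pi_ring_size
         * entries (ring size 8 gives 0xf, ring size 32 gives 0x3f); the
         * extra top bit is used below to track ring-wrap parity.
         */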
        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
                p->eqcr.pi_mask = (p->eqcr.pi_mask<<1) + 1;
        eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & p->eqcr.pi_mask;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                p->eqcr.ci = qbman_cinh_read(&p->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & p->eqcr.pi_mask;
        else
                p->eqcr.ci = qbman_cinh_read(&p->sys,
                                QBMAN_CINH_SWP_EQCR_PI) & p->eqcr.pi_mask;
        p->eqcr.available = p->eqcr.pi_ring_size -
                                qm_cyc_diff(p->eqcr.pi_ring_size,
                                p->eqcr.ci & (p->eqcr.pi_mask<<1),
                                p->eqcr.pi & (p->eqcr.pi_mask<<1));

        portal_idx_map[p->desc.idx] = p;
        return p;
}

void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        qbman_swp_sys_finish(&p->sys);
        portal_idx_map[p->desc.idx] = NULL;
        free(p);
}

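/* A minimal portal bring-up/tear-down sketch (illustrative only; it assumes
 * the caller has obtained a populated struct qbman_swp_desc, e.g. from the
 * DPIO driver):
 *
 *	struct qbman_swp *swp = qbman_swp_init(&desc);
 *
 *	if (!swp)
 *		return -ENODEV;
 *	...use the portal...
 *	qbman_swp_finish(swp);
 */
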
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
        return &p->desc;
}

/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
                         inhibit ? 0xffffffff : 0);
}

/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
        void *ret;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
        else
                ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
#ifdef QBMAN_CHECKING
        if (!ret)
                p->mc.check = swp_mc_can_submit;
#endif
        return ret;
}

void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
        uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
        /* TBD: "|=" is going to hurt performance. Need to move as many fields
         * out of word zero, and for those that remain, the "OR" needs to occur
         * at the caller side. This debug check helps to catch cases where the
         * caller wants to OR but has forgotten to do so.
         */
        QBMAN_BUG_ON((*v & cmd_verb) != *v);
        if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                dma_wmb();
                *v = cmd_verb | p->mc.valid_bit;
                qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
                clean(cmd);
        } else {
                *v = cmd_verb | p->mr.valid_bit;
                qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
                dma_wmb();
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
        }
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
        uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
        if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                qbman_cena_invalidate_prefetch(&p->sys,
                                QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                ret = qbman_cena_read(&p->sys,
                                QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mc.valid_bit ^= QB_VALID_BIT;
        } else {
                ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
                /* Command completed if the valid bit is toggled */
                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
                        return NULL;
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mr.valid_bit ^= QB_VALID_BIT;
        }
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        return ret;
}

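/* The start/submit/result calls above form one management-command
 * transaction. A hedged sketch of the polling pattern (the command body and
 * the use of QBMAN_MC_ACQUIRE here are placeholders, not a real command
 * layout):
 *
 *	uint32_t *cmd = qbman_swp_mc_start(swp);
 *	void *rsp;
 *
 *	if (!cmd)
 *		return -EBUSY;
 *	cmd[1] = ...;  (fill in the command body)
 *	qbman_swp_mc_submit(swp, cmd, QBMAN_MC_ACQUIRE);
 *	do
 *		rsp = qbman_swp_mc_result(swp);
 *	while (!rsp);  (NULL until the hardware completes the command)
 */
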
/***********/
/* Enqueue */
/***********/

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
#define QB_ENQUEUE_CMD_NLIS_SHIFT            14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
                           uint16_t opr_id, uint16_t seqnum, int incomplete)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;

        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        if (incomplete)
                d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
        else
                d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
                                dma_addr_t storage_phys,
                                int stash)
{
        d->eq.rsp_addr = storage_phys;
        d->eq.wae = stash;
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
        d->eq.rspid = token;
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->eq.tgtid = fqid;
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
                          uint16_t qd_bin, uint8_t qd_prio)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->eq.tgtid = qdid;
        d->eq.qdbin = qd_bin;
        d->eq.qpri = qd_prio;
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
        if (enable)
                d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
        else
                d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
                           uint8_t dqrr_idx, int park)
{
        if (enable) {
                d->eq.dca = dqrr_idx;
                if (park)
                        d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
                else
                        d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
                d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
        } else {
                d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
        }
}

#define EQAR_IDX(eqar)     ((eqar) & 0x1f)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
                                                   uint8_t idx)
{
        if (idx < 16)
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
                                     QMAN_RT_MODE);
        else
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
                                     (idx - 16) * 4,
                                     QMAN_RT_MODE);
}


static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        dma_wmb();
        p[0] = cl[0] | EQAR_VB(eqar);
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        return 0;
}

static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
                                                 const struct qbman_eq_desc *d,
                                                 const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | EQAR_VB(eqar);
        dma_wmb();
        qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
        return 0;
}

static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd)
{
        return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
}

static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                                              const struct qbman_eq_desc *d,
                                              const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_mask>>1);
        full_mask = s->eqcr.pi_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                                                const struct qbman_eq_desc *d,
                                                const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_mask>>1);
        full_mask = s->eqcr.pi_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;
        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct qbman_fd *fd)
{
        return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
                      const struct qbman_fd *fd)
{
        if (s->sys.eqcr_mode == qman_eqcr_vb_array)
                return qbman_swp_enqueue_array_mode(s, d, fd);
        else    /* Use ring mode by default */
                return qbman_swp_enqueue_ring_mode(s, d, fd);
}

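/* A hedged single-frame enqueue sketch (fqid and fd are placeholders the
 * caller would get from the target queue and the frame being sent):
 *
 *	struct qbman_eq_desc d;
 *
 *	qbman_eq_desc_clear(&d);
 *	qbman_eq_desc_set_no_orp(&d, 0);  (no enqueue response needed)
 *	qbman_eq_desc_set_fq(&d, fqid);
 *	while (qbman_swp_enqueue(swp, &d, &fd) == -EBUSY)
 *		;  (EQCR full, retry)
 */
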
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct qbman_fd *fd,
                                             uint32_t *flags,
                                             int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_mask>>1);
        full_mask = s->eqcr.pi_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without any load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uintptr_t)(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd,
                                               uint32_t *flags,
                                               int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_mask>>1);
        full_mask = s->eqcr.pi_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        return num_enqueued;
}

inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct qbman_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
{
        return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}

static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd,
                                        int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_mask>>1);
        full_mask = s->eqcr.pi_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without any load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uintptr_t)(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd,
                                        int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_mask>>1);
        full_mask = s->eqcr.pi_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

        return num_enqueued;
}

inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
                                           const struct qbman_eq_desc *d,
                                           const struct qbman_fd *fd,
                                           int num_frames)
{
        return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}

/*************************/
/* Static (push) dequeue */
/*************************/

void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
        uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

        QBMAN_BUG_ON(channel_idx > 15);
        /* Report whether this channel's bit is set in the SDQCR source map */
        *enabled = (int)(src & (1 << channel_idx));
}

void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
        uint16_t dqsrc;

        QBMAN_BUG_ON(channel_idx > 15);
        if (enable)
                s->sdq |= 1 << channel_idx;
        else
                s->sdq &= ~(1 << channel_idx);

        /* Read back the complete src map. If no channels are enabled,
         * the SDQCR must be 0 or else QMan will assert errors
         */
        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
        if (dqsrc != 0)
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
        else
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
}

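/* A hedged push-dequeue sketch: enable one channel index, poll the DQRR
 * (see qbman_swp_dqrr_next() below), then disable it again:
 *
 *	qbman_swp_push_set(swp, 0, 1);  (enable channel index 0)
 *	...poll and consume DQRR entries...
 *	qbman_swp_push_set(swp, 0, 0);  (disable when done)
 */
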
/***************************/
/* Volatile (pull) dequeue */
/***************************/

/* These should be const, eventually */
#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5
#define QB_VDQCR_VERB_RAD_SHIFT    6

enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};

void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct qbman_result *storage,
                                 dma_addr_t storage_phys,
                                 int stash)
{
        d->pull.rsp_addr_virt = (size_t)storage;

        if (!storage) {
                d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
                return;
        }
        d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
        if (stash)
                d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
        else
                d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

        d->pull.rsp_addr = storage_phys;
}

void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
                                   uint8_t numframes)
{
        d->pull.numf = numframes - 1;
}

void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
        d->pull.tok = token;
}

void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
        d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = fqid;
}

void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
                            enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = wqid;
}

void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
                                 enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = chid;
}

void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
{
        if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
                if (rad)
                        d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
                else
                        d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
        } else {
                printf("The RAD feature is not valid when RLS = 0\n");
        }
}

static int qbman_swp_pull_direct(struct qbman_swp *s,
                                 struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }

        d->pull.tok = s->sys.idx + 1;
        s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        memcpy(&p[1], &cl[1], 12);

        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);

        return 0;
}

static int qbman_swp_pull_mem_back(struct qbman_swp *s,
                                   struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }

        d->pull.tok = s->sys.idx + 1;
        s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
        memcpy(&p[1], &cl[1], 12);

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

        return 0;
}

inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        return qbman_swp_pull_ptr(s, d);
}

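/* A hedged volatile-dequeue sketch (storage and storage_iova are
 * caller-provided result storage and its DMA address; fqid is a placeholder):
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_iova, 1);
 *	qbman_pull_desc_set_numframes(&pd, 4);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (qbman_swp_pull(swp, &pd))
 *		return -EBUSY;  (a previous pull is still in flight)
 *	while (!qbman_check_new_result(storage))
 *		;  (spin until the first result lands in storage)
 */
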
/****************/
/* Polling DQRR */
/****************/

#define QMAN_DQRR_PI_MASK              0xf

#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

#include <rte_prefetch.h>

void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
{
        const struct qbman_result *p;

        p = qbman_cena_read_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        rte_prefetch0(p);
}

/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
        return qbman_swp_dqrr_next_ptr(s);
}

const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (s->dqrr.reset_bug) {
                /* We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
                             QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /* if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
                        pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                qbman_cena_invalidate_prefetch(&s->sys,
                                        QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        }
        p = qbman_cena_read_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));

        verb = p->dq.verb;

        /* If the valid-bit isn't of the expected polarity, nothing is there.
         * Note, in the DQRR reset bug workaround, we shouldn't need to skip
         * this check, because we've already determined that a new entry is
         * available and we've invalidated the cacheline before reading it, so
         * the valid-bit behaviour is repaired and should tell us what we
         * already knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command
         * indicate that the vdq is no longer busy
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & QBMAN_DQ_STAT_VOLATILE) &&
            (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);

        return p;
}

const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *p;

        p = qbman_cena_read_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));

        verb = p->dq.verb;

        /* If the valid-bit isn't of the expected polarity, nothing is there.
         * (No reset-bug handling is needed here; mem-back portals are only
         * used on QMan revisions that reset DQRR valid-bits correctly.)
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command
         * indicate that the vdq is no longer busy
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & QBMAN_DQ_STAT_VOLATILE) &&
            (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);
        return p;
}

/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
                            const struct qbman_result *dq)
{
        qbman_cinh_write(&s->sys,
                        QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}

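/* A hedged DQRR polling sketch combining the calls above (process() is a
 * placeholder for the caller's frame handling):
 *
 *	const struct qbman_result *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *		if (qbman_result_is_DQ(dq))
 *			process(qbman_result_DQ_fd(dq));
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */
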
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
                            uint8_t dqrr_index)
{
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
}

/*********************************/
/* Polling user-provided storage */
/*********************************/

int qbman_result_has_new_result(struct qbman_swp *s,
                                struct qbman_result *dq)
{
        if (dq->dq.tok == 0)
                return 0;

        /*
         * Set the token to 0 so we will detect the change back to 1 the next
         * time the loop is traversed. Const is cast away here as we want
         * users to treat the dequeue responses as read only.
         */
        ((struct qbman_result *)dq)->dq.tok = 0;

        /*
         * VDQCR "no longer busy" hook - not quite the same as DQRR, because
         * the fact that "VDQCR" shows busy doesn't mean that we hold the
         * result that makes it available. E.g. we may be looking at our 10th
         * dequeue result, having released VDQCR after the 1st result and it is
         * now busy due to some other command!
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.busy);
        }

        return 1;
}

int qbman_check_new_result(struct qbman_result *dq)
{
        if (dq->dq.tok == 0)
                return 0;

        /*
         * Set the token to 0 so we will detect the change back to 1 the next
         * time the loop is traversed. Const is cast away here as we want
         * users to treat the dequeue responses as read only.
         */
        ((struct qbman_result *)dq)->dq.tok = 0;

        return 1;
}

int qbman_check_command_complete(struct qbman_result *dq)
{
        struct qbman_swp *s;

        if (dq->dq.tok == 0)
                return 0;

        s = portal_idx_map[dq->dq.tok - 1];
        /*
         * VDQCR "no longer busy" hook - not quite the same as DQRR, because
         * the fact that "VDQCR" shows busy doesn't mean that we hold the
         * result that makes it available. E.g. we may be looking at our 10th
         * dequeue result, having released VDQCR after the 1st result and it is
         * now busy due to some other command!
         */
1402         if (s->vdq.storage == dq) {
1403                 s->vdq.storage = NULL;
1404                 atomic_inc(&s->vdq.busy);
1405         }
1406
1407         return 1;
1408 }

/********************************/
/* Categorising qbman results   */
/********************************/

static inline int __qbman_result_is_x(const struct qbman_result *dq,
                                      uint8_t x)
{
        uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;

        return (response_verb == x);
}

int qbman_result_is_DQ(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
}

int qbman_result_is_FQDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
}

int qbman_result_is_CDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
}

int qbman_result_is_CSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
                __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}

int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
}

int qbman_result_is_CGCU(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
}

int qbman_result_is_FQRN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
}

int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
}

int qbman_result_is_FQPN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
}
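
/*
 * Usage sketch (not part of this file): dispatching on the result type
 * before parsing type-specific fields. "msg" is a hypothetical
 * caller-side pointer to a received result.
 *
 *      if (qbman_result_is_DQ(msg))
 *              ... frame dequeue: use the DQ accessors below ...
 *      else if (qbman_result_is_CDAN(msg))
 *              ... channel data availability notification ...
 *      else if (qbman_result_is_BPSCN(msg))
 *              ... buffer pool state change ...
 */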

/*********************************/
/* Parsing frame dequeue results */
/*********************************/

/* These APIs assume qbman_result_is_DQ() is TRUE */

uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
        return dq->dq.stat;
}

uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
        return dq->dq.seqnum;
}

uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
        return dq->dq.oprid;
}

uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
        return dq->dq.fqid;
}

uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
        return dq->dq.fq_byte_cnt;
}

uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
        return dq->dq.fq_frm_cnt;
}

uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
        return dq->dq.fqd_ctx;
}

const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
        return (const struct qbman_fd *)&dq->dq.fd[0];
}
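
/*
 * Usage sketch (not part of this file): pulling the frame descriptor
 * out of a confirmed frame-dequeue result. "msg" is a hypothetical
 * caller-side pointer.
 *
 *      if (qbman_result_is_DQ(msg)) {
 *              const struct qbman_fd *fd = qbman_result_DQ_fd(msg);
 *              uint32_t fqid = qbman_result_DQ_fqid(msg);
 *              ... hand fd off to frame processing ...
 *      }
 */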

/**************************************/
/* Parsing state-change notifications */
/**************************************/
uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
        return scn->scn.state;
}

uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
        return scn->scn.rid_tok;
}

uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
        return scn->scn.ctx;
}

/*****************/
/* Parsing BPSCN */
/*****************/
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
        return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
}

int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
        return !(int)(qbman_result_SCN_state(scn) & 0x1);
}

int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
        return (int)(qbman_result_SCN_state(scn) & 0x2);
}

int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
        return (int)(qbman_result_SCN_state(scn) & 0x4);
}

uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
        return qbman_result_SCN_ctx(scn);
}
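
/*
 * Usage sketch (not part of this file): reacting to a buffer pool
 * state change. "msg" is a hypothetical caller-side pointer.
 *
 *      if (qbman_result_is_BPSCN(msg)) {
 *              uint16_t bpid = qbman_result_bpscn_bpid(msg);
 *
 *              if (!qbman_result_bpscn_has_free_bufs(msg))
 *                      ... replenish pool bpid ...
 *      }
 */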

/*****************/
/* Parsing CGCU  */
/*****************/
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
        return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
}

uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
        return qbman_result_SCN_ctx(scn);
}

/******************/
/* Buffer release */
/******************/
#define QB_BR_RC_VALID_SHIFT  5
#define QB_BR_RCDI_SHIFT      6

void qbman_release_desc_clear(struct qbman_release_desc *d)
{
        memset(d, 0, sizeof(*d));
        d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
}

void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
{
        d->br.bpid = bpid;
}

void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
        if (enable)
                d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
        else
                d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
}

/* Decode fields of the RAR register value: RCR ring index, valid bit
 * and success flag.
 */
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

static int qbman_swp_release_direct(struct qbman_swp *s,
                                    const struct qbman_release_desc *d,
                                    const uint64_t *buffers,
                                    unsigned int num_buffers)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

        pr_debug("RAR=%08x\n", rar);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;

        QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

        /* Start the release command */
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                                     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

        /* Copy the caller's buffer pointers to the command */
        u64_to_le32_copy(&p[2], buffers, num_buffers);

        /* Set the verb byte; we have to substitute in the valid-bit
         * and the number of buffers.
         */
        lwsync();
        p[0] = cl[0] | RAR_VB(rar) | num_buffers;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                    QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

        return 0;
}

static int qbman_swp_release_mem_back(struct qbman_swp *s,
                                      const struct qbman_release_desc *d,
                                      const uint64_t *buffers,
                                      unsigned int num_buffers)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

        pr_debug("RAR=%08x\n", rar);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;

        QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

        /* Start the release command */
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

        /* Copy the caller's buffer pointers to the command */
        u64_to_le32_copy(&p[2], buffers, num_buffers);

        /* Set the verb byte; we have to substitute in the valid-bit
         * and the number of buffers.
         */
        p[0] = cl[0] | RAR_VB(rar) | num_buffers;
        lwsync();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
                RAR_IDX(rar) * 4, QMAN_RT_MODE);

        return 0;
}

inline int qbman_swp_release(struct qbman_swp *s,
                             const struct qbman_release_desc *d,
                             const uint64_t *buffers,
                             unsigned int num_buffers)
{
        return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
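
/*
 * Usage sketch (not part of this file): release two buffers back to a
 * pool, retrying while the release command ring is busy. "swp",
 * "bpid", "addr0" and "addr1" are hypothetical caller-side names.
 *
 *      struct qbman_release_desc rd;
 *      uint64_t bufs[2] = { addr0, addr1 };
 *      int ret;
 *
 *      qbman_release_desc_clear(&rd);
 *      qbman_release_desc_set_bpid(&rd, bpid);
 *      do {
 *              ret = qbman_swp_release(swp, &rd, bufs, 2);
 *      } while (ret == -EBUSY);
 */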

/*******************/
/* Buffer acquires */
/*******************/
struct qbman_acquire_desc {
        uint8_t verb;
        uint8_t reserved;
        uint16_t bpid;
        uint8_t num;
        uint8_t reserved2[59];
};

struct qbman_acquire_rslt {
        uint8_t verb;
        uint8_t rslt;
        uint16_t reserved;
        uint8_t num;
        uint8_t reserved2[3];
        uint64_t buf[7];
};

int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
                      unsigned int num_buffers)
{
        struct qbman_acquire_desc *p;
        struct qbman_acquire_rslt *r;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->bpid = bpid;
        p->num = num_buffers;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
        if (!r) {
                pr_err("qbman: acquire from BPID %d failed, no response\n",
                       bpid);
                return -EIO;
        }

        /* Decode the outcome */
        QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);

        /* Determine success or failure */
        if (r->rslt != QBMAN_MC_RSLT_OK) {
                pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
                       bpid, r->rslt);
                return -EIO;
        }

        QBMAN_BUG_ON(r->num > num_buffers);

        /* Copy the acquired buffers to the caller's array */
        u64_from_le32_copy(buffers, &r->buf[0], r->num);

        return (int)r->num;
}
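
/*
 * Usage sketch (not part of this file): pull up to 7 buffers from a
 * pool; the return value is the number actually acquired, which may be
 * fewer than requested. "swp" and "bpid" are hypothetical caller-side
 * names.
 *
 *      uint64_t bufs[7];
 *      int n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *
 *      if (n <= 0)
 *              ... pool empty or command failure ...
 */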

/*****************/
/* FQ management */
/*****************/
struct qbman_alt_fq_state_desc {
        uint8_t verb;
        uint8_t reserved[3];
        uint32_t fqid;
        uint8_t reserved2[56];
};

struct qbman_alt_fq_state_rslt {
        uint8_t verb;
        uint8_t rslt;
        uint8_t reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
                                  uint8_t alt_fq_verb)
{
        struct qbman_alt_fq_state_desc *p;
        struct qbman_alt_fq_state_rslt *r;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->fqid = fqid & ALT_FQ_FQID_MASK;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
        if (!r) {
                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
                       alt_fq_verb);
                return -EIO;
        }

        /* Decode the outcome */
        QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);

        /* Determine success or failure */
        if (r->rslt != QBMAN_MC_RSLT_OK) {
                pr_err("ALT FQID %u failed: verb = 0x%02x, code = 0x%02x\n",
                       fqid, alt_fq_verb, r->rslt);
                return -EIO;
        }

        return 0;
}

int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
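
/*
 * Usage sketch (not part of this file): toggling a frame queue's flow
 * state. "swp" and "fqid" are hypothetical caller-side names.
 *
 *      qbman_swp_fq_xoff(swp, fqid);   set the FQ to XOFF (flow off)
 *      ...
 *      qbman_swp_fq_xon(swp, fqid);    return it to XON (flow on)
 */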

/**********************/
/* Channel management */
/**********************/

struct qbman_cdan_ctrl_desc {
        uint8_t verb;
        uint8_t reserved;
        uint16_t ch;
        uint8_t we;
        uint8_t ctrl;
        uint16_t reserved2;
        uint64_t cdan_ctx;
        uint8_t reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
        uint8_t verb;
        uint8_t rslt;
        uint16_t ch;
        uint8_t reserved[60];
};

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so
 * it would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
                              uint8_t we_mask, uint8_t cdan_en,
                              uint64_t ctx)
{
        struct qbman_cdan_ctrl_desc *p;
        struct qbman_cdan_ctrl_rslt *r;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->ch = channelid;
        p->we = we_mask;
        p->ctrl = cdan_en ? 1 : 0;
        p->cdan_ctx = ctx;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
        if (!r) {
                pr_err("qbman: wqchan config failed, no response\n");
                return -EIO;
        }

        /* Decode the outcome */
        QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
                     != QBMAN_WQCHAN_CONFIGURE);

        /* Determine success or failure */
        if (r->rslt != QBMAN_MC_RSLT_OK) {
                pr_err("CDAN channel %u failed: code = 0x%02x\n",
                       channelid, r->rslt);
                return -EIO;
        }

        return 0;
}

int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
                               uint64_t ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_CTX,
                                  0, ctx);
}

int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  1, 0);
}

int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  0, 0);
}

int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
                                      uint64_t ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
                                  1, ctx);
}
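
/*
 * Usage sketch (not part of this file): arming CDAN notifications for
 * a channel with a caller-chosen context cookie; the same cookie comes
 * back with the notification. "swp", "ch_id" and "cookie" are
 * hypothetical caller-side names.
 *
 *      qbman_swp_CDAN_set_context_enable(swp, ch_id, cookie);
 *      ... on a CDAN result: qbman_result_SCN_ctx() yields cookie ...
 */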

uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
        return QBMAN_IDX_FROM_DQRR(dqrr);
}

struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
{
        struct qbman_result *dq;

        dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
        return dq;
}
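
/*
 * Usage sketch (not part of this file): round-tripping between a DQRR
 * entry and its ring index, e.g. when only the index was parked in
 * per-flow state. "swp" and "idx" are hypothetical caller-side names.
 *
 *      struct qbman_result *dq = qbman_get_dqrr_from_idx(swp, idx);
 *
 *      QBMAN_BUG_ON(qbman_get_dqrr_idx(dq) != idx);
 */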