/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "qbman_portal.h"
/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46
/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI  0x800
#define QBMAN_CINH_SWP_EQCR_CI  0x840
#define QBMAN_CINH_SWP_EQAR     0x8c0
#define QBMAN_CINH_SWP_DQPI     0xa00
#define QBMAN_CINH_SWP_DCAP     0xac0
#define QBMAN_CINH_SWP_SDQCR    0xb00
#define QBMAN_CINH_SWP_RAR      0xcc0
#define QBMAN_CINH_SWP_ISR      0xe00
#define QBMAN_CINH_SWP_IER      0xe40
#define QBMAN_CINH_SWP_ISDR     0xe80
#define QBMAN_CINH_SWP_IIR      0xec0
#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_ITPR     0xf40
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
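/*
 * Worked example of the reverse mapping: DQRR entry 3 lives at CENA offset
 * 0x200 + (3 << 6) = 0x2c0, and (0x2c0 & 0x1ff) >> 6 recovers index 3.
 */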
/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE 0x48
#define QBMAN_FQ_FORCE    0x49
#define QBMAN_FQ_XON      0x4d
#define QBMAN_FQ_XOFF     0x4e
/*******************************/
/* Pre-defined attribute codes */
/*******************************/

#define QBMAN_RESPONSE_VERB_MASK 0x7f
/*************************/
/* SDQCR attribute codes */
/*************************/
#define QB_SDQCR_FC_SHIFT  29
#define QB_SDQCR_FC_MASK   0x1
#define QB_SDQCR_DCT_SHIFT 24
#define QB_SDQCR_DCT_MASK  0x3
#define QB_SDQCR_TOK_SHIFT 16
#define QB_SDQCR_TOK_MASK  0xff
#define QB_SDQCR_SRC_SHIFT 0
#define QB_SDQCR_SRC_MASK  0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN 0xbb
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
/* We need to keep track of which SWP triggered a pull command
 * so keep an array of portal IDs and use the token field to
 * be able to find the proper portal
 */
#define MAX_QBMAN_PORTALS 64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
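/*
 * Illustrative note: qbman_swp_pull() below stamps each command's token with
 * (portal index + 1), so the response-polling helpers can recover the issuing
 * portal via portal_idx_map[tok - 1]. Token 0 is reserved to mean "response
 * not yet written by hardware".
 */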
/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-work-around boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	int ret;
	uint32_t eqcr_pi;
	uint32_t qman_version;
	struct qbman_swp *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->desc = *d;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;

	atomic_set(&p->vdq.busy, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;
	qman_version = p->desc.qman_version;
	if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
	if (ret) {
		free(p);
		pr_err("qbman_swp_sys_init() failed %d\n", ret);
		return NULL;
	}
	/* SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error. The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
	eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & 0xF;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
	p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
						p->eqcr.ci, p->eqcr.pi);

	portal_idx_map[p->desc.idx] = p;
	return p;
}
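/*
 * Usage sketch (illustrative, not part of the driver): a caller that has
 * already populated a qbman_swp_desc 'd' with the portal's attributes brings
 * a portal up and down like this:
 *
 *	struct qbman_swp *swp = qbman_swp_init(&d);
 *
 *	if (!swp)
 *		... handle allocation or sys-init failure ...
 *	... enqueue/dequeue via the APIs below ...
 *	qbman_swp_finish(swp);
 */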
void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	qbman_swp_sys_finish(&p->sys);
	portal_idx_map[p->desc.idx] = NULL;
	free(p);
}
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
	return &p->desc;
}
/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
	void *ret;

#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
	if (!ret)
		p->mc.check = swp_mc_can_submit;
#endif
	return ret;
}

void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
	uint8_t *v = cmd;

#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
	/* TBD: "|=" is going to hurt performance. Need to move as many fields
	 * out of word zero, and for those that remain, the "OR" needs to occur
	 * at the caller side. This debug check helps to catch cases where the
	 * caller wants to OR but has forgotten to do so.
	 */
	QBMAN_BUG_ON((*v & cmd_verb) != *v);
	*v = cmd_verb | p->mc.valid_bit;
	qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
	uint32_t *ret, verb;

#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
	qbman_cena_invalidate_prefetch(&p->sys,
				       QBMAN_CENA_SWP_RR(p->mc.valid_bit));
	ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
	/* Remove the valid-bit - command completed if the rest is non-zero */
	verb = ret[0] & ~QB_VALID_BIT;
	if (!verb)
		return NULL;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit ^= QB_VALID_BIT;
	return ret;
}
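/*
 * Illustrative pattern (not part of the driver): the three calls above are
 * always used as start, fill, submit, then poll for the result, which is what
 * the qbman_swp_mc_complete() helper used by the commands later in this file
 * wraps up. A sketch, with the bound on the poll loop left to the caller:
 *
 *	void *cmd = qbman_swp_mc_start(p);
 *	void *rslt = NULL;
 *
 *	if (cmd) {
 *		... fill in command-specific fields ...
 *		qbman_swp_mc_submit(p, cmd, verb);
 *		do
 *			rslt = qbman_swp_mc_result(p);
 *		while (!rslt);
 *	}
 */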
/***********/
/* Enqueue */
/***********/

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
#define QB_ENQUEUE_CMD_NLIS_SHIFT            14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->eq.verb |= enqueue_response_always;
	else
		d->eq.verb |= enqueue_rejects_to_fq;
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
			   uint16_t opr_id, uint16_t seqnum, int incomplete)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	if (respond_success)
		d->eq.verb |= enqueue_response_always;
	else
		d->eq.verb |= enqueue_rejects_to_fq;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	if (incomplete)
		d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
	else
		d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
	d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				dma_addr_t storage_phys,
				int stash)
{
	d->eq.rsp_addr = storage_phys;
	d->eq.wae = stash;
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
	d->eq.rspid = token;
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->eq.tgtid = fqid;
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint16_t qd_bin, uint8_t qd_prio)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->eq.tgtid = qdid;
	d->eq.qdbin = qd_bin;
	d->eq.qpri = qd_prio;
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
	if (enable)
		d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
	else
		d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
			   uint8_t dqrr_idx, int park)
{
	if (enable) {
		d->eq.dca = dqrr_idx;
		if (park)
			d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
		else
			d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
		d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
	} else {
		d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
	}
}
#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	/* Set the verb byte, have to substitute in the valid-bit */
	lwsync();
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_cena_write_complete_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));

	return 0;
}
static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci;
	uint8_t diff;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	lwsync();
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	qbman_cena_write_complete_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
	s->eqcr.pi++;
	s->eqcr.pi &= 0xF;
	s->eqcr.available--;
	if (!(s->eqcr.pi & 7))
		s->eqcr.pi_vb ^= QB_VALID_BIT;

	return 0;
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct qbman_fd *fd)
{
	if (s->sys.eqcr_mode == qman_eqcr_vb_array)
		return qbman_swp_enqueue_array_mode(s, d, fd);
	else	/* Use ring mode by default */
		return qbman_swp_enqueue_ring_mode(s, d, fd);
}
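/*
 * Usage sketch (illustrative): enqueuing one frame descriptor 'fd' to frame
 * queue 'fqid', asking for a response only on rejection. -EBUSY means the
 * EQCR was full, so a real caller would bound the retry loop:
 *
 *	struct qbman_eq_desc d;
 *
 *	qbman_eq_desc_clear(&d);
 *	qbman_eq_desc_set_no_orp(&d, 0);
 *	qbman_eq_desc_set_fq(&d, fqid);
 *	while (qbman_swp_enqueue(swp, &d, &fd) == -EBUSY)
 *		;
 */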
int qbman_swp_enqueue_multiple(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct qbman_fd *fd,
			       int num_frames)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi;
	uint8_t diff;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
		eqcr_pi &= 0xF;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		eqcr_pi &= 0xF;
		if (!(eqcr_pi & 7))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (uint64_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uint64_t *)(addr_cena +
				QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
		eqcr_pi++;
		eqcr_pi &= 0xF;
	}
	s->eqcr.pi = eqcr_pi;

	return num_enqueued;
}
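/*
 * Usage sketch (illustrative): the return value is the number of frames that
 * actually fit in the EQCR, so a burst sender loops over the remainder. A
 * real caller would bound the loop rather than spin forever on a stuck ring:
 *
 *	int sent = 0;
 *
 *	while (sent < n)
 *		sent += qbman_swp_enqueue_multiple(swp, &d, &fds[sent],
 *						   n - sent);
 */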
int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct qbman_fd *fd,
				    int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi;
	uint8_t diff;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
		cl = qb_cl(&d[i]);
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
		eqcr_pi &= 0xF;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
		cl = qb_cl(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		eqcr_pi &= 0xF;
		if (!(eqcr_pi & 7))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (uint64_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uint64_t *)(addr_cena +
				QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
		eqcr_pi++;
		eqcr_pi &= 0xF;
	}
	s->eqcr.pi = eqcr_pi;

	return num_enqueued;
}
/*************************/
/* Static (push) dequeue */
/*************************/

void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
	uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	QBMAN_BUG_ON(channel_idx > 15);
	*enabled = (int)(src & (1 << channel_idx));
}

void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
	uint16_t dqsrc;

	QBMAN_BUG_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Read back the complete src map. If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
}
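/*
 * Usage sketch (illustrative): a portal that should receive push dequeues
 * from, say, channel index 0 enables it once; qbman_swp_push_set() then
 * programs SDQCR using the DCT/FC/token defaults composed in
 * qbman_swp_init():
 *
 *	qbman_swp_push_set(swp, 0, 1);	enable
 *	...
 *	qbman_swp_push_set(swp, 0, 0);	disable again
 */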
/***************************/
/* Volatile (pull) dequeue */
/***************************/

/* These should be const, eventually */
#define QB_VDQCR_VERB_DCT_SHIFT 0
#define QB_VDQCR_VERB_DT_SHIFT  2
#define QB_VDQCR_VERB_RLS_SHIFT 4
#define QB_VDQCR_VERB_WAE_SHIFT 5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct qbman_result *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	d->pull.rsp_addr_virt = (uint64_t)storage;

	if (!storage) {
		d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->pull.rsp_addr = storage_phys;
}

void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
{
	d->pull.numf = numframes - 1;
}

void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
	d->pull.tok = token;
}

void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
	d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->pull.dq_src = fqid;
}

void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
			    enum qbman_pull_type_e dct)
{
	d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->pull.dq_src = wqid;
}

void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
				 enum qbman_pull_type_e dct)
{
	d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->pull.dq_src = chid;
}
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	uint32_t *p;
	uint32_t *cl = qb_cl(d);

	if (!atomic_dec_and_test(&s->vdq.busy)) {
		atomic_inc(&s->vdq.busy);
		return -EBUSY;
	}

	d->pull.tok = s->sys.idx + 1;
	s->vdq.storage = (void *)d->pull.rsp_addr_virt;
	p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
	memcpy(&p[1], &cl[1], 12);

	/* Set the verb byte, have to substitute in the valid-bit */
	lwsync();
	p[0] = cl[0] | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);

	return 0;
}
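/*
 * Usage sketch (illustrative): a volatile dequeue of up to 4 frames from
 * 'fqid' into caller-owned, DMA-able storage, polled via the token that the
 * hardware writes back (qbman_swp_pull() fills in the token itself). The
 * poll bound is left to the caller:
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 4);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (!qbman_swp_pull(swp, &pd))
 *		while (!qbman_check_command_complete(storage))
 *			;
 */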
/****************/
/* Polling DQRR */
/****************/

#define QMAN_DQRR_PI_MASK 0xf

#define QBMAN_RESULT_DQ       0x60
#define QBMAN_RESULT_FQRN     0x21
#define QBMAN_RESULT_FQRNI    0x22
#define QBMAN_RESULT_FQPN     0x24
#define QBMAN_RESULT_FQDAN    0x25
#define QBMAN_RESULT_CDAN     0x26
#define QBMAN_RESULT_CSCN_MEM 0x27
#define QBMAN_RESULT_CGCU     0x28
#define QBMAN_RESULT_BPSCN    0x29
#define QBMAN_RESULT_CSCN_WQ  0x2a
/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	uint32_t verb;
	uint32_t response_verb;
	uint32_t flags;
	const struct qbman_result *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/* We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
			     QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/* if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
			pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		qbman_cena_invalidate_prefetch(&s->sys,
				QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	}
	p = qbman_cena_read_wo_shadow(&s->sys,
				      QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/* If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip these
	 * checks, because we've already determined that a new entry is
	 * available and we've invalidated the cacheline before reading it, so
	 * the valid-bit behaviour is repaired and should tell us what we
	 * already knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
		return NULL;

	/* There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
		s->dqrr.next_idx = 0;
		s->dqrr.valid_bit ^= QB_VALID_BIT;
	}
	/* If this is the final response to a volatile dequeue command
	 * indicate that the vdq is no longer busy
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & QBMAN_DQ_STAT_VOLATILE) &&
	    (flags & QBMAN_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.busy);

	return p;
}
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
			    const struct qbman_result *dq)
{
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
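/*
 * Usage sketch (illustrative): the canonical DQRR poll loop pairs
 * qbman_swp_dqrr_next() with qbman_swp_dqrr_consume():
 *
 *	const struct qbman_result *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *		if (qbman_result_is_DQ(dq))
 *			... process qbman_result_DQ_fd(dq) ...
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */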
/*********************************/
/* Polling user-provided storage */
/*********************************/

int qbman_result_has_new_result(struct qbman_swp *s,
				struct qbman_result *dq)
{
	if (dq->dq.tok == 0)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct qbman_result *)dq)->dq.tok = 0;

	/*
	 * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
	 * fact "VDQCR" shows busy doesn't mean that we hold the result that
	 * makes it available. Eg. we may be looking at our 10th dequeue result,
	 * having released VDQCR after the 1st result and it is now busy due to
	 * some other command!
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.busy);
	}

	return 1;
}
int qbman_check_new_result(struct qbman_result *dq)
{
	if (dq->dq.tok == 0)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct qbman_result *)dq)->dq.tok = 0;

	return 1;
}

int qbman_check_command_complete(struct qbman_result *dq)
{
	struct qbman_swp *s;

	if (dq->dq.tok == 0)
		return 0;

	s = portal_idx_map[dq->dq.tok - 1];
	/*
	 * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
	 * fact "VDQCR" shows busy doesn't mean that we hold the result that
	 * makes it available. Eg. we may be looking at our 10th dequeue result,
	 * having released VDQCR after the 1st result and it is now busy due to
	 * some other command!
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.busy);
	}

	return 1;
}
/********************************/
/* Categorising qbman results   */
/********************************/

static inline int __qbman_result_is_x(const struct qbman_result *dq,
				      uint8_t x)
{
	uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;

	return (response_verb == x);
}

int qbman_result_is_DQ(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
}

int qbman_result_is_FQDAN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
}

int qbman_result_is_CDAN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
}

int qbman_result_is_CSCN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
	       __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}

int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
}

int qbman_result_is_CGCU(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
}

int qbman_result_is_FQRN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
}

int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
}

int qbman_result_is_FQPN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
}
/*********************************/
/* Parsing frame dequeue results */
/*********************************/

/* These APIs assume qbman_result_is_DQ() is TRUE */

uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
	return dq->dq.stat;
}

uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
	return dq->dq.seqnum;
}

uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
	return dq->dq.oprid;
}

uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
	return dq->dq.fqid;
}

uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
	return dq->dq.fq_byte_cnt;
}

uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
	return dq->dq.fq_frm_cnt;
}

uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
	return dq->dq.fqd_ctx;
}

const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
	return (const struct qbman_fd *)&dq->dq.fd[0];
}
/**************************************/
/* Parsing state-change notifications */
/**************************************/
uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
	return scn->scn.state;
}

uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
	return scn->scn.rid_tok;
}

uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
	return scn->scn.ctx;
}

/*****************/
/* Parsing BPSCN */
/*****************/
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
}

int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return !(int)(qbman_result_SCN_state(scn) & 0x1);
}

int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x2);
}

int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x4);
}

uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
	return qbman_result_SCN_ctx(scn);
}

/****************/
/* Parsing CGCU */
/****************/
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
}

uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
	return qbman_result_SCN_ctx(scn);
}
/******************/
/* Buffer release */
/******************/
#define QB_BR_RC_VALID_SHIFT 5
#define QB_BR_RCDI_SHIFT     6

void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
}

void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
{
	d->br.bpid = bpid;
}

void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
	else
		d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const uint64_t *buffers, unsigned int num_buffers)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

	pr_debug("RAR=%08x\n", rar);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

	/* Start the release command */
	p = qbman_cena_write_start_wo_shadow(&s->sys,
					     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	u64_to_le32_copy(&p[2], buffers, num_buffers);

	/* Set the verb byte, have to substitute in the valid-bit and the
	 * number of buffers.
	 */
	lwsync();
	p[0] = cl[0] | RAR_VB(rar) | num_buffers;
	qbman_cena_write_complete_wo_shadow(&s->sys,
					    QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	return 0;
}
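/*
 * Usage sketch (illustrative): returning two buffers to buffer pool 'bpid',
 * where addr0/addr1 stand in for buffer addresses owned by the caller. At
 * most 7 buffers fit in one command, and -EBUSY means no free RCR slot, so a
 * real caller would bound the retry loop:
 *
 *	struct qbman_release_desc rd;
 *	uint64_t bufs[2] = { addr0, addr1 };
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	while (qbman_swp_release(swp, &rd, bufs, 2) == -EBUSY)
 *		;
 */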
/*******************/
/* Buffer acquires */
/*******************/
struct qbman_acquire_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t bpid;
	uint8_t num;
	uint8_t reserved2[59];
};

struct qbman_acquire_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t reserved;
	uint8_t num;
	uint8_t reserved2[3];
	uint64_t buf[7];
};

int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = bpid;
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	QBMAN_BUG_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	u64_from_le32_copy(buffers, &r->buf[0], r->num);

	return (int)r->num;
}
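/*
 * Usage sketch (illustrative): the acquire side mirrors release. The return
 * value is the number of buffers actually granted, which can be fewer than
 * requested when the pool is running dry:
 *
 *	uint64_t bufs[7];
 *	int n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *
 *	if (n <= 0)
 *		... pool empty, or the command failed ...
 */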
/*****************/
/* FQ management */
/*****************/
struct qbman_alt_fq_state_desc {
	uint8_t verb;
	uint8_t reserved[3];
	uint32_t fqid;
	uint8_t reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint8_t reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
				  uint8_t alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = fqid & ALT_FQ_FQID_MASK;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
		       fqid, alt_fq_verb, r->rslt);
		return -EIO;
	}

	return 0;
}

int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
/**********************/
/* Channel management */
/**********************/

struct qbman_cdan_ctrl_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t ch;
	uint8_t we;
	uint8_t ctrl;
	uint16_t reserved2;
	uint64_t cdan_ctx;
	uint8_t reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t ch;
	uint8_t reserved[60];
};

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
 * would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN  0x1
#define CODE_CDAN_WE_CTX 0x4

static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
			      uint8_t we_mask, uint8_t cdan_en,
			      uint64_t ctx)
{
	struct qbman_cdan_ctrl_desc *p;
	struct qbman_cdan_ctrl_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = channelid;
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = ctx;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	/* Decode the outcome */
	QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
		     != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}

int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
			       uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
				      uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
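/*
 * Usage sketch (illustrative): arming channel dequeue available notifications
 * for channel 'chid' with a caller-chosen 64-bit context that is echoed back
 * in the QBMAN_RESULT_CDAN entry:
 *
 *	if (!qbman_swp_CDAN_set_context_enable(swp, chid, ctx))
 *		... armed; a CDAN entry will arrive via the DQRR ...
 */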
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
	return QBMAN_IDX_FROM_DQRR(dqrr);
}

struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
{
	struct qbman_result *dq;

	dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
	return dq;
}