dd62e9af3fff18375e9d903ab67dc95377e837cd
[deb_dpdk.git] / drivers / bus / fslmc / qbman / qbman_portal.c
1 /*-
2  *   BSD LICENSE
3  *
4  * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *     * Redistributions of source code must retain the above copyright
9  *       notice, this list of conditions and the following disclaimer.
10  *     * Redistributions in binary form must reproduce the above copyright
11  *       notice, this list of conditions and the following disclaimer in the
12  *       documentation and/or other materials provided with the distribution.
13  *     * Neither the name of Freescale Semiconductor nor the
14  *       names of its contributors may be used to endorse or promote products
15  *       derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20  * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
21  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28
29 #include "qbman_portal.h"
30
/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH (cache-inhibited) register offsets within the portal's
 * cache-inhibited mapping.
 */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_DQPI    0xa00
#define QBMAN_CINH_SWP_DCAP    0xac0
#define QBMAN_CINH_SWP_SDQCR   0xb00
#define QBMAN_CINH_SWP_RAR     0xcc0
#define QBMAN_CINH_SWP_ISR     0xe00
#define QBMAN_CINH_SWP_IER     0xe40
#define QBMAN_CINH_SWP_ISDR    0xe80
#define QBMAN_CINH_SWP_IIR     0xec0
#define QBMAN_CINH_SWP_DQRR_ITR    0xa80
#define QBMAN_CINH_SWP_ITPR    0xf40

/* CENA (cache-enabled) register offsets; ring entries are one 64-byte
 * cacheline apart, hence the << 6.
 */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840

/* Reverse mapping of QBMAN_CENA_SWP_DQRR(): recover the ring index from a
 * DQRR entry pointer.
 */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);

/*************************/
/* SDQCR attribute codes */
/*************************/

/* we put these here because at least some of them are required by
 * qbman_swp_init()
 */
struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
/* Width of this field depends on the detected QMan revision (2 or 3 bits),
 * so it is filled in at runtime by qbman_swp_init().
 */
static struct qb_attr_code code_eq_dca_idx;
#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);

/* We need to keep track of which SWP triggered a pull command
 * so keep an array of portal IDs and use the token field to
 * be able to find the proper portal
 */
#define MAX_QBMAN_PORTALS  35
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];

/* QMan hardware revision, cached by qbman_swp_init() for feature checks */
uint32_t qman_version;
109
110 /*********************************/
111 /* Portal constructor/destructor */
112 /*********************************/
113
114 /* Software portals should always be in the power-on state when we initialise,
115  * due to the CCSR-based portal reset functionality that MC has.
116  *
117  * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
118  * valid-bits, so we need to support a workaround where we don't trust
119  * valid-bits when detecting new entries until any stale ring entries have been
120  * overwritten at least once. The idea is that we read PI for the first few
121  * entries, then switch to valid-bit after that. The trick is to clear the
122  * bug-work-around boolean once the PI wraps around the ring for the first time.
123  *
124  * Note: this still carries a slight additional cost once the decrementer hits
125  * zero.
126  */
127 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
128 {
129         int ret;
130         uint32_t eqcr_pi;
131         struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
132
133         if (!p)
134                 return NULL;
135         p->desc = *d;
136 #ifdef QBMAN_CHECKING
137         p->mc.check = swp_mc_can_start;
138 #endif
139         p->mc.valid_bit = QB_VALID_BIT;
140         p->sdq = 0;
141         qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
142         qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
143         qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
144         atomic_set(&p->vdq.busy, 1);
145         p->vdq.valid_bit = QB_VALID_BIT;
146         p->dqrr.next_idx = 0;
147         p->dqrr.valid_bit = QB_VALID_BIT;
148         qman_version = p->desc.qman_version;
149         if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
150                 p->dqrr.dqrr_size = 4;
151                 p->dqrr.reset_bug = 1;
152                 /* Set size of DQRR to 4, encoded in 2 bits */
153                 code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 2);
154         } else {
155                 p->dqrr.dqrr_size = 8;
156                 p->dqrr.reset_bug = 0;
157                 /* Set size of DQRR to 8, encoded in 3 bits */
158                 code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 3);
159         }
160
161         ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
162         if (ret) {
163                 kfree(p);
164                 pr_err("qbman_swp_sys_init() failed %d\n", ret);
165                 return NULL;
166         }
167         /* SDQCR needs to be initialized to 0 when no channels are
168          * being dequeued from or else the QMan HW will indicate an
169          * error.  The values that were calculated above will be
170          * applied when dequeues from a specific channel are enabled
171          */
172         qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
173         eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
174         p->eqcr.pi = eqcr_pi & 0xF;
175         p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
176         p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
177         p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
178                                                 p->eqcr.ci, p->eqcr.pi);
179
180         portal_idx_map[p->desc.idx] = p;
181         return p;
182 }
183
184 void qbman_swp_finish(struct qbman_swp *p)
185 {
186 #ifdef QBMAN_CHECKING
187         QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
188 #endif
189         qbman_swp_sys_finish(&p->sys);
190         portal_idx_map[p->desc.idx] = NULL;
191         kfree(p);
192 }
193
194 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
195 {
196         return &p->desc;
197 }
198
199 /**************/
200 /* Interrupts */
201 /**************/
202
/* Thin accessors for the portal interrupt registers.  Each is a single
 * cache-inhibited register read or write; the register roles below follow
 * the CINH offset names (ISDR/ISR/IER/IIR/DQRR_ITR/ITPR).
 */

/* Read the interrupt status-disable ("vanish") register (ISDR) */
uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

/* Write the interrupt status-disable ("vanish") register (ISDR) */
void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

/* Read the interrupt status register (ISR) */
uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

/* Write @mask to the interrupt status register (ISR) to clear status bits */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

/* Read the DQRR interrupt threshold register */
uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

/* Write the DQRR interrupt threshold register */
void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

/* Read the interrupt timeout period register (ITPR) */
uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

/* Write the interrupt timeout period register (ITPR) */
void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

/* Read the interrupt enable register (IER) */
uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

/* Write the interrupt enable register (IER) */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

/* Read the interrupt inhibit register (IIR); non-zero means inhibited */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

/* Set (all-ones) or clear (zero) the interrupt inhibit register (IIR) */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
262
263 /***********************/
264 /* Management commands */
265 /***********************/
266
267 /*
268  * Internal code common to all types of management commands.
269  */
270
271 void *qbman_swp_mc_start(struct qbman_swp *p)
272 {
273         void *ret;
274 #ifdef QBMAN_CHECKING
275         QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
276 #endif
277         ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
278 #ifdef QBMAN_CHECKING
279         if (!ret)
280                 p->mc.check = swp_mc_can_submit;
281 #endif
282         return ret;
283 }
284
285 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
286 {
287         uint32_t *v = cmd;
288 #ifdef QBMAN_CHECKING
289         QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
290 #endif
291         /* TBD: "|=" is going to hurt performance. Need to move as many fields
292          * out of word zero, and for those that remain, the "OR" needs to occur
293          * at the caller side. This debug check helps to catch cases where the
294          * caller wants to OR but has forgotten to do so.
295          */
296         QBMAN_BUG_ON((*v & cmd_verb) != *v);
297         *v = cmd_verb | p->mc.valid_bit;
298         qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
299 #ifdef QBMAN_CHECKING
300         p->mc.check = swp_mc_can_poll;
301 #endif
302 }
303
/* Poll for the result of a previously submitted management command.
 * Returns a pointer to the response, or NULL if the command has not
 * completed yet (verb still zero after masking the valid-bit).  On
 * completion the valid-bit is toggled for the next command cycle.
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
        uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
        qbman_cena_invalidate_prefetch(&p->sys,
                                       QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        /* Remove the valid-bit - command completed if the rest is non-zero */
        verb = ret[0] & ~QB_VALID_BIT;
        if (!verb)
                return NULL;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit ^= QB_VALID_BIT;
        return ret;
}
323
324 /***********/
325 /* Enqueue */
326 /***********/
327
/* Enqueue-descriptor attribute codes (word index, bit offset, width).
 * These should be const, eventually.
 */
static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
/* Can't set code_eq_dca_idx width. Need qman version. Read at runtime */
static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);

/* Values for the code_eq_cmd field of an enqueue descriptor */
enum qbman_eq_cmd_e {
        /* No enqueue, primarily for plugging ORP gaps for dropped frames */
        qbman_eq_cmd_empty,
        /* DMA an enqueue response once complete */
        qbman_eq_cmd_respond,
        /* DMA an enqueue response only if the enqueue fails */
        qbman_eq_cmd_respond_reject
};
356
357 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
358 {
359         memset(d, 0, sizeof(*d));
360 }
361
362 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
363 {
364         uint32_t *cl = qb_cl(d);
365
366         qb_attr_code_encode(&code_eq_orp_en, cl, 0);
367         qb_attr_code_encode(&code_eq_cmd, cl,
368                             respond_success ? qbman_eq_cmd_respond :
369                                               qbman_eq_cmd_respond_reject);
370 }
371
372 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
373                            uint32_t opr_id, uint32_t seqnum, int incomplete)
374 {
375         uint32_t *cl = qb_cl(d);
376
377         qb_attr_code_encode(&code_eq_orp_en, cl, 1);
378         qb_attr_code_encode(&code_eq_cmd, cl,
379                             respond_success ? qbman_eq_cmd_respond :
380                                               qbman_eq_cmd_respond_reject);
381         qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
382         qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
383         qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
384 }
385
386 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
387                                 uint32_t seqnum)
388 {
389         uint32_t *cl = qb_cl(d);
390
391         qb_attr_code_encode(&code_eq_orp_en, cl, 1);
392         qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
393         qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
394         qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
395         qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
396         qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
397 }
398
399 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
400                                 uint32_t seqnum)
401 {
402         uint32_t *cl = qb_cl(d);
403
404         qb_attr_code_encode(&code_eq_orp_en, cl, 1);
405         qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
406         qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
407         qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
408         qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
409         qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
410 }
411
412 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
413                                 dma_addr_t storage_phys,
414                                 int stash)
415 {
416         uint32_t *cl = qb_cl(d);
417
418         qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
419         qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
420 }
421
422 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
423 {
424         uint32_t *cl = qb_cl(d);
425
426         qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
427 }
428
429 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
430 {
431         uint32_t *cl = qb_cl(d);
432
433         qb_attr_code_encode(&code_eq_qd_en, cl, 0);
434         qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
435 }
436
437 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
438                           uint32_t qd_bin, uint32_t qd_prio)
439 {
440         uint32_t *cl = qb_cl(d);
441
442         qb_attr_code_encode(&code_eq_qd_en, cl, 1);
443         qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
444         qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
445         qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
446 }
447
448 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
449 {
450         uint32_t *cl = qb_cl(d);
451
452         qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
453 }
454
455 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
456                            uint32_t dqrr_idx, int park)
457 {
458         uint32_t *cl = qb_cl(d);
459
460         qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
461         if (enable) {
462                 qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
463                 qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
464         }
465 }
466
/* EQAR register decode: slot index, valid-bit, and success flag */
#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
/* Array-mode enqueue: ask HW for a free EQCR slot via the EQAR register,
 * fill the entry, then publish it by writing the verb byte last with the
 * valid-bit taken from EQAR.  Returns 0 on success, -EBUSY if no slot is
 * currently free.
 */
static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                 const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        /* Copy descriptor words 1..7 and the frame descriptor into the
         * ring entry; word 0 (the verb) is written last, below.
         */
        word_copy(&p[1], &cl[1], 7);
        word_copy(&p[8], fd, sizeof(*fd) >> 2);
        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync(); /* barrier: entry body must be visible before the verb */
        p[0] = cl[0] | EQAR_VB(eqar);
        qbman_cena_write_complete_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        return 0;
}
492
/* Ring-mode enqueue: uses the software producer index (eqcr.pi) and a
 * local valid-bit; the consumer index is only re-read from HW when the
 * cached "available" count reaches zero.  Returns 0 on success, -EBUSY
 * if the ring is full.
 */
static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci;
        uint8_t diff;

        if (!s->eqcr.available) {
                /* Refresh CI from HW; if it hasn't advanced, ring is full */
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
        word_copy(&p[1], &cl[1], 7);
        word_copy(&p[8], fd, sizeof(*fd) >> 2);
        lwsync(); /* barrier: entry body must be visible before the verb */
        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
        /* PI counts 0..15 over an 8-entry ring; the extra bit tracks wrap
         * so the valid-bit is toggled once per lap.
         */
        s->eqcr.pi++;
        s->eqcr.pi &= 0xF;
        s->eqcr.available--;
        if (!(s->eqcr.pi & 7))
                s->eqcr.pi_vb ^= QB_VALID_BIT;
        return 0;
}
529
/* Stage one enqueue into the EQCR ring without any store barrier or CENA
 * write-complete.  qbman_swp_flush_ring()/qbman_sync() exist as separate
 * calls — presumably callers batch several fills and then flush/sync.
 * NOTE(review): unlike qbman_swp_enqueue_ring_mode() the lwsync() before
 * the verb byte is commented out here, and @burst_index is unused —
 * confirm that callers provide the required store ordering themselves.
 */
int qbman_swp_fill_ring(struct qbman_swp *s,
                        const struct qbman_eq_desc *d,
                        const struct qbman_fd *fd,
                        __attribute__((unused)) uint8_t burst_index)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci;
        uint8_t diff;

        if (!s->eqcr.available) {
                /* Refresh CI from HW; if it hasn't advanced, ring is full */
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return -EBUSY;
        }
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));
        /* word_copy(&p[1], &cl[1], 7); */
        memcpy(&p[1], &cl[1], 7 * 4);
        /* word_copy(&p[8], fd, sizeof(*fd) >> 2); */
        memcpy(&p[8], fd, sizeof(struct qbman_fd));

        /* lwsync(); */
        p[0] = cl[0] | s->eqcr.pi_vb;

        /* Advance PI (0..15 over an 8-entry ring; toggle valid-bit on wrap) */
        s->eqcr.pi++;
        s->eqcr.pi &= 0xF;
        s->eqcr.available--;
        if (!(s->eqcr.pi & 7))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}
568
569 int qbman_swp_flush_ring(struct qbman_swp *s)
570 {
571         void *ptr = s->sys.addr_cena;
572
573         dcbf((uint64_t)ptr);
574         dcbf((uint64_t)ptr + 0x40);
575         dcbf((uint64_t)ptr + 0x80);
576         dcbf((uint64_t)ptr + 0xc0);
577         dcbf((uint64_t)ptr + 0x100);
578         dcbf((uint64_t)ptr + 0x140);
579         dcbf((uint64_t)ptr + 0x180);
580         dcbf((uint64_t)ptr + 0x1c0);
581
582         return 0;
583 }
584
/* Issue a store barrier (lwsync); exposed for callers that batch ring
 * updates (e.g. via qbman_swp_fill_ring()) and need ordering themselves.
 */
void qbman_sync(void)
{
        lwsync();
}
589
590 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
591                       const struct qbman_fd *fd)
592 {
593         if (s->sys.eqcr_mode == qman_eqcr_vb_array)
594                 return qbman_swp_enqueue_array_mode(s, d, fd);
595         else    /* Use ring mode by default */
596                 return qbman_swp_enqueue_ring_mode(s, d, fd);
597 }
598
/* Enqueue up to @num_frames FDs, each with its OWN enqueue descriptor
 * (@d is an array walked in step with @fd).  Three passes over the staged
 * slots:
 *   1) copy descriptor words 1..7 plus the FD into each EQCR entry,
 *   2) store barrier, then write each verb byte with the valid-bit,
 *   3) flush the touched cachelines in one burst.
 * Returns the number of frames actually enqueued (may be less than
 * @num_frames; 0 if the ring is full).
 */
int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
                               const struct qbman_eq_desc *d,
                               const struct qbman_fd *fd,
                               int num_frames)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi;
        uint8_t diff;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        if (!s->eqcr.available) {
                /* Refresh the consumer index from HW */
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        /* Clamp the burst to the free slots */
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
                eqcr_pi &= 0xF;
                /*Pointing to the next enqueue descriptor*/
                cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
        }

        lwsync(); /* barrier: all entry bodies before any verb bytes */

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        cl = qb_cl(d);
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                eqcr_pi &= 0xF;
                if (!(eqcr_pi & 7))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
                /*Pointing to the next enqueue descriptor*/
                cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
        }

        /* Flush all the cacheline without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (uint64_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uint64_t *)(addr_cena +
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }
        s->eqcr.pi = eqcr_pi;

        return num_enqueued;
}
668
669 /*************************/
670 /* Static (push) dequeue */
671 /*************************/
672
673 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
674 {
675         struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
676
677         QBMAN_BUG_ON(channel_idx > 15);
678         *enabled = (int)qb_attr_code_decode(&code, &s->sdq);
679 }
680
681 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
682 {
683         uint16_t dqsrc;
684         struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
685
686         QBMAN_BUG_ON(channel_idx > 15);
687         qb_attr_code_encode(&code, &s->sdq, !!enable);
688         /* Read make the complete src map.  If no channels are enabled
689          * the SDQCR must be 0 or else QMan will assert errors
690          */
691         dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
692         if (dqsrc != 0)
693                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
694         else
695                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
696 }
697
698 /***************************/
699 /* Volatile (pull) dequeue */
700 /***************************/
701
/* Pull (VDQCR) descriptor attribute codes.
 * These should be const, eventually.
 */
static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);

/* Values for the code_pull_dt (dequeue type) field */
enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};
717
718 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
719 {
720         memset(d, 0, sizeof(*d));
721 }
722
723 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
724                                  struct qbman_result *storage,
725                                  dma_addr_t storage_phys,
726                                  int stash)
727 {
728         uint32_t *cl = qb_cl(d);
729         /* Squiggle the pointer 'storage' into the extra 2 words of the
730          * descriptor (which aren't copied to the hw command)
731          */
732         *(void **)&cl[4] = storage;
733         if (!storage) {
734                 qb_attr_code_encode(&code_pull_rls, cl, 0);
735                 return;
736         }
737         qb_attr_code_encode(&code_pull_rls, cl, 1);
738         qb_attr_code_encode(&code_pull_stash, cl, !!stash);
739         qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
740 }
741
742 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
743 {
744         uint32_t *cl = qb_cl(d);
745
746         QBMAN_BUG_ON(!numframes || (numframes > 16));
747         qb_attr_code_encode(&code_pull_numframes, cl,
748                             (uint32_t)(numframes - 1));
749 }
750
751 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
752 {
753         uint32_t *cl = qb_cl(d);
754
755         qb_attr_code_encode(&code_pull_token, cl, token);
756 }
757
758 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
759 {
760         uint32_t *cl = qb_cl(d);
761
762         qb_attr_code_encode(&code_pull_dct, cl, 1);
763         qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
764         qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
765 }
766
767 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
768                             enum qbman_pull_type_e dct)
769 {
770         uint32_t *cl = qb_cl(d);
771
772         qb_attr_code_encode(&code_pull_dct, cl, dct);
773         qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
774         qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
775 }
776
777 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
778                                  enum qbman_pull_type_e dct)
779 {
780         uint32_t *cl = qb_cl(d);
781
782         qb_attr_code_encode(&code_pull_dct, cl, dct);
783         qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
784         qb_attr_code_encode(&code_pull_dqsource, cl, chid);
785 }
786
/* Issue a volatile dequeue (VDQCR) command.  Only one pull may be
 * outstanding per portal: the atomic vdq.busy flag is the software
 * interlock.  Returns 0 on success, -EBUSY if a previous pull has not
 * completed yet.
 */
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        if (!atomic_dec_and_test(&s->vdq.busy)) {
                /* Lost the race: restore the count and bail */
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }
        /* Remember where the results will land; cl[4] holds the virtual
         * pointer stashed by qbman_pull_desc_set_storage().
         */
        s->vdq.storage = *(void **)&cl[4];
        /* We use portal index +1 as token so that 0 still indicates
         * that the result isn't valid yet.
         */
        qb_attr_code_encode(&code_pull_token, cl, s->desc.idx + 1);
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        word_copy(&p[1], &cl[1], 3);
        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync(); /* barrier: command words must be visible before the verb */
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        return 0;
}
810
811 /****************/
812 /* Polling DQRR */
813 /****************/
814
/* DQRR entry attribute codes (dequeue response ring) */
static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);

/* "Verb" values identifying the type of a DQRR entry / notification
 * (names follow DPAA2 conventions; see the QBMan block guide).
 */
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* DQPI register: the low 4 bits are the DQRR producer index */
static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);
838
/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	uint32_t verb;
	uint32_t response_verb;
	uint32_t flags;
	const struct qbman_result *dq;
	const uint32_t *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/* We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
		uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;
		/* if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
			pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		qbman_cena_invalidate_prefetch(&s->sys,
				QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	}
	dq = qbman_cena_read_wo_shadow(&s->sys,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	p = qb_cl(dq);
	verb = qb_attr_code_decode(&code_dqrr_verb, p);
	/* If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip these
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
		return NULL;

	/* There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
		/* Ring wrapped: the expected valid-bit polarity flips. */
		s->dqrr.next_idx = 0;
		s->dqrr.valid_bit ^= QB_VALID_BIT;
	}
	/* If this is the final response to a volatile dequeue command
	 * indicate that the vdq is no longer busy.
	 */
	flags = qbman_result_DQ_flags(dq);
	response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & QBMAN_DQ_STAT_VOLATILE) &&
	    (flags & QBMAN_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.busy);

	return dq;
}
917
918 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
919 void qbman_swp_dqrr_consume(struct qbman_swp *s,
920                             const struct qbman_result *dq)
921 {
922         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
923 }
924
/*********************************/
/* Polling user-provided storage */
/*********************************/

/* Poll user-supplied dequeue storage for a newly-arrived result.
 * Returns 1 (after converting the entry to host endianness and clearing
 * its token) when a valid result is present, 0 otherwise. 's' is unused.
 */
int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,
				const struct qbman_result *dq)
{
	/* To avoid converting the little-endian DQ entry to host-endian prior
	 * to us knowing whether there is a valid entry or not (and run the
	 * risk of corrupting the incoming hardware LE write), we detect in
	 * hardware endianness rather than host. This means we need a different
	 * "code" depending on whether we are BE or LE in software, which is
	 * where DQRR_TOK_OFFSET comes in...
	 */
	static struct qb_attr_code code_dqrr_tok_detect =
					QB_CODE(0, DQRR_TOK_OFFSET, 8);
	/* The user trying to poll for a result treats "dq" as const. It is
	 * however the same address that was provided to us non-const in the
	 * first place, for directing hardware DMA to. So we can cast away the
	 * const because it is mutable from our perspective.
	 */
	uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
	uint32_t token;

	/* A zero token means hardware has not (yet) written a result here. */
	token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
	if (token == 0)
		return 0;
	/* Entry is valid - overwrite token back to 0 so
	 * a) If this memory is reused tokens will be 0
	 * b) If someone calls "has_new_result()" again on this entry it
	 *    will not appear to be new
	 */
	qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);

	/* Only now do we convert from hardware to host endianness. Also, as we
	 * are returning success, the user has promised not to call us again, so
	 * there's no risk of us converting the endianness twice...
	 */
	make_le32_n(p, 16);
	return 1;
}
966
/* Check whether the command whose result is directed at 'dq' has completed.
 * Returns 1 when a result is present, 0 otherwise. On completion, if 'dq'
 * was the storage of an outstanding volatile dequeue, the owning portal's
 * busy flag is released so another VDQ command may be issued.
 */
int qbman_check_command_complete(struct qbman_swp *s,
				 const struct qbman_result *dq)
{
	/* To avoid converting the little-endian DQ entry to host-endian prior
	 * to us knowing whether there is a valid entry or not (and run the
	 * risk of corrupting the incoming hardware LE write), we detect in
	 * hardware endianness rather than host. This means we need a different
	 * "code" depending on whether we are BE or LE in software, which is
	 * where DQRR_TOK_OFFSET comes in...
	 */
	static struct qb_attr_code code_dqrr_tok_detect =
					QB_CODE(0, DQRR_TOK_OFFSET, 8);
	/* The user trying to poll for a result treats "dq" as const. It is
	 * however the same address that was provided to us non-const in the
	 * first place, for directing hardware DMA to. So we can cast away the
	 * const because it is mutable from our perspective.
	 */
	uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
	uint32_t token;

	token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
	if (token == 0)
		return 0;
	/* TODO: Remove qbman_swp from parameters and make it a local
	 * once we've tested the reserve portal map change
	 */
	/* The token was encoded as (portal index + 1) in qbman_swp_pull();
	 * recover the portal that issued the command.
	 */
	s = portal_idx_map[token - 1];
	/* When token is set it indicates that VDQ command has been fetched
	 * by qbman and is working on it. It is safe for software to issue
	 * another VDQ command, so incrementing the busy variable.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.busy);
	}
	return 1;
}
1004
/********************************/
/* Categorising qbman results   */
/********************************/

/* Verb field location for notifications written to memory, which use a
 * different byte layout from DQRR entries (hence the distinct offset).
 */
static struct qb_attr_code code_result_in_mem =
			QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);
1011
1012 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1013                                       uint32_t x)
1014 {
1015         const uint32_t *p = qb_cl(dq);
1016         uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);
1017
1018         return (response_verb == x);
1019 }
1020
1021 static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,
1022                                              uint32_t x)
1023 {
1024         const uint32_t *p = qb_cl(dq);
1025         uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);
1026
1027         return (response_verb == x);
1028 }
1029
/* Type-test helpers: each returns non-zero if 'dq' carries the named
 * result/notification type. The _in_mem comparisons are used for
 * notifications that QMan writes to memory rather than to the DQRR.
 */
int qbman_result_is_DQ(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
}

int qbman_result_is_FQDAN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
}

int qbman_result_is_CDAN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
}

/* CSCNs can arrive either via memory or via a workqueue, so test both. */
int qbman_result_is_CSCN(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
		__qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}

int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
}

int qbman_result_is_CGCU(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
}

int qbman_result_is_FQRN(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
}

int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
}

int qbman_result_is_FQPN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
}
1075
/*********************************/
/* Parsing frame dequeue results */
/*********************************/

/* These APIs assume qbman_result_is_DQ() is TRUE */

/* STAT flags byte of the dequeue entry (QBMAN_DQ_STAT_* bits). */
uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_stat, p);
}

/* Sequence number field (14 bits), used with order restoration. */
uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
}

/* Order-definition point ID field. */
uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
}

/* Frame queue ID the frame was dequeued from. */
uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_fqid, p);
}

uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_byte_count, p);
}

uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_frame_count, p);
}

/* 64-bit FQ context, decoded from the lo/hi word pair starting at word 6. */
uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
	const uint64_t *p = (const uint64_t *)qb_cl(dq);

	return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
}

/* Frame descriptor embedded in the entry, starting at 32-bit word 8.
 * The returned pointer aliases the entry; it is not a copy.
 */
const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (const struct qbman_fd *)&p[8];
}
1137
/**************************************/
/* Parsing state-change notifications */
/**************************************/

/* SCN field layouts; the _in_mem variants cover notifications written to
 * memory, which place state/rid at different offsets than DQRR entries.
 */
static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
static struct qb_attr_code code_scn_state_in_mem =
			QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
static struct qb_attr_code code_scn_rid_in_mem =
			QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);

/* State byte of a (DQRR-delivered) state-change notification. */
uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);

	return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
}

/* Resource ID (24 bits) of a (DQRR-delivered) notification. */
uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);

	return qb_attr_code_decode(&code_scn_rid, p);
}

/* 64-bit notification context, decoded from words 2/3. */
uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
	const uint64_t *p = (const uint64_t *)qb_cl(scn);

	return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
}

/* As above, but for notifications QMan wrote directly to memory. */
uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);

	return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
}

uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);
	uint32_t result_rid;

	result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
	/* In-memory notifications are hardware-endian; convert 24 bits. */
	return make_le24(result_rid);
}
1186
/*****************/
/* Parsing BPSCN */
/*****************/

/* Buffer pool ID: low 14 bits of the notification's resource ID. */
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
}

/* Non-zero when state bit 0 is clear (the bit is the inverse of
 * "has free buffers", hence the negation).
 */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
}

/* Non-zero when state bit 1 (depletion) is set. */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
}

/* Non-zero when state bit 2 (surplus) is set. */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
}

/* Notification context with each 32-bit half byte-swapped to host order.
 * NOTE(review): this swaps both halves with make_le32 — assumes the raw
 * SCN ctx is still hardware-endian at this point; confirm against
 * qbman_result_SCN_ctx's behaviour.
 */
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)make_le32(ctx_hi) << 32 |
		(uint64_t)make_le32(ctx_lo));
}
1221
/*****************/
/* Parsing CGCU  */
/*****************/

/* Congestion group ID: low 16 bits of the notification's resource ID. */
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
}

/* Instantaneous count (presumably, per the "icnt" name): the low 40 bits
 * of the context — 8 bits from the converted high word plus the full
 * converted low word.
 */
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
		(uint64_t)make_le32(ctx_lo);
}
1241
/******************/
/* Buffer release */
/******************/

/* Release-descriptor field layouts. */
/* These should be const, eventually */
/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);
1251
1252 void qbman_release_desc_clear(struct qbman_release_desc *d)
1253 {
1254         uint32_t *cl;
1255
1256         memset(d, 0, sizeof(*d));
1257         cl = qb_cl(d);
1258         qb_attr_code_encode(&code_release_set_me, cl, 1);
1259 }
1260
1261 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
1262 {
1263         uint32_t *cl = qb_cl(d);
1264
1265         qb_attr_code_encode(&code_release_bpid, cl, bpid);
1266 }
1267
1268 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1269 {
1270         uint32_t *cl = qb_cl(d);
1271
1272         qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
1273 }
1274
/* Decode helpers for the RAR register value: ring index, valid-bit and
 * success flag.
 */
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/* Release 1..7 buffer pointers to the pool selected in descriptor 'd'.
 * Returns 0 on success, -EBUSY if no RCR ring slot is available.
 */
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const uint64_t *buffers, unsigned int num_buffers)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

	pr_debug("RAR=%08x\n", rar);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;
	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
	/* Start the release command */
	p = qbman_cena_write_start_wo_shadow(&s->sys,
					     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	/* Copy the caller's buffer pointers to the command */
	u64_to_le32_copy(&p[2], buffers, num_buffers);
	/* Set the verb byte, have to substitute in the valid-bit and the number
	 * of buffers.
	 */
	lwsync();	/* barrier: command body must be visible before verb */
	p[0] = cl[0] | RAR_VB(rar) | num_buffers;
	qbman_cena_write_complete_wo_shadow(&s->sys,
					    QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	return 0;
}
1304
/*******************/
/* Buffer acquires */
/*******************/

/* Acquire-command field layouts. */
/* These should be const, eventually */
static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);

/* Acquire 1..7 buffers from buffer pool 'bpid' into 'buffers'.
 * Returns the number of buffers actually acquired (possibly fewer than
 * requested), -EBUSY if the management command interface is unavailable,
 * or -EIO if hardware reports a failure.
 */
int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
		      unsigned int num_buffers)
{
	uint32_t *p;
	uint32_t rslt, num;

	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	qb_attr_code_encode(&code_acquire_bpid, p, bpid);
	qb_attr_code_encode(&code_acquire_num, p, num_buffers);

	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	num = qb_attr_code_decode(&code_acquire_r_num, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) !=
		     QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
		       bpid, rslt);
		return -EIO;
	}
	QBMAN_BUG_ON(num > num_buffers);
	/* Copy the acquired buffers to the caller's array */
	u64_from_le32_copy(buffers, &p[2], num);
	return (int)num;
}
1352
/*****************/
/* FQ management */
/*****************/

/* FQID field of the "alter FQ state" management commands. */
static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);

/* Issue one of the "alter FQ state" management commands (schedule, force,
 * XON, XOFF) for frame queue 'fqid'. Returns 0 on success, -EBUSY if the
 * management interface is unavailable, -EIO on a hardware error result.
 */
static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
				  uint8_t alt_fq_verb)
{
	uint32_t *p;
	uint32_t rslt;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
		       fqid, alt_fq_verb, rslt);
		return -EIO;
	}

	return 0;
}
1387
/* Thin wrappers: each issues one specific "alter FQ state" verb. */
int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
1407
/**********************/
/* Channel management */
/**********************/

/* WQ-channel configure command field layouts. */
static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
 * would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

/* Issue a WQ-channel configure command for 'channelid'. 'we_mask' selects
 * which attributes to write (CODE_CDAN_WE_*), 'cdan_en' enables/disables
 * CDAN generation and 'ctx' is the notification context. Returns 0 on
 * success, -EBUSY if the management interface is unavailable, -EIO on a
 * hardware error result.
 */
static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
			      uint8_t we_mask, uint8_t cdan_en,
			      uint64_t ctx)
{
	uint32_t *p;
	uint32_t rslt;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	qb_attr_code_encode(&code_cdan_cid, p, channelid);
	qb_attr_code_encode(&code_cdan_we, p, we_mask);
	qb_attr_code_encode(&code_cdan_en, p, cdan_en);
	qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p)
					!= QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, rslt);
		return -EIO;
	}

	return 0;
}
1457
/* Write only the notification context; enable state is untouched. */
int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
			       uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

/* Enable CDAN generation; context is untouched. */
int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

/* Disable CDAN generation; context is untouched. */
int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

/* Write the context and enable CDAN generation in one command. */
int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
				      uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
1487
/* Return the DQRR ring index encoded in the entry's portal address. */
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
	return QBMAN_IDX_FROM_DQRR(dqrr);
}
1492
1493 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
1494 {
1495         struct qbman_result *dq;
1496
1497         dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
1498         return dq;
1499 }
1500
1501 int qbman_swp_send_multiple(struct qbman_swp *s,
1502                             const struct qbman_eq_desc *d,
1503                             const struct qbman_fd *fd,
1504                             int frames_to_send)
1505 {
1506         uint32_t *p;
1507         const uint32_t *cl = qb_cl(d);
1508         uint32_t eqcr_ci;
1509         uint8_t diff;
1510         int sent = 0;
1511         int i;
1512         int initial_pi = s->eqcr.pi;
1513         uint64_t start_pointer;
1514
1515         if (!s->eqcr.available) {
1516                 eqcr_ci = s->eqcr.ci;
1517                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1518                                  QBMAN_CENA_SWP_EQCR_CI) & 0xF;
1519                 diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
1520                                    eqcr_ci, s->eqcr.ci);
1521                 if (!diff)
1522                         goto done;
1523                 s->eqcr.available += diff;
1524         }
1525
1526         /* we are trying to send frames_to_send,
1527          * if we have enough space in the ring
1528          */
1529         while (s->eqcr.available && frames_to_send--) {
1530                 p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
1531                                         QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
1532                 /* Write command (except of first byte) and FD */
1533                 memcpy(&p[1], &cl[1], 7 * 4);
1534                 memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));
1535
1536                 initial_pi++;
1537                 initial_pi &= 0xF;
1538                 s->eqcr.available--;
1539                 sent++;
1540         }
1541
1542 done:
1543         initial_pi =  s->eqcr.pi;
1544         lwsync();
1545
1546         /* in order for flushes to complete faster:
1547          * we use a following trick: we record all lines in 32 bit word
1548          */
1549
1550         initial_pi =  s->eqcr.pi;
1551         for (i = 0; i < sent; i++) {
1552                 p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
1553                                         QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
1554
1555                 p[0] = cl[0] | s->eqcr.pi_vb;
1556                 initial_pi++;
1557                 initial_pi &= 0xF;
1558
1559                 if (!(initial_pi & 7))
1560                         s->eqcr.pi_vb ^= QB_VALID_BIT;
1561         }
1562
1563         initial_pi = s->eqcr.pi;
1564
1565         /* We need  to flush all the lines but without
1566          * load/store operations between them.
1567          * We assign start_pointer before we start loop so that
1568          * in loop we do not read it from memory
1569          */
1570         start_pointer = (uint64_t)s->sys.addr_cena;
1571         for (i = 0; i < sent; i++) {
1572                 p = (uint32_t *)(start_pointer
1573                                  + QBMAN_CENA_SWP_EQCR(initial_pi & 7));
1574                 dcbf((uint64_t)p);
1575                 initial_pi++;
1576                 initial_pi &= 0xF;
1577         }
1578
1579         /* Update producer index for the next call */
1580         s->eqcr.pi = initial_pi;
1581
1582         return sent;
1583 }
1584
/* Return the global qman_version value (populated elsewhere in this file,
 * presumably during portal initialisation — not visible here).
 */
int qbman_get_version(void)
{
	return qman_version;
}