/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 */
/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
 * driver. They are only included via qbman_private.h, which is itself a
 * platform-independent file and is included by all the other driver source.
 *
 * qbman_sys_decl.h is included prior to all other declarations and logic, and
 * it exists to provide compatibility with any linux interfaces our
 * single-source driver code is dependent on (eg. kmalloc). Ie. this file
 * provides linux compatibility.
 *
 * This qbman_sys.h header, on the other hand, is included *after* any common
 * and platform-neutral declarations and logic in qbman_private.h, and exists to
 * implement any platform-specific logic of the qbman driver itself. Ie. it is
 * *not* to provide linux compatibility.
 */
24 #include "qbman_sys_decl.h"
26 #define CENA_WRITE_ENABLE 0
27 #define CINH_WRITE_ENABLE 1
29 /* CINH register offsets */
30 #define QBMAN_CINH_SWP_EQCR_PI 0x800
31 #define QBMAN_CINH_SWP_EQCR_CI 0x840
32 #define QBMAN_CINH_SWP_EQAR 0x8c0
33 #define QBMAN_CINH_SWP_CR_RT 0x900
34 #define QBMAN_CINH_SWP_VDQCR_RT 0x940
35 #define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
36 #define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
37 #define QBMAN_CINH_SWP_DQPI 0xa00
38 #define QBMAN_CINH_SWP_DQRR_ITR 0xa80
39 #define QBMAN_CINH_SWP_DCAP 0xac0
40 #define QBMAN_CINH_SWP_SDQCR 0xb00
41 #define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
42 #define QBMAN_CINH_SWP_RCR_PI 0xc00
43 #define QBMAN_CINH_SWP_RAR 0xcc0
44 #define QBMAN_CINH_SWP_ISR 0xe00
45 #define QBMAN_CINH_SWP_IER 0xe40
46 #define QBMAN_CINH_SWP_ISDR 0xe80
47 #define QBMAN_CINH_SWP_IIR 0xec0
48 #define QBMAN_CINH_SWP_ITPR 0xf40
50 /* CENA register offsets */
51 #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
52 #define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
53 #define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
54 #define QBMAN_CENA_SWP_CR 0x600
55 #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
56 #define QBMAN_CENA_SWP_VDQCR 0x780
57 #define QBMAN_CENA_SWP_EQCR_CI 0x840
59 /* CENA register offsets in memory-backed mode */
60 #define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((uint32_t)(n) << 6))
61 #define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((uint32_t)(n) << 6))
62 #define QBMAN_CENA_SWP_CR_MEM 0x1600
63 #define QBMAN_CENA_SWP_RR_MEM 0x1680
64 #define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
66 /* Debugging assists */
67 static inline void __hexdump(unsigned long start, unsigned long end,
68 unsigned long p, size_t sz, const unsigned char *c)
75 pos += sprintf(buf + pos, "%08lx: ", start);
77 if ((start < p) || (start >= (p + sz)))
78 pos += sprintf(buf + pos, "..");
80 pos += sprintf(buf + pos, "%02x", *(c++));
81 if (!(++start & 15)) {
99 static inline void hexdump(const void *ptr, size_t sz)
101 unsigned long p = (unsigned long)ptr;
102 unsigned long start = p & ~15;
103 unsigned long end = (p + sz + 15) & ~15;
104 const unsigned char *c = ptr;
106 __hexdump(start, end, p, sz, c);
109 /* Currently, the CENA support code expects each 32-bit word to be written in
110 * host order, and these are converted to hardware (little-endian) order on
111 * command submission. However, 64-bit quantities are must be written (and read)
112 * as two 32-bit words with the least-significant word first, irrespective of
115 static inline void u64_to_le32_copy(void *d, const uint64_t *s,
119 const uint32_t *ss = (const uint32_t *)s;
122 /* TBD: the toolchain was choking on the use of 64-bit types up
123 * until recently so this works entirely with 32-bit variables.
124 * When 64-bit types become usable again, investigate better
125 * ways of doing this.
127 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
138 static inline void u64_from_le32_copy(uint64_t *d, const void *s,
141 const uint32_t *ss = s;
142 uint32_t *dd = (uint32_t *)d;
145 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
159 struct qbman_swp_sys {
160 /* On GPP, the sys support for qbman_swp is here. The CENA region isi
161 * not an mmap() of the real portal registers, but an allocated
162 * place-holder, because the actual writes/reads to/from the portal are
163 * marshalled from these allocated areas using QBMan's "MC access
164 * registers". CINH accesses are atomic so there's no need for a
171 enum qbman_eqcr_mode eqcr_mode;
/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
 * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
 * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
 * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
 * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
 * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
 */
182 static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
185 __raw_writel(val, s->addr_cinh + offset);
186 #ifdef QBMAN_CINH_TRACE
187 pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n",
188 s->addr_cinh, s->idx, offset, val);
192 static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
194 uint32_t reg = __raw_readl(s->addr_cinh + offset);
195 #ifdef QBMAN_CINH_TRACE
196 pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n",
197 s->addr_cinh, s->idx, offset, reg);
202 static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
205 void *shadow = s->cena + offset;
207 #ifdef QBMAN_CENA_TRACE
208 pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n",
209 s->addr_cena, s->idx, offset, shadow);
211 QBMAN_BUG_ON(offset & 63);
216 static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
219 #ifdef QBMAN_CENA_TRACE
220 pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
221 s->addr_cena, s->idx, offset);
223 QBMAN_BUG_ON(offset & 63);
225 return (s->addr_cena + offset);
227 return (s->addr_cinh + offset);
231 static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
232 uint32_t offset, void *cmd)
234 const uint32_t *shadow = cmd;
236 #ifdef QBMAN_CENA_TRACE
237 pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n",
238 s->addr_cena, s->idx, offset, shadow);
242 for (loop = 15; loop >= 1; loop--)
243 __raw_writel(shadow[loop], s->addr_cena +
246 __raw_writel(shadow[0], s->addr_cena + offset);
248 for (loop = 15; loop >= 1; loop--)
249 __raw_writel(shadow[loop], s->addr_cinh +
252 __raw_writel(shadow[0], s->addr_cinh + offset);
254 dcbf(s->addr_cena + offset);
257 static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
260 #ifdef QBMAN_CENA_TRACE
261 pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n",
262 s->addr_cena, s->idx, offset);
264 dcbf(s->addr_cena + offset);
267 static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,
270 return __raw_readl(s->addr_cena + offset);
273 static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
275 uint32_t *shadow = (uint32_t *)(s->cena + offset);
277 #ifdef QBMAN_CENA_TRACE
278 pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
279 s->addr_cena, s->idx, offset, shadow);
283 for (loop = 0; loop < 16; loop++)
284 shadow[loop] = __raw_readl(s->addr_cena + offset
287 for (loop = 0; loop < 16; loop++)
288 shadow[loop] = __raw_readl(s->addr_cinh + offset
291 #ifdef QBMAN_CENA_TRACE
297 static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
300 #ifdef QBMAN_CENA_TRACE
301 pr_info("qbman_cena_read(%p:%d:0x%03x)\n",
302 s->addr_cena, s->idx, offset);
304 return s->addr_cena + offset;
307 static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,
310 dccivac(s->addr_cena + offset);
313 static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
316 dccivac(s->addr_cena + offset);
317 prefetch_for_load(s->addr_cena + offset);
320 static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
323 prefetch_for_load(s->addr_cena + offset);
/* The SWP_CFG portal register is special, in that it is used by the
 * platform-specific code rather than the platform-independent code in
 * qbman_portal.c. So use of it is declared locally here.
 */
334 #define QBMAN_CINH_SWP_CFG 0xd00
336 #define SWP_CFG_DQRR_MF_SHIFT 20
337 #define SWP_CFG_EST_SHIFT 16
338 #define SWP_CFG_CPBS_SHIFT 15
339 #define SWP_CFG_WN_SHIFT 14
340 #define SWP_CFG_RPM_SHIFT 12
341 #define SWP_CFG_DCM_SHIFT 10
342 #define SWP_CFG_EPM_SHIFT 8
343 #define SWP_CFG_VPM_SHIFT 7
344 #define SWP_CFG_CPM_SHIFT 6
345 #define SWP_CFG_SD_SHIFT 5
346 #define SWP_CFG_SP_SHIFT 4
347 #define SWP_CFG_SE_SHIFT 3
348 #define SWP_CFG_DP_SHIFT 2
349 #define SWP_CFG_DE_SHIFT 1
350 #define SWP_CFG_EP_SHIFT 0
352 static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
353 uint8_t est, uint8_t rpm, uint8_t dcm,
354 uint8_t epm, int sd, int sp, int se,
355 int dp, int de, int ep)
359 reg = (max_fill << SWP_CFG_DQRR_MF_SHIFT |
360 est << SWP_CFG_EST_SHIFT |
361 wn << SWP_CFG_WN_SHIFT |
362 rpm << SWP_CFG_RPM_SHIFT |
363 dcm << SWP_CFG_DCM_SHIFT |
364 epm << SWP_CFG_EPM_SHIFT |
365 sd << SWP_CFG_SD_SHIFT |
366 sp << SWP_CFG_SP_SHIFT |
367 se << SWP_CFG_SE_SHIFT |
368 dp << SWP_CFG_DP_SHIFT |
369 de << SWP_CFG_DE_SHIFT |
370 ep << SWP_CFG_EP_SHIFT);
375 #define QMAN_RT_MODE 0x00000100
377 #define QMAN_REV_4000 0x04000000
378 #define QMAN_REV_4100 0x04010000
379 #define QMAN_REV_4101 0x04010001
380 #define QMAN_REV_5000 0x05000000
381 #define QMAN_REV_MASK 0xffff0000
383 static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
384 const struct qbman_swp_desc *d,
390 uint8_t wn = CENA_WRITE_ENABLE;
392 uint8_t wn = CINH_WRITE_ENABLE;
395 s->addr_cena = d->cena_bar;
396 s->addr_cinh = d->cinh_bar;
397 s->idx = (uint32_t)d->idx;
398 s->cena = malloc(64*1024);
400 pr_err("Could not allocate page for cena shadow\n");
403 s->eqcr_mode = d->eqcr_mode;
404 QBMAN_BUG_ON(d->idx < 0);
405 #ifdef QBMAN_CHECKING
406 /* We should never be asked to initialise for a portal that isn't in
407 * the power-on state. (Ie. don't forget to reset portals when they are
410 reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
413 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
414 memset(s->addr_cena, 0, 64*1024);
416 /* Invalidate the portal memory.
417 * This ensures no stale cache lines
419 for (i = 0; i < 0x1000; i += 64)
420 dccivac(s->addr_cena + i);
423 if (s->eqcr_mode == qman_eqcr_vb_array)
424 reg = qbman_set_swp_cfg(dqrr_size, wn,
425 0, 3, 2, 3, 1, 1, 1, 1, 1, 1);
427 if ((d->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
428 reg = qbman_set_swp_cfg(dqrr_size, wn,
429 1, 3, 2, 2, 1, 1, 1, 1, 1, 1);
431 reg = qbman_set_swp_cfg(dqrr_size, wn,
432 1, 3, 2, 0, 1, 1, 1, 1, 1, 1);
435 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
436 reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
437 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
438 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
441 qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
442 reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
444 pr_err("The portal %d is not enabled!\n", s->idx);
449 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
450 qbman_cinh_write(s, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
451 qbman_cinh_write(s, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
457 static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
462 #endif /* _QBMAN_SYS_H_ */