/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright 2011 Freescale Semiconductor, Inc.
 */
23 #include <linux/types.h>
27 #include <sys/types.h>
37 #include <rte_byteorder.h>
38 #include <rte_atomic.h>
39 #include <rte_spinlock.h>
40 #include <rte_common.h>
41 #include <rte_debug.h>
/* The following definitions are primarily to allow the single-source driver
 * interfaces to be included by arbitrary program code. Ie. for interfaces that
 * are also available in kernel-space, these definitions provide compatibility
 * with certain attributes and types used in those interfaces.
 */
/* Required compiler attributes */
/* Map Linux-kernel attribute spellings onto DPDK's equivalents. */
#define __maybe_unused __rte_unused
#define __always_unused __rte_unused
#define __packed __rte_packed
#define noinline __attribute__((noinline))

/* Assumed data-cache line size on the DPAA (aarch64) platforms. */
#define L1_CACHE_BYTES 64
#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))

/* Two-level expansion so macro arguments are expanded before stringizing. */
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

/* Element count of a true array (invalid on pointers/decayed parameters). */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* Print-and-flush helper: qbman output is often interleaved with crashes,
 * so flush stdout on every message.  The do/while(0) wrapper makes the
 * macro a single statement, safe inside unbraced if/else.
 */
#define prflush(fmt, args...) \
	do { \
		printf(fmt, ##args); \
		fflush(stdout); \
	} while (0)
#define pr_crit(fmt, args...)	 prflush("CRIT:" fmt, ##args)
#define pr_err(fmt, args...)	 prflush("ERR:" fmt, ##args)
#define pr_warn(fmt, args...)	 prflush("WARN:" fmt, ##args)
#define pr_info(fmt, args...)	 prflush(fmt, ##args)

#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
/* Trace-level logging, compiled in only for bus-debug builds. */
#define pr_debug(fmt, args...)	printf(fmt, ##args)
#else
/* do/while(0) (not an empty {} block) so "if (x) pr_debug(...); else"
 * still parses when debug logging is compiled out.
 */
#define pr_debug(fmt, args...)	do { } while (0)
#endif

#define DPAA_BUG_ON(x) RTE_ASSERT(x)
/* Kernel-compat scalar types.  gfp_t/irqreturn_t exist only so kernel-derived
 * signatures compile; their values are ignored in userspace.
 */
typedef uint64_t dma_addr_t;
typedef cpu_set_t cpumask_t;
typedef uint32_t phandle;
typedef uint32_t gfp_t;
typedef uint32_t irqreturn_t;

/* IRQ registration is delegated to the qbman layer. */
#define request_irq qbman_request_irq
#define free_irq qbman_free_irq

/* Raw MMIO accessors: no byte-swapping, no barriers — callers add both. */
#define __raw_readb(p)	(*(const volatile unsigned char *)(p))
#define __raw_readl(p)	(*(const volatile unsigned int *)(p))
/* do/while(0) instead of a bare {} block so the macro is one statement and
 * "if (x) __raw_writel(v, p); else" parses correctly.
 */
#define __raw_writel(v, p) \
	do { *(volatile unsigned int *)(p) = (v); } while (0)
109 /* to be used as an upper-limit only */
/* Waitqueue stuff */
/* Userspace has no blocking waitqueues; these are no-op placeholders so
 * kernel-derived code compiles unchanged.  wake_up() intentionally does
 * nothing.
 */
typedef struct { } wait_queue_head_t;
#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
#define wake_up(x) do { } while (0)
118 static inline u32 in_be32(volatile void *__p)
120 volatile u32 *p = __p;
121 return rte_be_to_cpu_32(*p);
124 static inline void out_be32(volatile void *__p, u32 val)
126 volatile u32 *p = __p;
127 *p = rte_cpu_to_be_32(val);
/* Data-cache prefetch hints (read / read-write). */
#define dcbt_ro(p) __builtin_prefetch(p, 0)
#define dcbt_rw(p) __builtin_prefetch(p, 1)

/* AArch64 data-cache maintenance: zero (dc zva), clean (dc cvac) and
 * clean+invalidate (dc civac) by VA.  Each is wrapped in do/while(0)
 * rather than a bare {} block so it behaves as a single statement in
 * unbraced if/else.
 */
#define dcbz(p) \
	do { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } while (0)
#define dcbz_64(p) dcbz(p)
#define hwsync() rte_rmb()
#define lwsync() rte_wmb()
#define dcbf(p) \
	do { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); } while (0)
#define dcbf_64(p) dcbf(p)
#define dccivac(p) \
	do { asm volatile("dc civac, %0" : : "r"(p) : "memory"); } while (0)

/* Invalidate then prefetch for read: fresh data will be fetched from the
 * point of coherency on the next access.
 */
#define dcbit_ro(p) \
	do { \
		dccivac(p); \
		asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
	} while (0)

/* Compiler-only barrier (no hardware fence). */
#define barrier() do { asm volatile ("" : : : "memory"); } while (0)
#define cpu_relax barrier
150 static inline uint64_t mfatb(void)
152 uint64_t ret, ret_new, timeout = 200;
154 asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
155 asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
156 while (ret != ret_new && timeout--) {
158 asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
160 DPAA_BUG_ON(!timeout && (ret != ret_new));
/* Spin for a few cycles without bothering the bus */
static inline void cpu_spin(int cycles)
{
	uint64_t now = mfatb();

	/* NOTE(review): "cycles" are counter ticks, and the comparison assumes
	 * the counter does not wrap during the spin — confirm for very large
	 * cycle counts.
	 */
	while (mfatb() < (now + cycles))
		;
}
173 /* Qman/Bman API inlines and macros; */
/* Low 32 bits of a (possibly 64-bit) value. */
#define lower_32_bits(x) ((u32)(x))
/* High 32 bits; the double 16-bit shift avoids undefined behaviour when x
 * happens to be only 32 bits wide (a single ">> 32" would be UB there).
 */
#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
/*
 * Swap bytes of a 48-bit value.  Only the six least-significant bytes of
 * the input are meaningful; their order is reversed in the result.
 */
static inline uint64_t
__bswap_48(uint64_t x)
{
	uint64_t out = 0;
	unsigned int i;

	/* Peel bytes off the low end; each becomes the next most-significant
	 * byte of the output.
	 */
	for (i = 0; i < 6; i++)
		out = (out << 8) | ((x >> (8 * i)) & 0xffULL);
	return out;
}
/*
 * Swap bytes of a 40-bit value.  Only the five least-significant bytes of
 * the input are meaningful; their order is reversed in the result.
 */
static inline uint64_t
__bswap_40(uint64_t x)
{
	uint64_t out = 0;
	unsigned int i;

	/* Reverse the order of the five low-order bytes. */
	for (i = 0; i < 5; i++)
		out = (out << 8) | ((x >> (8 * i)) & 0xffULL);
	return out;
}
/*
 * Swap bytes of a 24-bit value.  Only the three least-significant bytes of
 * the input are meaningful; the low and high bytes trade places while the
 * middle byte stays put.
 */
static inline uint32_t
__bswap_24(uint32_t x)
{
	return ((x << 16) & 0xff0000UL) |
		(x & 0x00ff00UL) |
		((x >> 16) & 0x0000ffUL);
}
/* Linux-style endian conversion names mapped onto DPDK's rte_ helpers. */
#define be64_to_cpu(x) rte_be_to_cpu_64(x)
#define be32_to_cpu(x) rte_be_to_cpu_32(x)
#define be16_to_cpu(x) rte_be_to_cpu_16(x)

#define cpu_to_be64(x) rte_cpu_to_be_64(x)
#define cpu_to_be32(x) rte_cpu_to_be_32(x)
#define cpu_to_be16(x) rte_cpu_to_be_16(x)

/* 48/40/24-bit widths have no rte_ equivalents: swap on little-endian
 * hosts, pass through unchanged on big-endian hosts.
 */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define cpu_to_be48(x) __bswap_48(x)
#define be48_to_cpu(x) __bswap_48(x)

#define cpu_to_be40(x) __bswap_40(x)
#define be40_to_cpu(x) __bswap_40(x)

#define cpu_to_be24(x) __bswap_24(x)
#define be24_to_cpu(x) __bswap_24(x)

#else /* RTE_BIG_ENDIAN */

#define cpu_to_be48(x) (x)
#define be48_to_cpu(x) (x)

#define cpu_to_be40(x) (x)
#define be40_to_cpu(x) (x)

#define cpu_to_be24(x) (x)
#define be24_to_cpu(x) (x)

#endif /* RTE_BIG_ENDIAN */
254 /* When copying aligned words or shorts, try to avoid memcpy() */
255 /* memcpy() stuff - when you know alignments in advance */
256 #define CONFIG_TRY_BETTER_MEMCPY
258 #ifdef CONFIG_TRY_BETTER_MEMCPY
259 static inline void copy_words(void *dest, const void *src, size_t sz)
262 const u32 *__src = src;
263 size_t __sz = sz >> 2;
265 DPAA_BUG_ON((unsigned long)dest & 0x3);
266 DPAA_BUG_ON((unsigned long)src & 0x3);
267 DPAA_BUG_ON(sz & 0x3);
269 *(__dest++) = *(__src++);
272 static inline void copy_shorts(void *dest, const void *src, size_t sz)
275 const u16 *__src = src;
276 size_t __sz = sz >> 1;
278 DPAA_BUG_ON((unsigned long)dest & 0x1);
279 DPAA_BUG_ON((unsigned long)src & 0x1);
280 DPAA_BUG_ON(sz & 0x1);
282 *(__dest++) = *(__src++);
285 static inline void copy_bytes(void *dest, const void *src, size_t sz)
288 const u8 *__src = src;
291 *(__dest++) = *(__src++);
294 #define copy_words memcpy
295 #define copy_shorts memcpy
296 #define copy_bytes memcpy
299 /* Allocator stuff */
300 #define kmalloc(sz, t) malloc(sz)
301 #define vmalloc(sz) malloc(sz)
302 #define kfree(p) { if (p) free(p); }
303 static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
305 void *ptr = malloc(sz);
312 static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
316 if (posix_memalign(&p, 4096, 4096))
319 return (unsigned long)p;
/* Lock stuff: thin aliases over DPDK spinlocks.  NB: the _irq/_irqsave
 * variants do NOT disable interrupts in userspace — they are plain locks
 * and the flags argument is ignored.
 */
#define spinlock_t rte_spinlock_t
#define __SPIN_LOCK_UNLOCKED(x) RTE_SPINLOCK_INITIALIZER
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define spin_lock_init(x) rte_spinlock_init(x)
#define spin_lock_destroy(x)
#define spin_lock(x) rte_spinlock_lock(x)
#define spin_unlock(x) rte_spinlock_unlock(x)
#define spin_lock_irq(x) spin_lock(x)
#define spin_unlock_irq(x) spin_unlock(x)
#define spin_lock_irqsave(x, f) spin_lock_irq(x)
#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)

/* Atomic stuff: 32-bit counters backed by rte_atomic32. */
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)

#define atomic_inc(v) rte_atomic32_add(v, 1)
#define atomic_dec(v) rte_atomic32_sub(v, 1)

/* *_and_test helpers return true when the counter reaches zero. */
#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)

#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
349 #include <dpaa_list.h>
350 #include <dpaa_bits.h>
352 #endif /* __COMPAT_H */