4 * Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * * Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * * Neither the name of Freescale Semiconductor nor the
15 * names of its contributors may be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #ifndef HEADER_COMPAT_H
31 #define HEADER_COMPAT_H
44 #include <net/ethernet.h>
49 #include <sys/types.h>
59 #include <rte_atomic.h>
61 /* The following definitions are primarily to allow the single-source driver
62 * interfaces to be included by arbitrary program code. Ie. for interfaces that
63 * are also available in kernel-space, these definitions provide compatibility
64 * with certain attributes and types used in those interfaces.
67 /* Required compiler attributes */
/* Branch-prediction hints: tell the compiler which way a condition is
 * expected to go so it can lay out the likely path as the fall-through.
 * The !!(x) normalizes any truthy value to exactly 0 or 1.
 */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/* Align an object on an L1 cache-line boundary (L1_CACHE_BYTES is
 * defined elsewhere in this file/project).
 */
#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
/* Given @ptr pointing at @member inside an instance of @type, recover a
 * pointer to the enclosing @type object.  The temporary __mptr exists
 * only so the compiler type-checks @ptr against the member's type before
 * the offset subtraction.
 */
#define container_of(ptr, type, member) ({ \
	typeof(((type *)0)->member)(*__mptr) = (ptr); \
	(type *)((char *)__mptr - offsetof(type, member)); })
/* Two-level stringification so macro arguments are expanded before
 * being turned into a string literal.
 */
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
82 #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
typedef uint64_t dma_addr_t;	/* bus/DMA addresses carried as 64-bit */
typedef cpu_set_t cpumask_t;	/* kernel cpumask_t mapped onto the libc CPU set */
typedef u32 compat_uptr_t;	/* 32-bit on-the-wire form of a user pointer */
/* Widen a 32-bit compat user pointer back to a native pointer, and the
 * reverse.  The intermediate (unsigned long) cast avoids a direct
 * int-to-pointer size mismatch.
 * NOTE(review): the function-body braces are missing from this extract;
 * only the return statements are visible.
 */
static inline void __user *compat_ptr(compat_uptr_t uptr)
	return (void __user *)(unsigned long)uptr;
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
	return (u32)(unsigned long)uptr;
/* 32-bit register accessors; presumably big-endian given the
 * in_be32/out_be32 naming -- confirm against the full source.
 * NOTE(review): only the volatile pointer setup lines survive in this
 * extract; the load/store (and any byte swap) statements are missing.
 */
static inline u32 in_be32(volatile void *__p)
	volatile u32 *p = __p;
static inline void out_be32(volatile void *__p, u32 val)
	volatile u32 *p = __p;
/* Console logging helpers.  prflush() printf()s the message (the name
 * suggests it also flushes stdout in the full source -- the wrapper
 * lines are missing from this extract).  pr_crit/pr_err/pr_warn prepend
 * a severity tag; pr_info prints verbatim; pr_debug and might_sleep_if
 * compile to nothing.
 */
#define prflush(fmt, args...) \
	printf(fmt, ##args); \
#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
#define pr_info(fmt, args...) prflush(fmt, ##args)
#define pr_debug(fmt, args...) {}
#define might_sleep_if(c) {}
/* Warn once per expansion site: the block-scoped static flag latches
 * after the first hit.  (The ## pasting yields the literal identifier
 * warned___LINE__, but block scope still keeps one flag per call site.)
 * NOTE(review): the do/while braces around WARN_ON and the #ifdef that
 * selects between the two QBMAN_BUG_ON definitions are missing from
 * this extract -- the second definition is the no-op (release) variant.
 */
#define WARN_ON(c, str) \
	static int warned_##__LINE__; \
	if ((c) && !warned_##__LINE__) { \
		pr_warn("%s\n", str); \
		pr_warn("(%s:%d)\n", __FILE__, __LINE__); \
		warned_##__LINE__ = 1; \
#define QBMAN_BUG_ON(c) WARN_ON(c, "BUG")
#define QBMAN_BUG_ON(c) {}
148 #define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1))
/* Doubly-linked circular list, modelled on the kernel's list_head API.
 * INIT_LIST_HEAD() points both links of the node back at itself, which
 * is the canonical empty list.
 * NOTE(review): the "struct list_head {" opener and closing brace, the
 * LIST_HEAD static-initializer body, and the do/while wrapper around
 * INIT_LIST_HEAD are missing from this extract.
 */
	struct list_head *prev;
	struct list_head *next;
#define LIST_HEAD(n) \
struct list_head n = { \
#define INIT_LIST_HEAD(p) \
	struct list_head *__p298 = (p); \
	__p298->next = __p298; \
	__p298->prev = __p298->next; \
/* Map a pointer to an embedded list node back to its containing object.
 * Cast through char * (not void *) so the pointer arithmetic is valid
 * ISO C rather than a GNU extension, parenthesize @node so arbitrary
 * expressions can be passed, and parenthesize the whole expansion.
 */
#define list_entry(node, type, member) \
	((type *)((char *)(node) - offsetof(type, member)))
/* list_empty(): true when @p links back to itself in both directions.
 * list_add(): splice @p in immediately after head @l (LIFO order).
 * list_add_tail(): splice @p in immediately before head @l (FIFO order).
 * NOTE(review): the statement-expression / do-while wrappers around
 * these three macros are missing from this extract.
 */
#define list_empty(p) \
	const struct list_head *__p298 = (p); \
	((__p298->next == __p298) && (__p298->prev == __p298)); \
#define list_add(p, l) \
	struct list_head *__p298 = (p); \
	struct list_head *__l298 = (l); \
	__p298->next = __l298->next; \
	__p298->prev = __l298; \
	__l298->next->prev = __p298; \
	__l298->next = __p298; \
#define list_add_tail(p, l) \
	struct list_head *__p298 = (p); \
	struct list_head *__l298 = (l); \
	__p298->prev = __l298->prev; \
	__p298->next = __l298; \
	__l298->prev->next = __p298; \
	__l298->prev = __p298; \
/* List iteration helpers.  The _safe variants cache the next node in @j
 * so the current node @i may be unlinked while walking.  The _entry
 * variants iterate over containing objects via list_entry(), where
 * @name is the embedded list_head member.  list_del() unlinks a node
 * from its neighbours but leaves the node's own pointers untouched.
 * NOTE(review): the tails of the _safe loop headers and the braces
 * around list_del are missing from this extract.
 */
#define list_for_each(i, l) \
	for (i = (l)->next; i != (l); i = i->next)
#define list_for_each_safe(i, j, l) \
	for (i = (l)->next, j = i->next; i != (l); \
#define list_for_each_entry(i, l, name) \
	for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
		i = list_entry(i->name.next, typeof(*i), name))
#define list_for_each_entry_safe(i, j, l, name) \
	for (i = list_entry((l)->next, typeof(*i), name), \
		j = list_entry(i->name.next, typeof(*j), name); \
		i = j, j = list_entry(j->name.next, typeof(*j), name))
#define list_del(i) \
	(i)->next->prev = (i)->prev; \
	(i)->prev->next = (i)->next; \
/* Other miscellaneous interfaces our APIs depend on; */
/* Split a value into 32-bit halves.  upper_32_bits() shifts by 16
 * twice rather than by 32 so the macro is well-defined even when @x is
 * only 32 bits wide (a single 32-bit shift of a 32-bit type is UB).
 */
#define lower_32_bits(x) ((u32)(x))
#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
/* Compiler/type stuff */
typedef unsigned int gfp_t;	/* kernel allocation-flags type; flags are ignored here */
typedef uint32_t phandle;	/* device-tree node handle */
/* Raw memory-mapped I/O accessors: plain volatile loads/stores with no
 * barriers and no byte swapping.
 */
#define __raw_readb(p) (*(const volatile unsigned char *)(p))
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
/* do/while(0) instead of a bare brace block so the macro behaves as a
 * single statement and is safe in an unbraced if/else.
 */
#define __raw_writel(v, p) \
	do { *(volatile unsigned int *)(p) = (v); } while (0)
/* memcpy() stuff - when you know alignments in advance */
#ifdef CONFIG_TRY_BETTER_MEMCPY
/* Element-wise copies that exploit caller-guaranteed alignment: words
 * (4-byte), shorts (2-byte) and bytes.  Each asserts the claimed
 * alignment of dest/src/size via QBMAN_BUG_ON before copying.
 * NOTE(review): the destination-pointer declarations, the while-loop
 * headers around the copy statements, the closing braces, and the
 * #else that selects the memcpy fallbacks below are all missing from
 * this extract.
 */
static inline void copy_words(void *dest, const void *src, size_t sz)
	const u32 *__src = src;
	size_t __sz = sz >> 2;
	QBMAN_BUG_ON((unsigned long)dest & 0x3);
	QBMAN_BUG_ON((unsigned long)src & 0x3);
	QBMAN_BUG_ON(sz & 0x3);
		*(__dest++) = *(__src++);
static inline void copy_shorts(void *dest, const void *src, size_t sz)
	const u16 *__src = src;
	size_t __sz = sz >> 1;
	QBMAN_BUG_ON((unsigned long)dest & 0x1);
	QBMAN_BUG_ON((unsigned long)src & 0x1);
	QBMAN_BUG_ON(sz & 0x1);
		*(__dest++) = *(__src++);
static inline void copy_bytes(void *dest, const void *src, size_t sz)
	const u8 *__src = src;
		*(__dest++) = *(__src++);
/* Fallbacks when CONFIG_TRY_BETTER_MEMCPY is not set: plain memcpy. */
#define copy_words memcpy
#define copy_shorts memcpy
#define copy_bytes memcpy
/* Completion stuff */
/* Minimal stand-in for kernel completions: a plain int flag declared by
 * DECLARE_COMPLETION and set by complete().  wait_for_completion()'s
 * body is missing from this extract; presumably it polls the flag until
 * non-zero -- confirm against the full source.
 */
#define DECLARE_COMPLETION(n) int n = 0
#define complete(n) { *n = 1; }
#define wait_for_completion(n) \
/* Allocator stuff */
/* Kernel allocator shims over libc malloc(); the gfp-flags argument of
 * kmalloc is accepted but ignored, and vmalloc has no special virtual
 * mapping semantics in user space.
 */
#define kmalloc(sz, t) malloc(sz)
#define vmalloc(sz) malloc(sz)
/* free(NULL) is a guaranteed no-op, so the original "if (p)" guard was
 * redundant; the braces are kept so the macro's statement shape at
 * existing call sites is unchanged.
 */
#define kfree(p) { free(p); }
/* Zeroing allocator shims.
 * NOTE(review): kzalloc's NULL-check/zeroing statements and the bodies'
 * braces are missing from this extract; get_zeroed_page() allocates one
 * 4096-byte, 4096-aligned block via posix_memalign and presumably
 * zeroes it and returns 0 on failure -- confirm against the full
 * source.  free_page()'s body is likewise not visible.
 */
static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
	void *ptr = malloc(sz);
static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
	if (posix_memalign(&p, 4096, 4096))
	return (unsigned long)p;
static inline void free_page(unsigned long p)
/* Bitfield stuff. */
/* Bits live in an array of unsigned long.  BITS_IDX selects the word
 * holding bit @idx and BITS_MASK the bit within that word.
 * SHIFT_PER_ULONG is log2 of the word width: 5 on 32-bit targets, 6 on
 * 64-bit (the ?: is resolved at compile time).
 */
#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
#define BITS_MASK(idx) ((unsigned long)1 << ((idx) & (BITS_PER_ULONG - 1)))
#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
/* Bit test/set/clear helpers over an unsigned long array.  The *_bits
 * forms take a pre-computed mask and word pointer; the *_bit forms take
 * a bit index and resolve the word via BITS_IDX/BITS_MASK.  The
 * test_and_* forms appear to be plain read-then-modify pairs, i.e. not
 * atomic -- confirm against the full source.
 * NOTE(review): the bodies' braces and the actual mask manipulation /
 * return statements of several functions are missing from this extract.
 */
static inline unsigned long test_bits(unsigned long mask,
				volatile unsigned long *p)
static inline int test_bit(int idx, volatile unsigned long *bits)
	return test_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
static inline void set_bits(unsigned long mask, volatile unsigned long *p)
static inline void set_bit(int idx, volatile unsigned long *bits)
	set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
static inline void clear_bit(int idx, volatile unsigned long *bits)
	clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
static inline unsigned long test_and_set_bits(unsigned long mask,
				volatile unsigned long *p)
	unsigned long ret = test_bits(mask, p);
static inline int test_and_set_bit(int idx, volatile unsigned long *bits)
	int ret = test_bit(idx, bits);
static inline int test_and_clear_bit(int idx, volatile unsigned long *bits)
	int ret = test_bit(idx, bits);
	clear_bit(idx, bits);
/* Bit scanning and 64-bit division helpers.
 * find_next_zero_bit() scans upward starting at idx+1 (note the
 * pre-increment); find_first_zero_bit() scans from bit 0.  Both stop at
 * the first clear bit or at @limit.
 * NOTE(review): the braces, the idx initialization, the return
 * statements, and div64_u64()'s body are missing from this extract.
 */
static inline int find_next_zero_bit(unsigned long *bits, int limit, int idx)
	while ((++idx < limit) && test_bit(idx, bits))
static inline int find_first_zero_bit(unsigned long *bits, int limit)
	while (test_bit(idx, bits) && (++idx < limit))
static inline u64 div64_u64(u64 n, u64 d)
/* Linux-style atomic_t API expressed in terms of DPDK's rte_atomic32
 * operations.  Note the DPDK argument order: the atomic comes first and
 * the operand second, e.g. rte_atomic32_add(v, 1).  The *_and_test
 * forms return true when the result is zero.
 */
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)
#define atomic_inc(v) rte_atomic32_add(v, 1)
#define atomic_dec(v) rte_atomic32_sub(v, 1)
#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)
#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
410 #endif /* HEADER_COMPAT_H */