#ifndef included_clib_h
#define included_clib_h
+#include <stddef.h>
+
+#if __has_include(<vppinfra/config.h>)
+#include <vppinfra/config.h>
+#endif
+
+#ifdef __x86_64__
+#include <x86intrin.h>
+#endif
+
/* Standalone means we do not assume we are running on a Unix box. */
#if ! defined (CLIB_STANDALONE) && ! defined (CLIB_LINUX_KERNEL)
#define CLIB_UNIX
#endif
+#ifdef __linux__
+#define CLIB_LINUX 1
+#else
+#define CLIB_LINUX 0
+#endif
+
#include <vppinfra/types.h>
+#include <vppinfra/atomics.h>
/* Global DEBUG flag.  Setting this to 1 enables ASSERT
   (see vppinfra/error.h) & other debugging code; 0 disables it. */
#ifndef CLIB_DEBUG
#define CLIB_DEBUG 0
#endif
#define ARRAY_LEN(x) (sizeof (x)/sizeof (x[0]))
#define _STRUCT_FIELD(t,f) (((t *) 0)->f)
-#define STRUCT_OFFSET_OF(t,f) ((uword) & _STRUCT_FIELD (t, f))
-#define STRUCT_BIT_OFFSET_OF(t,f) (BITS(u8) * (uword) & _STRUCT_FIELD (t, f))
+#define STRUCT_OFFSET_OF(t,f) offsetof(t, f)
+#define STRUCT_BIT_OFFSET_OF(t,f) (BITS(u8) * STRUCT_OFFSET_OF (t, f))
#define STRUCT_SIZE_OF(t,f) (sizeof (_STRUCT_FIELD (t, f)))
#define STRUCT_BITS_OF(t,f) (BITS (_STRUCT_FIELD (t, f)))
#define STRUCT_ARRAY_LEN(t,f) ARRAY_LEN (_STRUCT_FIELD (t, f))
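/* Example with a hypothetical type: given
 *   typedef struct { u8 a; u32 b[4]; } bar_t;
 * STRUCT_OFFSET_OF (bar_t, b) is 4, STRUCT_SIZE_OF (bar_t, b) is 16 and
 * STRUCT_ARRAY_LEN (bar_t, b) is 4. */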
#define CLIB_PACKED(x) x __attribute__ ((packed))
#define CLIB_UNUSED(x) x __attribute__ ((unused))
+/* similar to CLIB_CACHE_LINE_ALIGN_MARK() but with arbitrary alignment */
+#define CLIB_ALIGN_MARK(name, alignment) u8 name[0] __attribute__((aligned(alignment)))
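+/* Example with a hypothetical struct: in
+ *   typedef struct { u8 hot[3]; CLIB_ALIGN_MARK (c1, 16); u8 cold[4]; } foo_t;
+ * the zero-size 'c1' member forces 'cold' onto the next 16-byte boundary,
+ * so offsetof (foo_t, cold) is 16. */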
+
+/* Make a string from the macro's argument */
+#define CLIB_STRING_MACRO(x) #x
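+/* e.g. CLIB_STRING_MACRO (foo) expands to "foo" */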
+
+#define CLIB_STRING_ARRAY(...) \
+ (char *[]) { __VA_ARGS__, 0 }
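+/* e.g. CLIB_STRING_ARRAY ("red", "green") builds the compound literal
+ * (char *[]) { "red", "green", 0 }, a null-terminated array of strings. */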
+
+/* sanitizers */
+#ifdef __has_feature
+#if __has_feature(address_sanitizer)
+#define CLIB_SANITIZE_ADDR 1
+#endif
+#elif defined(__SANITIZE_ADDRESS__)
+#define CLIB_SANITIZE_ADDR 1
+#endif
+
+#define __clib_unused __attribute__ ((unused))
+#define __clib_weak __attribute__ ((weak))
+#define __clib_packed __attribute__ ((packed))
+#define __clib_flatten __attribute__ ((flatten))
+#define __clib_constructor __attribute__ ((constructor))
+#define __clib_noinline __attribute__ ((noinline))
+#ifdef __clang__
+#define __clib_noclone
+#else
+#define __clib_noclone __attribute__ ((noclone))
+#endif
+#define __clib_aligned(x) __attribute__ ((aligned(x)))
+#define __clib_section(s) __attribute__ ((section(s)))
+#define __clib_warn_unused_result __attribute__ ((warn_unused_result))
+#define __clib_export __attribute__ ((visibility("default")))
+#ifdef __clang__
+#define __clib_no_tail_calls __attribute__ ((disable_tail_calls))
+#else
+#define __clib_no_tail_calls \
+ __attribute__ ((optimize ("no-optimize-sibling-calls")))
+#endif
+
+#ifdef CLIB_SANITIZE_ADDR
+#define __clib_nosanitize_addr __attribute__ ((no_sanitize_address))
+#else
+#define __clib_nosanitize_addr
+#endif
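+/* e.g. '__clib_export u32 foo_fn (void);' (hypothetical function) marks
+ * foo_fn with default visibility so it is exported from a shared library
+ * built with -fvisibility=hidden; __clib_nosanitize_addr likewise excludes
+ * a single function from ASan instrumentation. */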
+
#define never_inline __attribute__ ((__noinline__))
#if CLIB_DEBUG > 0
#define always_inline static inline
#define static_always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#define static_always_inline static inline __attribute__ ((__always_inline__))
#endif

/* Hints to compiler about hot/cold code. */
#define PREDICT_FALSE(x) __builtin_expect((x),0)
#define PREDICT_TRUE(x) __builtin_expect((x),1)
+#define COMPILE_TIME_CONST(x) __builtin_constant_p (x)
+#define CLIB_ASSUME(x) \
+ do \
+ { \
+ if (!(x)) \
+ __builtin_unreachable (); \
+ } \
+ while (0)
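+/* e.g. CLIB_ASSUME (n <= 8); tells the optimizer the condition always
+ * holds (so it may drop range checks); if it is ever false at run time,
+ * behavior is undefined. */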
+
+/*
+ * Compiler barrier:
+ *  - prevents the compiler from reordering memory accesses across this
+ *    boundary
+ *  - prevents the compiler from caching values in registers (forces a
+ *    reload)
+ * Not to be confused with the CPU memory barrier below.
+ */
+#define CLIB_COMPILER_BARRIER() asm volatile ("":::"memory")
/* Full memory barrier (read and write). */
#define CLIB_MEMORY_BARRIER() __sync_synchronize ()
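/* Example: a producer that fills a record and then publishes it needs the
 * full barrier so the stores cannot be reordered:
 *   r->data = v; CLIB_MEMORY_BARRIER (); r->ready = 1;
 * CLIB_COMPILER_BARRIER () constrains only the compiler, not the CPU. */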
/* Arranges for function to be called after main exits. */
#define EXIT_FUNCTION(decl) \
  decl __attribute ((destructor)); \
  decl
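/* Usage sketch (hypothetical function name):
 *   EXIT_FUNCTION (static void my_cleanup (void));
 * declares my_cleanup with the destructor attribute so that it is
 * invoked automatically when the program terminates. */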
-/* Use __builtin_clz if available. */
-#ifdef __GNUC__
-#include <features.h>
-#if __GNUC_PREREQ(3, 4)
-#if uword_bits == 64
-#define count_leading_zeros(count,x) count = __builtin_clzll (x)
-#define count_trailing_zeros(count,x) count = __builtin_ctzll (x)
-#else
-#define count_leading_zeros(count,x) count = __builtin_clzl (x)
-#define count_trailing_zeros(count,x) count = __builtin_ctzl (x)
-#endif
-#endif
-#endif
-
-#ifndef count_leading_zeros
-
-/* Misc. integer arithmetic functions. */
-#if defined (i386)
-#define count_leading_zeros(count, x) \
- do { \
- word _clz; \
- __asm__ ("bsrl %1,%0" \
- : "=r" (_clz) : "rm" ((word) (x)));\
- (count) = _clz ^ 31; \
- } while (0)
-
-#define count_trailing_zeros(count, x) \
- __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((word)(x)))
-#endif /* i386 */
-
-#if defined (__alpha__) && defined (HAVE_CIX)
-#define count_leading_zeros(count, x) \
- __asm__ ("ctlz %1,%0" \
- : "=r" ((word) (count)) \
- : "r" ((word) (x)))
-#define count_trailing_zeros(count, x) \
- __asm__ ("cttz %1,%0" \
- : "=r" ((word) (count)) \
- : "r" ((word) (x)))
-#endif /* alpha && HAVE_CIX */
-
-#if __mips >= 4
-
-/* Select between 32/64 opcodes. */
-#if uword_bits == 32
-#define count_leading_zeros(_count, _x) \
- __asm__ ("clz %[count],%[x]" \
- : [count] "=r" ((word) (_count)) \
- : [x] "r" ((word) (_x)))
-#else
-#define count_leading_zeros(_count, _x) \
- __asm__ ("dclz %[count],%[x]" \
- : [count] "=r" ((word) (_count)) \
- : [x] "r" ((word) (_x)))
-#endif
-
-#endif /* __mips >= 4 */
-#endif /* count_leading_zeros */
-
-#if defined (count_leading_zeros)
+#include <vppinfra/bitops.h>
always_inline uword
min_log2 (uword x)
{
uword n;
- count_leading_zeros (n, x);
+ n = count_leading_zeros (x);
return BITS (uword) - n - 1;
}
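/* min_log2 (x) is floor(log2(x)), the index of the highest set bit;
 * e.g. min_log2 (72) is 6, since 72 = 0x48. */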
-#else
-always_inline uword
-min_log2 (uword x)
-{
- uword a = x, b = BITS (uword) / 2, c = 0, r = 0;
-
- /* Reduce x to 4 bit result. */
-#define _ \
-{ \
- c = a >> b; \
- if (c) a = c; \
- if (c) r += b; \
- b /= 2; \
-}
-
- if (BITS (uword) > 32)
- _;
- _;
- _;
- _;
-#undef _
-
- /* Do table lookup on 4 bit partial. */
- if (BITS (uword) > 32)
- {
- const u64 table = 0x3333333322221104LL;
- uword t = (table >> (4 * a)) & 0xf;
- r = t < 4 ? r + t : ~0;
- }
- else
- {
- const u32 table = 0x22221104;
- uword t = (a & 8) ? 3 : ((table >> (4 * a)) & 0xf);
- r = t < 4 ? r + t : ~0;
- }
-
- return r;
-}
-#endif
always_inline uword
max_log2 (uword x)
{
  uword l = min_log2 (x);
  if (x > ((uword) 1 << l))
    l += 1;
  return l;
}
always_inline uword
pow2_mask (uword x)
{
+#ifdef __BMI2__
+ return _bzhi_u64 (-1ULL, x);
+#endif
return ((uword) 1 << x) - (uword) 1;
}
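/* e.g. pow2_mask (6) is 0x3f, a mask of the six low-order bits. */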
always_inline uword
is_pow2 (uword x)
{
  return 0 == (x & (x - 1));
}
+always_inline uword
+round_down_pow2 (uword x, uword pow2)
+{
+  return x & ~(pow2 - 1);
+}
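+/* e.g. round_down_pow2 (77, 64) is 64; pow2 must be a power of two. */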
+
always_inline uword
round_pow2 (uword x, uword pow2)
{
  return (x + pow2 - 1) & ~(pow2 - 1);
}

always_inline uword
first_set (uword x)
{
  return x & -x;
}
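/* e.g. round_pow2 (77, 64) is 128 (rounds up), and first_set (72) is 8,
 * the value of the lowest set bit. */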
-always_inline uword
-log2_first_set (uword x)
-{
- uword result;
-#ifdef count_trailing_zeros
- count_trailing_zeros (result, x);
-#else
- result = min_log2 (first_set (x));
-#endif
- return result;
-}
-
always_inline f64
flt_round_down (f64 x)
{
  return (int) x;
}

always_inline word
flt_round_nearest (f64 x)
{
  return (word) (x + .5);
}

always_inline f64
flt_round_to_multiple (f64 x, f64 f)
{
  return f * flt_round_nearest (x / f);
}
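/* e.g. flt_round_nearest (2.6) is 3, and flt_round_to_multiple (7.3, 2.0)
 * is 8.0 (7.3 rounded to the nearest multiple of 2). */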
+always_inline uword
+extract_bits (uword x, int start, int count)
+{
+#ifdef __BMI__
+ return _bextr_u64 (x, start, count);
+#endif
+ return (x >> start) & pow2_mask (count);
+}
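+/* e.g. extract_bits (0x12345678, 8, 12) is 0x456: the 12 bits starting
+ * at bit 8. */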
+
#define clib_max(x,y) \
({ \
  __typeof__ (x) _x = (x); \
  __typeof__ (y) _y = (y); \
  _x > _y ? _x : _y; \
})

#define clib_min(x,y) \
({ \
  __typeof__ (x) _x = (x); \
  __typeof__ (y) _y = (y); \
  _x < _y ? _x : _y; \
})
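/* e.g. clib_max (3, 7) is 7; the statement-expression form evaluates each
 * argument exactly once. */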
+#define clib_clamp(x,lo,hi) \
+({ \
+ __typeof__ (x) _x = (x); \
+ __typeof__ (lo) _lo = (lo); \
+ __typeof__ (hi) _hi = (hi); \
+ _x < _lo ? _lo : (_x > _hi ? _hi : _x); \
+})
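+/* e.g. clib_clamp (57, 0, 49) is 49 and clib_clamp (-3, 0, 49) is 0. */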
+
#define clib_abs(x) \
({ \
  __typeof__ (x) _x = (x); \
  _x < 0 ? -_x : _x; \
})

uword
clib_backtrace (uword * callers, uword max_callers, uword n_frames_to_skip);
+#include <vppinfra/byte_order.h>
#endif /* included_clib_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */