X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fclib.h;h=d14582492d6908c202aae719f5c5110373ca4027;hb=607eb203b1e954ac3f7ed82bd7bde3cf3aad60cf;hp=6961d100a753cccc979d22f7c787d572f2057e55;hpb=a2185120d542040c2f46beec190db9a6c6e11588;p=vpp.git

diff --git a/src/vppinfra/clib.h b/src/vppinfra/clib.h
index 6961d100a75..d14582492d6 100644
--- a/src/vppinfra/clib.h
+++ b/src/vppinfra/clib.h
@@ -38,7 +38,11 @@
 #ifndef included_clib_h
 #define included_clib_h
 
+#include <stddef.h>
+
+#if __has_include(<vppinfra/config.h>)
 #include <vppinfra/config.h>
+#endif
 
 #ifdef __x86_64__
 #include <x86intrin.h>
@@ -49,6 +53,12 @@
 #define CLIB_UNIX
 #endif
 
+#ifdef __linux__
+#define CLIB_LINUX 1
+#else
+#define CLIB_LINUX 0
+#endif
+
 #include <vppinfra/types.h>
 #include <vppinfra/atomics.h>
 
@@ -64,10 +74,12 @@
 
 #define BITS(x) (8*sizeof(x))
 #define ARRAY_LEN(x) (sizeof (x)/sizeof (x[0]))
+#define FOREACH_ARRAY_ELT(a, b) \
+  for (typeof ((b)[0]) *(a) = (b); (a) - (b) < ARRAY_LEN (b); (a)++)
 
 #define _STRUCT_FIELD(t,f) (((t *) 0)->f)
-#define STRUCT_OFFSET_OF(t,f) ((uword) & _STRUCT_FIELD (t, f))
-#define STRUCT_BIT_OFFSET_OF(t,f) (BITS(u8) * (uword) & _STRUCT_FIELD (t, f))
+#define STRUCT_OFFSET_OF(t,f) offsetof(t, f)
+#define STRUCT_BIT_OFFSET_OF(t,f) (BITS(u8) * STRUCT_OFFSET_OF (t, f))
 #define STRUCT_SIZE_OF(t,f) (sizeof (_STRUCT_FIELD (t, f)))
 #define STRUCT_BITS_OF(t,f) (BITS (_STRUCT_FIELD (t, f)))
 #define STRUCT_ARRAY_LEN(t,f) ARRAY_LEN (_STRUCT_FIELD (t, f))
@@ -91,14 +103,45 @@
 
 /* Make a string from the macro's argument */
 #define CLIB_STRING_MACRO(x) #x
 
+#define CLIB_STRING_ARRAY(...) \
+  (char *[]) { __VA_ARGS__, 0 }
+
+/* sanitizers */
+#ifdef __has_feature
+#if __has_feature(address_sanitizer)
+#define CLIB_SANITIZE_ADDR 1
+#endif
+#elif defined(__SANITIZE_ADDRESS__)
+#define CLIB_SANITIZE_ADDR 1
+#endif
+
 #define __clib_unused __attribute__ ((unused))
 #define __clib_weak __attribute__ ((weak))
 #define __clib_packed __attribute__ ((packed))
+#define __clib_flatten __attribute__ ((flatten))
 #define __clib_constructor __attribute__ ((constructor))
 #define __clib_noinline __attribute__ ((noinline))
+#ifdef __clang__
+#define __clib_noclone
+#else
+#define __clib_noclone __attribute__ ((noclone))
+#endif
 #define __clib_aligned(x) __attribute__ ((aligned(x)))
 #define __clib_section(s) __attribute__ ((section(s)))
 #define __clib_warn_unused_result __attribute__ ((warn_unused_result))
+#define __clib_export __attribute__ ((visibility("default")))
+#ifdef __clang__
+#define __clib_no_tail_calls __attribute__ ((disable_tail_calls))
+#else
+#define __clib_no_tail_calls \
+  __attribute__ ((optimize ("no-optimize-sibling-calls")))
+#endif
+
+#ifdef CLIB_SANITIZE_ADDR
+#define __clib_nosanitize_addr __attribute__ ((no_sanitize_address))
+#else
+#define __clib_nosanitize_addr
+#endif
 
 #define never_inline __attribute__ ((__noinline__))
 
@@ -118,10 +161,18 @@
 /* Hints to compiler about hot/cold code. */
 #define PREDICT_FALSE(x) __builtin_expect((x),0)
 #define PREDICT_TRUE(x) __builtin_expect((x),1)
+#define COMPILE_TIME_CONST(x) __builtin_constant_p (x)
+#define CLIB_ASSUME(x) \
+  do \
+    { \
+      if (!(x)) \
+        __builtin_unreachable (); \
+    } \
+  while (0)
 
 /*
  * Compiler barrier
- *   prevent compiler to reorder memory access accross this boundary
+ *   prevent compiler to reorder memory access across this boundary
  *   prevent compiler to cache values in register (force reload)
  * Not to be confused with CPU memory barrier below
  */
@@ -130,7 +181,7 @@
 /* Full memory barrier (read and write). */
 #define CLIB_MEMORY_BARRIER() __sync_synchronize ()
 
-#if __x86_64__
+#if __SSE__
 #define CLIB_MEMORY_STORE_BARRIER() __builtin_ia32_sfence ()
 #else
 #define CLIB_MEMORY_STORE_BARRIER() __sync_synchronize ()
@@ -146,16 +197,17 @@
   decl __attribute ((destructor)); \
   decl
 
-/* Use __builtin_clz if available. */
-#if uword_bits == 64
-#define count_leading_zeros(x) __builtin_clzll (x)
-#define count_trailing_zeros(x) __builtin_ctzll (x)
-#else
-#define count_leading_zeros(x) __builtin_clzl (x)
-#define count_trailing_zeros(x) __builtin_ctzl (x)
+always_inline uword
+pow2_mask (uword x)
+{
+#ifdef __BMI2__
+  return _bzhi_u64 (-1ULL, x);
 #endif
+  return ((uword) 1 << x) - (uword) 1;
+}
+
+#include <vppinfra/bitops.h>
 
-#if defined (count_leading_zeros)
 always_inline uword
 min_log2 (uword x)
 {
@@ -163,45 +215,6 @@ min_log2 (uword x)
   n = count_leading_zeros (x);
   return BITS (uword) - n - 1;
 }
-#else
-always_inline uword
-min_log2 (uword x)
-{
-  uword a = x, b = BITS (uword) / 2, c = 0, r = 0;
-
-  /* Reduce x to 4 bit result. */
-#define _ \
-{ \
-  c = a >> b; \
-  if (c) a = c; \
-  if (c) r += b; \
-  b /= 2; \
-}
-
-  if (BITS (uword) > 32)
-    _;
-  _;
-  _;
-  _;
-#undef _
-
-  /* Do table lookup on 4 bit partial. */
-  if (BITS (uword) > 32)
-    {
-      const u64 table = 0x3333333322221104LL;
-      uword t = (table >> (4 * a)) & 0xf;
-      r = t < 4 ? r + t : ~0;
-    }
-  else
-    {
-      const u32 table = 0x22221104;
-      uword t = (a & 8) ? 3 : ((table >> (4 * a)) & 0xf);
-      r = t < 4 ? r + t : ~0;
-    }
-
-  return r;
-}
-#endif
 
 always_inline uword
 max_log2 (uword x)
@@ -232,12 +245,6 @@ min_log2_u64 (u64 x)
     }
 }
 
-always_inline uword
-pow2_mask (uword x)
-{
-  return ((uword) 1 << x) - (uword) 1;
-}
-
 always_inline uword
 max_pow2 (uword x)
 {
@@ -253,6 +260,12 @@ is_pow2 (uword x)
   return 0 == (x & (x - 1));
 }
 
+always_inline uword
+round_down_pow2 (uword x, uword pow2)
+{
+  return (x) & ~(pow2 - 1);
+}
+
 always_inline uword
 round_pow2 (uword x, uword pow2)
 {
@@ -271,18 +284,6 @@ first_set (uword x)
   return x & -x;
 }
 
-always_inline uword
-log2_first_set (uword x)
-{
-  uword result;
-#ifdef count_trailing_zeros
-  result = count_trailing_zeros (x);
-#else
-  result = min_log2 (first_set (x));
-#endif
-  return result;
-}
-
 always_inline f64
 flt_round_down (f64 x)
 {
@@ -324,12 +325,58 @@ extract_bits (uword x, int start, int count)
   _x < _y ? _x : _y; \
 })
 
+#define clib_clamp(x,lo,hi) \
+({ \
+  __typeof__ (x) _x = (x); \
+  __typeof__ (lo) _lo = (lo); \
+  __typeof__ (hi) _hi = (hi); \
+  _x < _lo ? _lo : (_x > _hi ? _hi : _x); \
+})
+
 #define clib_abs(x) \
 ({ \
   __typeof__ (x) _x = (x); \
  _x < 0 ? -_x : _x; \
 })
 
+static_always_inline u64
+u64_add_with_carry (u64 *carry, u64 a, u64 b)
+{
+#if defined(__x86_64__)
+  unsigned long long v;
+  *carry = _addcarry_u64 (*carry, a, b, &v);
+  return (u64) v;
+#elif defined(__clang__)
+  unsigned long long c;
+  u64 rv = __builtin_addcll (a, b, *carry, &c);
+  *carry = c;
+  return rv;
+#else
+  u64 rv = a + b + *carry;
+  *carry = rv < a;
+  return rv;
+#endif
+}
+
+static_always_inline u64
+u64_sub_with_borrow (u64 *borrow, u64 x, u64 y)
+{
+#if defined(__x86_64__)
+  unsigned long long v;
+  *borrow = _subborrow_u64 (*borrow, x, y, &v);
+  return (u64) v;
+#elif defined(__clang__)
+  unsigned long long b;
+  u64 rv = __builtin_subcll (x, y, *borrow, &b);
+  *borrow = b;
+  return rv;
+#else
+  unsigned long long rv = x - (y + *borrow);
+  *borrow = rv >= x;
+  return rv;
+#endif
+}
+
 /* Standard standalone-only function declarations. */
 #ifndef CLIB_UNIX
 void clib_standalone_init (void *memory, uword memory_bytes);
@@ -342,6 +389,7 @@ void qsort (void *base, uword n, uword size,
 uword
 clib_backtrace (uword * callers, uword max_callers, uword n_frames_to_skip);
 
+#include
 #endif /* included_clib_h */
 
 /*
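
A few usage sketches for the new primitives follow; none of this is part of
the patch. First, FOREACH_ARRAY_ELT walks a fixed-size array by element
pointer. A minimal standalone sketch of its intended use (the two macros are
copied from the patch above; the surrounding program is illustrative only and
relies on GNU C typeof, which vppinfra already assumes):

#include <stdio.h>

#define ARRAY_LEN(x) (sizeof (x)/sizeof (x[0]))
#define FOREACH_ARRAY_ELT(a, b) \
  for (typeof ((b)[0]) *(a) = (b); (a) - (b) < ARRAY_LEN (b); (a)++)

int
main (void)
{
  int mtu[] = { 1500, 9000, 1280 };

  /* 'e' is a pointer that visits each element of 'mtu' in order. */
  FOREACH_ARRAY_ELT (e, mtu)
    printf ("%d\n", *e);

  return 0;
}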
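
CLIB_ASSUME, also added above, hands the optimizer an invariant: if the
condition is ever false, __builtin_unreachable () makes the behavior
undefined, so the compiler is free to generate code as though the condition
always holds. A sketch (illustrative, not VPP code; the macro body is from the
patch):

#include <stdint.h>

#define CLIB_ASSUME(x) \
  do \
    { \
      if (!(x)) \
        __builtin_unreachable (); \
    } \
  while (0)

/* Telling the compiler that n is a multiple of 8 lets it unroll or
   vectorize the loop without emitting a scalar tail. */
uint64_t
sum8 (const uint64_t *v, unsigned n)
{
  uint64_t s = 0;
  CLIB_ASSUME (n % 8 == 0);
  for (unsigned i = 0; i < n; i++)
    s += v[i];
  return s;
}

int
main (void)
{
  uint64_t v[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  return sum8 (v, 8) == 36 ? 0 : 1;
}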
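
The relocated pow2_mask and the new round_down_pow2 are standard
power-of-two bit tricks. The BMI2 path exists because _bzhi_u64 is a single
instruction and, unlike the shift form, is well defined for x = 64. A
self-contained sketch of the portable logic (uword assumed 64-bit here, as on
x86_64/aarch64):

#include <assert.h>
#include <stdint.h>

typedef uint64_t uword; /* assumption: 64-bit uword */

/* Low 'x' bits set; the shift form requires x < 64. */
static inline uword
pow2_mask (uword x)
{
  return ((uword) 1 << x) - (uword) 1;
}

/* Align 'x' downward; 'pow2' must be a power of two. */
static inline uword
round_down_pow2 (uword x, uword pow2)
{
  return x & ~(pow2 - 1);
}

int
main (void)
{
  assert (pow2_mask (12) == 0xfff);
  assert (round_down_pow2 (4097, 4096) == 4096);
  return 0;
}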
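
Finally, u64_add_with_carry is a limb primitive for multi-word arithmetic:
the carry from one call ripples into the next. A sketch using only the
portable fallback branch from the patch (the _addcarry_u64 and
__builtin_addcll branches compute the same result via intrinsics); note that
the fallback's 'rv < a' test misses the corner case b == ~0 with carry-in 1,
which the intrinsic branches handle exactly:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Portable branch of the patch's u64_add_with_carry. */
static inline u64
u64_add_with_carry (u64 *carry, u64 a, u64 b)
{
  u64 rv = a + b + *carry;
  *carry = rv < a;
  return rv;
}

int
main (void)
{
  /* 128-bit addition from two 64-bit limbs: (2^64 - 1) + 1 = 2^64,
     i.e. lo wraps to 0 and the carry ripples into hi. */
  u64 carry = 0;
  u64 lo = u64_add_with_carry (&carry, ~0ULL, 1ULL); /* lo = 0, carry = 1 */
  u64 hi = u64_add_with_carry (&carry, 0ULL, 0ULL);  /* hi = 1 */
  printf ("hi=%llu lo=%llu\n", (unsigned long long) hi, (unsigned long long) lo);
  return 0;
}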