X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fclib.h;h=22377c61440ef5dc309d56f89059101fb67a483e;hb=af2cc6425;hp=a6f88245d7956c011832970adc111409b95bfc17;hpb=5f21e1bd613b372ea6f8be6423894548dae59bdc;p=vpp.git

diff --git a/src/vppinfra/clib.h b/src/vppinfra/clib.h
index a6f88245d79..22377c61440 100644
--- a/src/vppinfra/clib.h
+++ b/src/vppinfra/clib.h
@@ -38,14 +38,20 @@
 #ifndef included_clib_h
 #define included_clib_h
 
+#include <stddef.h>
 #include <vppinfra/config.h>
 
+#ifdef __x86_64__
+#include <x86intrin.h>
+#endif
+
 /* Standalone means to not assume we are running on a Unix box. */
 #if ! defined (CLIB_STANDALONE) && ! defined (CLIB_LINUX_KERNEL)
 #define CLIB_UNIX
 #endif
 
 #include <vppinfra/types.h>
+#include <vppinfra/atomics.h>
 
 /* Global DEBUG flag.  Setting this to 1 or 0 turns off
    ASSERT (see vppinfra/error.h) & other debugging code. */
@@ -61,8 +67,8 @@
 #define ARRAY_LEN(x) (sizeof (x)/sizeof (x[0]))
 
 #define _STRUCT_FIELD(t,f) (((t *) 0)->f)
-#define STRUCT_OFFSET_OF(t,f) ((uword) & _STRUCT_FIELD (t, f))
-#define STRUCT_BIT_OFFSET_OF(t,f) (BITS(u8) * (uword) & _STRUCT_FIELD (t, f))
+#define STRUCT_OFFSET_OF(t,f) offsetof(t, f)
+#define STRUCT_BIT_OFFSET_OF(t,f) (BITS(u8) * STRUCT_OFFSET_OF (t, f))
 #define STRUCT_SIZE_OF(t,f)   (sizeof (_STRUCT_FIELD (t, f)))
 #define STRUCT_BITS_OF(t,f)   (BITS (_STRUCT_FIELD (t, f)))
 #define STRUCT_ARRAY_LEN(t,f) ARRAY_LEN (_STRUCT_FIELD (t, f))
@@ -80,6 +86,9 @@
 #define CLIB_PACKED(x) x __attribute__ ((packed))
 #define CLIB_UNUSED(x) x __attribute__ ((unused))
 
+/* similar to CLIB_CACHE_LINE_ALIGN_MARK() but with arbitrary alignment */
+#define CLIB_ALIGN_MARK(name, alignment) u8 name[0] __attribute__((aligned(alignment)))
+
 /* Make a string from the macro's argument */
 #define CLIB_STRING_MACRO(x) #x
 
@@ -87,6 +96,11 @@
 #define __clib_weak __attribute__ ((weak))
 #define __clib_packed __attribute__ ((packed))
 #define __clib_constructor __attribute__ ((constructor))
+#define __clib_noinline __attribute__ ((noinline))
+#define __clib_aligned(x) __attribute__ ((aligned(x)))
+#define __clib_section(s) __attribute__ ((section(s)))
+#define __clib_warn_unused_result __attribute__ ((warn_unused_result))
+#define __clib_export __attribute__ ((visibility("default")))
 
 #define never_inline __attribute__ ((__noinline__))
 
@@ -107,6 +121,14 @@
 #define PREDICT_FALSE(x) __builtin_expect((x),0)
 #define PREDICT_TRUE(x) __builtin_expect((x),1)
 
+/*
+ * Compiler barrier
+ *   prevent compiler to reorder memory access accross this boundary
+ *   prevent compiler to cache values in register (force reload)
+ * Not to be confused with CPU memory barrier below
+ */
+#define CLIB_COMPILER_BARRIER() asm volatile ("":::"memory")
+
 /* Full memory barrier (read and write). */
 #define CLIB_MEMORY_BARRIER() __sync_synchronize ()
 
@@ -233,6 +255,12 @@ is_pow2 (uword x)
   return 0 == (x & (x - 1));
 }
 
+always_inline uword
+round_down_pow2 (uword x, uword pow2)
+{
+  return (x) & ~(pow2 - 1);
+}
+
 always_inline uword
 round_pow2 (uword x, uword pow2)
 {
@@ -281,6 +309,15 @@ flt_round_to_multiple (f64 x, f64 f)
   return f * flt_round_nearest (x / f);
 }
 
+always_inline uword
+extract_bits (uword x, int start, int count)
+{
+#ifdef __BMI__
+  return _bextr_u64 (x, start, count);
+#endif
+  return (x >> start) & pow2_mask (count);
+}
+
 #define clib_max(x,y) \
 ({ \
   __typeof__ (x) _x = (x); \
@@ -295,6 +332,14 @@ flt_round_to_multiple (f64 x, f64 f)
   _x < _y ? _x : _y; \
 })
 
+#define clib_clamp(x,lo,hi) \
+({ \
+  __typeof__ (x) _x = (x); \
+  __typeof__ (lo) _lo = (lo); \
+  __typeof__ (hi) _hi = (hi); \
+  _x < _lo ? _lo : (_x > _hi ? _hi : _x); \
+})
+
 #define clib_abs(x) \
 ({ \
   __typeof__ (x) _x = (x); \