diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index 50aa662a925..c22e86e7437 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -41,6 +41,62 @@
 #include <vppinfra/error_bootstrap.h> /* for ASSERT */
 #include <x86intrin.h>
 
+/* *INDENT-OFF* */
+#define foreach_sse42_vec128i \
+  _(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64x)
+#define foreach_sse42_vec128u \
+  _(u,8,16,epi8) _(u,16,8,epi16) _(u,32,4,epi32) _(u,64,2,epi64x)
+#define foreach_sse42_vec128f \
+  _(f,32,4,ps) _(f,64,2,pd)
+
+/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
+   is_all_equal */
+#define _(t, s, c, i) \
+static_always_inline t##s##x##c \
+t##s##x##c##_splat (t##s x) \
+{ return (t##s##x##c) _mm_set1_##i (x); } \
+\
+static_always_inline t##s##x##c \
+t##s##x##c##_load_unaligned (void *p) \
+{ return (t##s##x##c) _mm_loadu_si128 (p); } \
+\
+static_always_inline void \
+t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
+{ _mm_storeu_si128 ((__m128i *) p, (__m128i) v); } \
+\
+static_always_inline int \
+t##s##x##c##_is_all_zero (t##s##x##c x) \
+{ return _mm_testz_si128 ((__m128i) x, (__m128i) x); } \
+\
+static_always_inline int \
+t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
+{ return t##s##x##c##_is_all_zero (a ^ b); } \
+\
+static_always_inline int \
+t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
+{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
+
+foreach_sse42_vec128i foreach_sse42_vec128u
+#undef _
+
+/* min, max */
+#define _(t, s, c, i) \
+static_always_inline t##s##x##c \
+t##s##x##c##_min (t##s##x##c a, t##s##x##c b) \
+{ return (t##s##x##c) _mm_min_##i ((__m128i) a, (__m128i) b); } \
+\
+static_always_inline t##s##x##c \
+t##s##x##c##_max (t##s##x##c a, t##s##x##c b) \
+{ return (t##s##x##c) _mm_max_##i ((__m128i) a, (__m128i) b); } \
+
+_(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64)
+_(u,8,16,epu8) _(u,16,8,epu16) _(u,32,4,epu32) _(u,64,2,epu64)
+#undef _
+/* *INDENT-ON* */
+
+#define CLIB_VEC128_SPLAT_DEFINED
+#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
+
 /* 128 bit interleaves. */
 always_inline u8x16
 u8x16_interleave_hi (u8x16 a, u8x16 b)
@@ -197,16 +253,6 @@ u64x2_write_hi (u64x2 x, u64 * a)
 }
 #endif
 
-/* Unaligned loads/stores. */
-
-#define _(t) \
-  always_inline void t##_store_unaligned (t x, void * a) \
-  { _mm_storeu_si128 ((__m128i *) a, (__m128i) x); } \
-  always_inline t t##_load_unaligned (void * a) \
-  { return (t) _mm_loadu_si128 ((__m128i *) a); }
-
-_(u8x16) _(u16x8) _(u32x4) _(u64x2) _(i8x16) _(i16x8) _(i32x4) _(i64x2)
-#undef _
 #define _signed_binop(n,m,f,g) \
   /* Unsigned */ \
   always_inline u##n##x##m \
@@ -218,7 +264,7 @@ _(u8x16) _(u16x8) _(u32x4) _(u64x2) _(i8x16) _(i16x8) _(i32x4) _(i64x2)
   i##n##x##m##_##f (i##n##x##m x, i##n##x##m y) \
   { return (i##n##x##m) _mm_##g##n ((__m128i) x, (__m128i) y); }
 /* Addition/subtraction with saturation. */
-  _signed_binop (8, 16, add_saturate, adds_epu)
+_signed_binop (8, 16, add_saturate, adds_epu)
 _signed_binop (16, 8, add_saturate, adds_epu)
 _signed_binop (8, 16, sub_saturate, subs_epu)
 _signed_binop (16, 8, sub_saturate, subs_epu)
@@ -403,30 +449,6 @@ _(u64, 2, right, left);
 #undef _
 #endif
 
-always_inline int
-u8x16_is_all_zero (u8x16 x)
-{
-  return _mm_testz_si128 ((__m128i) x, (__m128i) x);
-}
-
-always_inline int
-u16x8_is_all_zero (u16x8 x)
-{
-  return _mm_testz_si128 ((__m128i) x, (__m128i) x);
-}
-
-always_inline int
-u32x4_is_all_zero (u32x4 x)
-{
-  return _mm_testz_si128 ((__m128i) x, (__m128i) x);
-}
-
-always_inline int
-u64x2_is_all_zero (u64x2 x)
-{
-  return _mm_testz_si128 ((__m128i) x, (__m128i) x);
-}
-
 #define u32x4_select(A,MASK) \
 ({ \
   u32x4 _x, _y; \
@@ -495,27 +517,21 @@ always_inline u32
 u8x16_zero_byte_mask (u8x16 x)
 {
   u8x16 zero = { 0 };
-  return u8x16_compare_byte_mask (u8x16_is_equal (x, zero));
+  return u8x16_compare_byte_mask (x == zero);
 }
 
 always_inline u32
 u16x8_zero_byte_mask (u16x8 x)
 {
   u16x8 zero = { 0 };
-  return u8x16_compare_byte_mask ((u8x16) u16x8_is_equal (x, zero));
+  return u8x16_compare_byte_mask ((u8x16) (x == zero));
 }
 
 always_inline u32
 u32x4_zero_byte_mask (u32x4 x)
 {
   u32x4 zero = { 0 };
-  return u8x16_compare_byte_mask ((u8x16) u32x4_is_equal (x, zero));
-}
-
-always_inline u8x16
-u8x16_max (u8x16 x, u8x16 y)
-{
-  return (u8x16) _mm_max_epu8 ((__m128i) x, (__m128i) y);
+  return u8x16_compare_byte_mask ((u8x16) (x == zero));
 }
 
 always_inline u32
@@ -528,12 +544,6 @@ u8x16_max_scalar (u8x16 x)
   return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
 }
 
-always_inline u8x16
-u8x16_min (u8x16 x, u8x16 y)
-{
-  return (u8x16) _mm_min_epu8 ((__m128i) x, (__m128i) y);
-}
-
 always_inline u8
 u8x16_min_scalar (u8x16 x)
 {
@@ -544,12 +554,6 @@ u8x16_min_scalar (u8x16 x)
   return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
 }
 
-always_inline i16x8
-i16x8_max (i16x8 x, i16x8 y)
-{
-  return (i16x8) _mm_max_epi16 ((__m128i) x, (__m128i) y);
-}
-
 always_inline i16
 i16x8_max_scalar (i16x8 x)
 {
@@ -559,12 +563,6 @@ i16x8_max_scalar (i16x8 x)
   return _mm_extract_epi16 ((__m128i) x, 0);
 }
 
-always_inline i16x8
-i16x8_min (i16x8 x, i16x8 y)
-{
-  return (i16x8) _mm_min_epi16 ((__m128i) x, (__m128i) y);
-}
-
 always_inline i16
 i16x8_min_scalar (i16x8 x)
 {
@@ -574,14 +572,183 @@ i16x8_min_scalar (i16x8 x)
   return _mm_extract_epi16 ((__m128i) x, 0);
 }
 
+#define u8x16_align_right(a, b, imm) \
+  (u8x16) _mm_alignr_epi8 ((__m128i) a, (__m128i) b, imm)
+
+static_always_inline u32
+u32x4_min_scalar (u32x4 v)
+{
+  v = u32x4_min (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
+  v = u32x4_min (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
+  return v[0];
+}
+
+static_always_inline u32
+u32x4_max_scalar (u32x4 v)
+{
+  v = u32x4_max (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
+  v = u32x4_max (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
+  return v[0];
+}
+
+static_always_inline i32
+i32x4_min_scalar (i32x4 v)
+{
+  v = i32x4_min (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
+  v = i32x4_min (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
+  return v[0];
+}
+
+static_always_inline i32
+i32x4_max_scalar (i32x4 v)
+{
+  v = i32x4_max (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
+  v = i32x4_max (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
+  return v[0];
+}
+
 static_always_inline u16
 u8x16_msb_mask (u8x16 v)
 {
   return _mm_movemask_epi8 ((__m128i) v);
 }
 
+#define CLIB_HAVE_VEC128_MSB_MASK
+
 #undef _signed_binop
 
+static_always_inline u32x4
+u32x4_byte_swap (u32x4 v)
+{
+  u8x16 swap = {
+    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+  };
+  return (u32x4) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
+}
+
+static_always_inline u16x8
+u16x8_byte_swap (u16x8 v)
+{
+  u8x16 swap = {
+    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
+  };
+  return (u16x8) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
+}
+
+static_always_inline u8x16
+u8x16_reflect (u8x16 v)
+{
+  u8x16 mask = {
+    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+  };
+  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) mask);
+}
+
+static_always_inline u32x4
+u32x4_hadd (u32x4 v1, u32x4 v2)
+{
+  return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
+}
+
+static_always_inline u8x16
+u8x16_shuffle (u8x16 v, u8x16 m)
+{
+  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) m);
+}
+
+static_always_inline u32x4
+u32x4_shuffle (u32x4 v, const int a, const int b, const int c, const int d)
+{
+#if defined(__clang__) || !__OPTIMIZE__
+  u32x4 r = { v[a], v[b], v[c], v[d] };
+  return r;
+#else
+  return (u32x4) _mm_shuffle_epi32 ((__m128i) v,
+				    a | b << 2 | c << 4 | d << 6);
+#endif
+}
+
+/* _extend_to_ */
+/* *INDENT-OFF* */
+#define _(f,t,i) \
+static_always_inline t \
+f##_extend_to_##t (f x) \
+{ return (t) _mm_cvt##i ((__m128i) x); }
+
+_(u8x16, u16x8, epu8_epi16)
+_(u8x16, u32x4, epu8_epi32)
+_(u8x16, u64x2, epu8_epi64)
+_(u16x8, u32x4, epu16_epi32)
+_(u16x8, u64x2, epu16_epi64)
+_(u32x4, u64x2, epu32_epi64)
+
+_(i8x16, i16x8, epi8_epi16)
+_(i8x16, i32x4, epi8_epi32)
+_(i8x16, i64x2, epi8_epi64)
+_(i16x8, i32x4, epi16_epi32)
+_(i16x8, i64x2, epi16_epi64)
+_(i32x4, i64x2, epi32_epi64)
+#undef _
+/* *INDENT-ON* */
+
+static_always_inline u64x2
+u64x2_gather (void *p0, void *p1)
+{
+  u64x2 r = { *(u64 *) p0, *(u64 *) p1 };
+  return r;
+}
+
+static_always_inline u32x4
+u32x4_gather (void *p0, void *p1, void *p2, void *p3)
+{
+  u32x4 r = { *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3 };
+  return r;
+}
+
+
+static_always_inline void
+u64x2_scatter (u64x2 r, void *p0, void *p1)
+{
+  *(u64 *) p0 = r[0];
+  *(u64 *) p1 = r[1];
+}
+
+static_always_inline void
+u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
+{
+  *(u32 *) p0 = r[0];
+  *(u32 *) p1 = r[1];
+  *(u32 *) p2 = r[2];
+  *(u32 *) p3 = r[3];
+}
+
+static_always_inline void
+u64x2_scatter_one (u64x2 r, int index, void *p)
+{
+  *(u64 *) p = r[index];
+}
+
+static_always_inline void
+u32x4_scatter_one (u32x4 r, int index, void *p)
+{
+  *(u32 *) p = r[index];
+}
+
+static_always_inline u8x16
+u8x16_is_greater (u8x16 v1, u8x16 v2)
+{
+  /* _mm_cmpgt_epi8 is a signed compare; flip sign bits for unsigned */
+  u8x16 sign = u8x16_splat (0x80);
+  return (u8x16) _mm_cmpgt_epi8 ((__m128i) (v1 ^ sign), (__m128i) (v2 ^ sign));
+}
+
+static_always_inline u8x16
+u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
+{
+  return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
+}
+
+
 #endif /* included_vector_sse2_h */
 
 /*
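
Not part of the patch: a minimal smoke-test sketch of how the new inlines compose. It assumes the code is compiled inside a VPP tree with SSE4.2 enabled (e.g. -msse4.2) so that <vppinfra/clib.h> pulls in this header; the function name vector_sse42_smoke_test and the test values are illustrative only, not part of the VPP API.

#include <vppinfra/clib.h>

static int
vector_sse42_smoke_test (void)
{
  u32 a = 5, b = 7, c = 1, d = 9;

  /* every lane of a splat holds the same value */
  u32x4 v = u32x4_splat (5);
  if (!u32x4_is_all_equal (v, 5) || !u32x4_is_all_zero (v ^ v))
    return -1;

  /* gather four scalars, then reduce with the new scalar min/max helpers */
  v = u32x4_gather (&a, &b, &c, &d);
  if (u32x4_min_scalar (v) != 1 || u32x4_max_scalar (v) != 9)
    return -1;

  /* byte swap turns 0x11223344 into 0x44332211 in every lane */
  if (!u32x4_is_all_equal (u32x4_byte_swap (u32x4_splat (0x11223344)),
			   0x44332211))
    return -1;

  /* scatter the gathered vector back to scalar memory */
  u32x4_scatter (v, &a, &b, &c, &d);
  return (a == 5 && b == 7 && c == 1 && d == 9) ? 0 : -1;
}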