X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fvector_sse42.h;h=f86fad39b028a3898648e35254b021e166a1d41a;hb=df6d986f81f89ed46288ab508485700b4ed7dca1;hp=a2d737a48923d8dff947e9a4e9d542e44d2be0cb;hpb=f6adf1f8dbbdfb2822c391d7d1b132d2846bf004;p=vpp.git

diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index a2d737a4892..f86fad39b02 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -613,10 +613,25 @@ u8x16_msb_mask (u8x16 v)
   return _mm_movemask_epi8 ((__m128i) v);
 }
 
+static_always_inline u16
+i8x16_msb_mask (i8x16 v)
+{
+  return _mm_movemask_epi8 ((__m128i) v);
+}
+
 #define CLIB_HAVE_VEC128_MSB_MASK
 
 #undef _signed_binop
 
+static_always_inline u32x4
+u32x4_byte_swap (u32x4 v)
+{
+  u8x16 swap = {
+    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+  };
+  return (u32x4) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
+}
+
 static_always_inline u16x8
 u16x8_byte_swap (u16x8 v)
 {
@@ -626,12 +641,29 @@ u16x8_byte_swap (u16x8 v)
   return (u16x8) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
 }
 
+static_always_inline u8x16
+u8x16_reflect (u8x16 v)
+{
+  u8x16 mask = {
+    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+  };
+  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) mask);
+}
+
 static_always_inline u32x4
 u32x4_hadd (u32x4 v1, u32x4 v2)
 {
   return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
 }
 
+static_always_inline u32 __clib_unused
+u32x4_sum_elts (u32x4 sum4)
+{
+  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 8);
+  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 4);
+  return sum4[0];
+}
+
 static_always_inline u8x16
 u8x16_shuffle (u8x16 v, u8x16 m)
 {
@@ -650,11 +682,11 @@ u32x4_shuffle (u32x4 v, const int a, const int b, const int c, const int d)
 #endif
 }
 
-/* _extend_to_ */
+/* _from_ */
 /* *INDENT-OFF* */
 #define _(f,t,i) \
 static_always_inline t \
-f##_extend_to_##t (f x) \
+t##_from_##f (f x) \
 { return (t) _mm_cvt##i ((__m128i) x); }
 
 _(u8x16, u16x8, epu8_epi16)
@@ -681,7 +713,7 @@ u64x2_gather (void *p0, void *p1)
 }
 
 static_always_inline u32x4
-u32x4_gather (void *p0, void *p1, void *p2, void *p3, void *p4)
+u32x4_gather (void *p0, void *p1, void *p2, void *p3)
 {
   u32x4 r = { *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3 };
   return r;
@@ -728,6 +760,15 @@ u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
   return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
 }
 
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __AVX512F__
+  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
+					 (__m128i) c, 0x96);
+#endif
+  return a ^ b ^ c;
+}
 
 #endif /* included_vector_sse2_h */
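
The new u32x4_byte_swap and u8x16_reflect helpers in this patch are each a single PSHUFB byte permute driven by a constant control vector. A minimal standalone sketch of the same technique, assuming only GCC/clang vector extensions and an SSSE3 target (-mssse3); the byte_swap_u32x4 and reflect_u8x16 names are illustrative, not part of vppinfra:

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

typedef uint8_t u8x16 __attribute__ ((vector_size (16)));
typedef uint32_t u32x4 __attribute__ ((vector_size (16)));

static inline u32x4
byte_swap_u32x4 (u32x4 v)
{
  /* control vector reverses the 4 bytes inside each 32-bit lane */
  u8x16 swap = { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
  return (u32x4) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}

static inline u8x16
reflect_u8x16 (u8x16 v)
{
  /* control vector reverses all 16 bytes of the register */
  u8x16 mask = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) mask);
}

int
main (void)
{
  u32x4 a = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
  u32x4 b = byte_swap_u32x4 (a);
  for (int i = 0; i < 4; i++)
    printf ("0x%08x -> 0x%08x\n", a[i], b[i]);	/* 0x11223344 -> 0x44332211 ... */
  return 0;
}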
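
The new u32x4_sum_elts reduces the four u32 lanes to a scalar with two rotate-and-add steps (PALIGNR + PADDD) instead of extracting lanes one by one; after the second step every lane holds the full sum, and lane 0 is returned. A standalone equivalent using the raw intrinsic behind u8x16_align_right, again assuming SSSE3; sum_elts_u32x4 is an illustrative name:

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

typedef uint32_t u32x4 __attribute__ ((vector_size (16)));

static inline uint32_t
sum_elts_u32x4 (u32x4 sum4)
{
  /* rotate by 8 bytes: lane 0 += lane 2, lane 1 += lane 3 */
  sum4 += (u32x4) _mm_alignr_epi8 ((__m128i) sum4, (__m128i) sum4, 8);
  /* rotate by 4 bytes: lane 0 += the already-combined lane 1 */
  sum4 += (u32x4) _mm_alignr_epi8 ((__m128i) sum4, (__m128i) sum4, 4);
  return sum4[0];
}

int
main (void)
{
  u32x4 v = { 1, 2, 3, 4 };
  printf ("%u\n", sum_elts_u32x4 (v));	/* prints 10 */
  return 0;
}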
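
The conversion-macro rename means a widening convert now reads destination-first: what was u8x16_extend_to_u16x8 becomes u16x8_from_u8x16 (PMOVZXBW underneath, zero-extending the low eight bytes). A standalone sketch assuming SSE4.1 (-msse4.1); the _sketch suffix marks the name as illustrative:

#include <stdint.h>
#include <immintrin.h>

typedef uint8_t u8x16 __attribute__ ((vector_size (16)));
typedef uint16_t u16x8 __attribute__ ((vector_size (16)));

static inline u16x8
u16x8_from_u8x16_sketch (u8x16 x)
{
  /* zero-extend the low 8 bytes of x into eight 16-bit lanes */
  return (u16x8) _mm_cvtepu8_epi16 ((__m128i) x);
}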
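
Finally, u8x16_xor3 computes a ^ b ^ c; on AVX-512 parts the two PXORs collapse into a single VPTERNLOGD whose immediate 0x96 is the eight-entry truth table of three-way XOR (bits 1, 2, 4 and 7 set, one per odd-parity input combination). A standalone sketch; note the 128-bit _mm_ternarylogic_epi32 strictly requires AVX512VL in addition to AVX512F, so this version checks both, and xor3_u8x16 is an illustrative name:

#include <stdint.h>
#include <immintrin.h>

typedef uint8_t u8x16 __attribute__ ((vector_size (16)));

static inline u8x16
xor3_u8x16 (u8x16 a, u8x16 b, u8x16 c)
{
#if defined (__AVX512F__) && defined (__AVX512VL__)
  /* single VPTERNLOGD: imm 0x96 encodes out = a ^ b ^ c */
  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
					 (__m128i) c, 0x96);
#else
  return a ^ b ^ c;	/* two PXORs on pre-AVX-512 targets */
#endif
}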