return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
}
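+/* Horizontal sum of the four u32 lanes: rotate by 8 and then 4 bytes with
+   u8x16_align_right so lane 0 accumulates all four elements, then extract
+   lane 0. Illustrative use (values are examples only):
+
+     u32x4 acc = { 1, 2, 3, 4 };
+     u32 total = u32x4_sum_elts (acc);   -> total == 10
+*/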
+static_always_inline u32 __clib_unused
+u32x4_sum_elts (u32x4 sum4)
+{
+ sum4 += (u32x4) u8x16_align_right (sum4, sum4, 8);
+ sum4 += (u32x4) u8x16_align_right (sum4, sum4, 4);
+ return sum4[0];
+}
+
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
#endif
}
-/* _extend_to_ */
+/* _from_ */
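+/* Naming after this rename: destination type first, so
+   u8x16_extend_to_u16x8 (x) is now written u16x8_from_u8x16 (x);
+   the underlying _mm_cvt* intrinsic mapping is unchanged. */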
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t \
-f##_extend_to_##t (f x) \
+t##_from_##f (f x) \
{ return (t) _mm_cvt##i ((__m128i) x); }
_(u8x16, u16x8, epu8_epi16)
}
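+/* Gathers four u32s through four independent pointers using scalar loads;
+   the fifth pointer parameter was never used in the body, hence it is
+   dropped from the signature. */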
static_always_inline u32x4
-u32x4_gather (void *p0, void *p1, void *p2, void *p3, void *p4)
+u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
u32x4 r = { *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3 };
return r;
return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}
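+/* Three-way XOR. On AVX-512 a single vpternlogd does it: immediate 0x96
+   is the 8-entry truth table of a ^ b ^ c. The plain-XOR return below the
+   #endif is the non-AVX-512 fallback. */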
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __AVX512F__
+ return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
+                                        (__m128i) c, 0x96);
+#endif
+ return a ^ b ^ c;
+}
+
+#ifdef __AVX512F__
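+/* Load the bytes of p selected by mask; lanes whose mask bit is clear keep
+   the corresponding byte of a. Note that _mm_mask_loadu_epi8 additionally
+   requires AVX512BW and AVX512VL, not just AVX512F. */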
+static_always_inline u8x16
+u8x16_mask_load (u8x16 a, void *p, u16 mask)
+{
+ return (u8x16) _mm_mask_loadu_epi8 ((__m128i) a, mask, p);
+}
+#endif
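+/* Illustrative use (assumes vppinfra's pow2_mask() and u8x16_splat()
+   helpers): read only the first n bytes at p without touching the rest:
+
+     u8x16 r = u8x16_mask_load (u8x16_splat (0), p, pow2_mask (n));
+*/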
#endif /* included_vector_sse2_h */