return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
-static_always_inline u32x8
-u32x16_extract_lo (u32x16 v)
-{
- return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
-}
-
-static_always_inline u32x8
-u32x16_extract_hi (u32x16 v)
-{
- return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
-}
-
-static_always_inline u8x32
-u8x64_extract_lo (u8x64 v)
-{
- return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
-}
-
-static_always_inline u8x32
-u8x64_extract_hi (u8x64 v)
-{
- return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
-}
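+/* generate <vec>_extract_lo / <vec>_extract_hi returning the lower or
+   upper 256-bit half of a 512-bit vector (f = full type, t = half type) */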
+#define _(f, t) \
+ static_always_inline t f##_extract_lo (f v) \
+ { \
+ return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
+ } \
+ static_always_inline t f##_extract_hi (f v) \
+ { \
+ return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
+ }
+
+_ (u64x8, u64x4)
+_ (u32x16, u32x8)
+_ (u16x32, u16x16)
+_ (u8x64, u8x32)
+#undef _
static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v), u32x16_extract_hi (v)));
}
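+/* per-lane byte shuffle: _mm512_shuffle_epi8 permutes bytes inside each
+   128-bit lane, it does not move bytes across lanes */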
static_always_inline u8x64
-u8x64_mask_load (u8x64 a, void *p, u64 mask)
+u8x64_shuffle (u8x64 v, u8x64 m)
{
- return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
+ return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}
-static_always_inline void
-u8x64_mask_store (u8x64 a, void *p, u64 mask)
+#define u8x64_align_right(a, b, imm) \
+ (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)
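+/* imm must be a compile-time constant; like the underlying vpalignr, the
+   byte shift happens independently inside each 128-bit lane */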
+
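+/* horizontal sum of all 16 u32 elements: the two in-lane rotations leave
+   every element of a 128-bit lane holding that lane's sum; adding the hi
+   and lo 256-bit halves then leaves the four lane sums folded into
+   sum8[0] and sum8[4] */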
+static_always_inline u32
+u32x16_sum_elts (u32x16 sum16)
{
- _mm512_mask_storeu_epi8 (p, mask, (__m512i) a);
+ u32x8 sum8;
+ sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
+ sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
+ sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
+ return sum8[0] + sum8[4];
}
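+/* generate masked load/store (t = vector type, m = mask type, p = intrinsic
+   prefix, i = intrinsic vector type, e = element suffix); one mask bit
+   selects each element, _mask_load_zero zeroes unselected elements */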
+#define _(t, m, p, i, e) \
+ static_always_inline t t##_mask_load (t a, void *ptr, m mask) \
+ { \
+ return (t) p##_mask_loadu_##e ((i) a, mask, ptr); \
+ } \
+ static_always_inline t t##_mask_load_zero (void *ptr, m mask) \
+ { \
+ return (t) p##_maskz_loadu_##e (mask, ptr); \
+ } \
+ static_always_inline void t##_mask_store (t a, void *ptr, m mask) \
+ { \
+ p##_mask_storeu_##e (ptr, mask, (i) a); \
+ }
+
+_ (u8x64, u64, _mm512, __m512i, epi8)
+_ (u8x32, u32, _mm256, __m256i, epi8)
+_ (u8x16, u16, _mm, __m128i, epi8)
+_ (u16x32, u32, _mm512, __m512i, epi16)
+_ (u16x16, u16, _mm256, __m256i, epi16)
+_ (u16x8, u8, _mm, __m128i, epi16)
+_ (u32x16, u16, _mm512, __m512i, epi32)
+_ (u32x8, u8, _mm256, __m256i, epi32)
+_ (u32x4, u8, _mm, __m128i, epi32)
+_ (u64x8, u8, _mm512, __m512i, epi64)
+_ (u64x4, u8, _mm256, __m256i, epi64)
+_ (u64x2, u8, _mm, __m128i, epi64)
+#undef _
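+/* e.g. a partial store of the first n bytes of a u8x64 (n < 64), with
+   pow2_mask () from clib.h supplying n one bits:
+   u8x64_mask_store (v, dst, pow2_mask (n)); */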
+
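+/* advertise masked load/store support for each vector width */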
+#ifdef CLIB_HAVE_VEC512
+#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
+#endif
+#ifdef CLIB_HAVE_VEC256
+#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
+#endif
+#ifdef CLIB_HAVE_VEC128
+#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
+#endif
+
static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}
static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}
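+/* generate <vec>_is_equal_mask: unsigned per-element compare returning a
+   compact bitmask with one bit per equal element */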
+#define _(t, m, e, p, it) \
+ static_always_inline m t##_is_equal_mask (t a, t b) \
+ { \
+ return p##_cmpeq_##e##_mask ((it) a, (it) b); \
+ }
+_ (u8x16, u16, epu8, _mm, __m128i)
+_ (u16x8, u8, epu16, _mm, __m128i)
+_ (u32x4, u8, epu32, _mm, __m128i)
+_ (u64x2, u8, epu64, _mm, __m128i)
+
+_ (u8x32, u32, epu8, _mm256, __m256i)
+_ (u16x16, u16, epu16, _mm256, __m256i)
+_ (u32x8, u8, epu32, _mm256, __m256i)
+_ (u64x4, u8, epu64, _mm256, __m256i)
+
+_ (u8x64, u64, epu8, _mm512, __m512i)
+_ (u16x32, u32, epu16, _mm512, __m512i)
+_ (u32x16, u16, epu32, _mm512, __m512i)
+_ (u64x8, u8, epu64, _mm512, __m512i)
+#undef _
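+/* e.g. find the first matching byte, with count_trailing_zeros () from
+   clib.h: u64 m = u8x64_is_equal_mask (a, b);
+   if (m) first = count_trailing_zeros (m); */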
+
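+/* element width conversions: widen with zero extension so unsigned values
+   are preserved, narrow with unsigned saturation (cvtusepi32) so values
+   survive the widen/narrow round trip used by the compress fallbacks below */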
+#define _(f, t, fn, it) \
+ static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
+_ (u16x16, u32x16, _mm512_cvtepu16_epi32, __m256i)
+_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
+_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
+_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
+#undef _
+
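+/* generate <vec>_compress / _expand / _compress_store: compress packs the
+   mask-selected elements into the low end and zeroes the rest, expand is
+   the inverse, compress_store writes only the selected elements to memory */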
+#define _(vt, mt, p, it, epi) \
+ static_always_inline vt vt##_compress (vt a, mt mask) \
+ { \
+ return (vt) p##_maskz_compress_##epi (mask, (it) a); \
+ } \
+ static_always_inline vt vt##_expand (vt a, mt mask) \
+ { \
+ return (vt) p##_maskz_expand_##epi (mask, (it) a); \
+ } \
+ static_always_inline void vt##_compress_store (vt v, mt mask, void *ptr) \
+ { \
+ p##_mask_compressstoreu_##epi (ptr, mask, (it) v); \
+ }
+
+_ (u64x8, u8, _mm512, __m512i, epi64)
+_ (u32x16, u16, _mm512, __m512i, epi32)
+_ (u64x4, u8, _mm256, __m256i, epi64)
+_ (u32x8, u8, _mm256, __m256i, epi32)
+_ (u64x2, u8, _mm, __m128i, epi64)
+_ (u32x4, u8, _mm, __m128i, epi32)
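+/* compress/expand on 8-bit and 16-bit elements requires AVX512VBMI2 */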
+#ifdef __AVX512VBMI2__
+_ (u16x32, u32, _mm512, __m512i, epi16)
+_ (u8x64, u64, _mm512, __m512i, epi8)
+_ (u16x16, u16, _mm256, __m256i, epi16)
+_ (u8x32, u32, _mm256, __m256i, epi8)
+_ (u16x8, u8, _mm, __m128i, epi16)
+_ (u8x16, u16, _mm, __m128i, epi8)
+#endif
+#undef _
+
+#define CLIB_HAVE_VEC256_COMPRESS
+#define CLIB_HAVE_VEC512_COMPRESS
+
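+/* without VBMI2, emulate 16-bit compress by widening to 32-bit elements,
+   compressing there and narrowing back */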
+#ifndef __AVX512VBMI2__
+static_always_inline u16x16
+u16x16_compress (u16x16 v, u16 mask)
+{
+ return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
+}
+
+static_always_inline u16x8
+u16x8_compress (u16x8 v, u8 mask)
+{
+ return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
+}
+#endif
+
static_always_inline void
u32x16_transpose (u32x16 m[16])
{