{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
+t##s##x##c##_load_aligned (void *p) \
+{ return (t##s##x##c) _mm512_load_si512 (p); } \
+\
+static_always_inline void \
+t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
+{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
+\
+static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}
+static_always_inline u8x64
+u8x64_shuffle (u8x64 v, u8x64 m)
+{
+ return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
+}
+
/* Lane-wise byte alignment (VPALIGNR): within each 128-bit lane,
   concatenate the corresponding lanes of a (high) and b (low) and
   shift right by imm bytes.  imm must be a compile-time constant.
   Fix: arguments and the whole expansion are now parenthesized so the
   macro stays correct when callers pass expressions (e.g. `x + y`) or
   use the result inside a larger expression. */
#define u8x64_align_right(a, b, imm)                                          \
  ((u8x64) _mm512_alignr_epi8 ((__m512i) (a), (__m512i) (b), (imm)))
+
/* Horizontal sum of all 16 u32 elements of sum16.
   u8x64_align_right maps to VPALIGNR, which shifts bytes within each
   128-bit lane independently; with both operands equal it rotates each
   lane.  The two rotate-and-add steps (by 8 bytes, then 4 bytes) fold
   the four u32 elements of every lane so that element 0 of each lane
   holds that lane's sum. */
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  /* rotate each 128-bit lane by 2 elements and accumulate */
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  /* rotate each 128-bit lane by 1 element and accumulate */
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  /* presumably extract_hi/lo return the upper/lower 256-bit halves
     (lanes 2-3 and 0-1) — defined elsewhere in this file; adding them
     pairs up lane sums across the halves. */
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  /* sum8[0] = lanes 0+2, sum8[4] = lanes 1+3 -> total of all 16 elts */
  return sum8[0] + sum8[4];
}
+
/* Masked unaligned load of 64 bytes from p: for each byte i, load byte
   i from p when bit i of mask is set, otherwise keep byte i of a.
   Fix: the original body called _mm512_mask_blend_epi8 with an
   undeclared variable `b` (a patch-merge artifact) and never
   dereferenced p at all; a mask *load* is _mm512_mask_loadu_epi8,
   which also suppresses faults on masked-off bytes. */
static_always_inline u8x64
u8x64_mask_load (u8x64 a, void *p, u64 mask)
{
  return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
}
+static_always_inline u8
+u64x8_mask_is_equal (u64x8 a, u64x8 b)
+{
+ return _mm512_cmpeq_epu64_mask ((__m512i) a, (__m512i) b);
+}
+
static_always_inline void
u32x16_transpose (u32x16 m[16])
{