{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
+t##s##x##c##_load_aligned (void *p) \
+{ return (t##s##x##c) _mm512_load_si512 (p); } \
+\
+static_always_inline void \
+t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
+{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
+\
+static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
}
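+/* extract the low / high 256 bits of a 512-bit vector */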
+static_always_inline u8x32
+u8x64_extract_lo (u8x64 v)
+{
+ return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
+}
+
+static_always_inline u8x32
+u8x64_extract_hi (u8x64 v)
+{
+ return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
+}
+
static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
#define u8x64_extract_u8x16(a, n) \
(u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)
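+/* shift each 128-bit lane of a left / right by n bytes */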
+#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128 ((__m512i) (a), n)
+#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128 ((__m512i) (a), n)
+
static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}
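+/* reverse the byte order within each 128-bit lane */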
+static_always_inline u8x64
+u8x64_reflect_u8x16 (u8x64 x)
+{
+ static const u8x64 mask = {
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ };
+ return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
+}
+
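+/* per-lane byte shuffle (vpshufb); an index byte with the MSB set zeroes
+   the destination byte */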
+static_always_inline u8x64
+u8x64_shuffle (u8x64 v, u8x64 m)
+{
+ return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
+}
+
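+/* per 128-bit lane: concatenate lanes of a and b, shift right by imm bytes */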
+#define u8x64_align_right(a, b, imm) \
+  (u8x64) _mm512_alignr_epi8 ((__m512i) (a), (__m512i) (b), imm)
+
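+/* horizontal sum of all 16 u32 elements: reduce within each 128-bit lane
+   using per-lane byte rotates, then add the four per-lane sums */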
+static_always_inline u32
+u32x16_sum_elts (u32x16 sum16)
+{
+ u32x8 sum8;
+ sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
+ sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
+ sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
+ return sum8[0] + sum8[4];
+}
+
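+/* load bytes from p where the mask bit is set, keep the corresponding
+   byte of a elsewhere */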
+static_always_inline u8x64
+u8x64_mask_load (u8x64 a, void *p, u64 mask)
+{
+ return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
+}
+
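+/* store only the bytes of a whose mask bit is set */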
+static_always_inline void
+u8x64_mask_store (u8x64 a, void *p, u64 mask)
+{
+ _mm512_mask_storeu_epi8 (p, mask, (__m512i) a);
+}
+
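+/* broadcast a 128-bit vector to all four 128-bit lanes */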
+static_always_inline u8x64
+u8x64_splat_u8x16 (u8x16 a)
+{
+ return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
+}
+
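+/* broadcast a u32x4 to all four 128-bit lanes of a u32x16 */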
+static_always_inline u32x16
+u32x16_splat_u32x4 (u32x4 a)
+{
+ return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
+}
+
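+/* per-u32 select: take the element from b where the mask bit is set,
+   from a otherwise */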
+static_always_inline u32x16
+u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
+{
+ return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
+}
+
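+/* per-byte variant: select bytes from b where the 64-bit mask bit is set */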
+static_always_inline u8x64
+u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
+{
+ return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
+}
+
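+/* compare u64 lanes for equality and return an 8-bit mask of matching lanes */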
+static_always_inline u8
+u64x8_mask_is_equal (u64x8 a, u64x8 b)
+{
+ return _mm512_cmpeq_epu64_mask ((__m512i) a, (__m512i) b);
+}
+
static_always_inline void
u32x16_transpose (u32x16 m[16])
{