+/* reverse (reflect) the order of bytes within each 128-bit lane */
+static_always_inline u8x64
+u8x64_reflect_u8x16 (u8x64 x)
+{
+ static const u8x64 mask = {
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ };
+ return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
+}
+
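+/* per-lane byte shuffle: each byte of the result is selected from the
+ * same 128-bit lane of v by the low 4 bits of the corresponding byte of
+ * m; bytes of m with the top bit set yield zero */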
+static_always_inline u8x64
+u8x64_shuffle (u8x64 v, u8x64 m)
+{
+ return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
+}
+
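+/* within each 128-bit lane, shift the 32-byte concatenation a:b right
+ * by imm bytes and keep the low 16 bytes */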
+#define u8x64_align_right(a, b, imm) \
+  (u8x64) _mm512_alignr_epi8 ((__m512i) (a), (__m512i) (b), (imm))
+
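+/* horizontal sum of all 16 u32 elements */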
+static_always_inline u32
+u32x16_sum_elts (u32x16 sum16)
+{
+  u32x8 sum8;
+  /* in each 128-bit lane, add elements 2 and 3 into elements 0 and 1 */
+  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
+  /* in each 128-bit lane, add element 1 into element 0 */
+  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
+  /* fold the upper and lower 256-bit halves together */
+  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
+  /* elements 0 and 4 now carry the four per-lane sums between them */
+  return sum8[0] + sum8[4];
+}
+
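+/* masked load: bytes of the result whose mask bit is set are loaded
+ * from p, the remaining bytes are taken from a */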
+static_always_inline u8x64
+u8x64_mask_load (u8x64 a, void *p, u64 mask)
+{
+ return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
+}
+
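+/* masked store: only the bytes of a whose mask bit is set are written
+ * to p, other bytes in memory are left untouched */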
+static_always_inline void
+u8x64_mask_store (u8x64 a, void *p, u64 mask)
+{
+ _mm512_mask_storeu_epi8 (p, mask, (__m512i) a);
+}
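+
+/* usage sketch: bounded copy of the first n (n < 64) bytes of a buffer,
+ * assuming clib's pow2_mask ():
+ *
+ *   u64 m = pow2_mask (n);
+ *   u8x64_mask_store (u8x64_mask_load ((u8x64) {}, src, m), dst, m);
+ */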
+
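+/* broadcast a 128-bit vector into all four 128-bit lanes */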
+static_always_inline u8x64
+u8x64_splat_u8x16 (u8x16 a)
+{
+ return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
+}
+
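+/* broadcast 4 u32 elements into all four 128-bit lanes */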
+static_always_inline u32x16
+u32x16_splat_u32x4 (u32x4 a)
+{
+ return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
+}
+
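+/* for each 32-bit element, pick b where the mask bit is set, a otherwise */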
+static_always_inline u32x16
+u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
+{
+ return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
+}
+
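+/* for each byte, pick b where the mask bit is set, a otherwise */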
+static_always_inline u8x64
+u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
+{
+ return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
+}
+