+/* 128-bit half extract / insert: _extract_lo, _extract_hi, _insert_lo, _insert_hi */
+/* *INDENT-OFF* */
+#define _(t1,t2) \
+always_inline t1 \
+t2##_extract_lo (t2 v) \
+{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
+\
+always_inline t1 \
+t2##_extract_hi (t2 v) \
+{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
+\
+always_inline t2 \
+t2##_insert_lo (t2 v1, t1 v2) \
+{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); }\
+\
+always_inline t2 \
+t2##_insert_hi (t2 v1, t1 v2) \
+{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }\
+
+_(u8x16, u8x32)
+_(u16x8, u16x16)
+_(u32x4, u32x8)
+_(u64x2, u64x4)
+#undef _
+/* *INDENT-ON* */
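+/* Illustrative sketch only (helper name hypothetical, not part of this
+   patch): swap the two 128-bit halves of a u32x8 using the generated
+   extract / insert helpers. */
+static_always_inline u32x8
+u32x8_swap_halves_sketch (u32x8 v)
+{
+  u32x4 lo = u32x8_extract_lo (v);
+  u32x4 hi = u32x8_extract_hi (v);
+  return u32x8_insert_hi (u32x8_insert_lo (v, hi), lo);
+}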
+
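+/* return a 32-bit bitmap with bit i set to the most significant bit of
+   byte lane i (VPMOVMSKB); e.g. an all-ones per-byte compare result in
+   a lane sets the corresponding mask bit */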
+static_always_inline u32
+u8x32_msb_mask (u8x32 v)
+{
+ return _mm256_movemask_epi8 ((__m256i) v);
+}
+
+/* zero- / sign-extending conversions, named <dst>_from_<src> */
+/* *INDENT-OFF* */
+#define _(f,t,i) \
+static_always_inline t \
+t##_from_##f (f x) \
+{ return (t) _mm256_cvt##i ((__m128i) x); }
+
+_(u16x8, u32x8, epu16_epi32)
+_(u16x8, u64x4, epu16_epi64)
+_(u32x4, u64x4, epu32_epi64)
+_(u8x16, u16x16, epu8_epi16)
+_(u8x16, u32x8, epu8_epi32)
+_(u8x16, u64x4, epu8_epi64)
+_(i16x8, i32x8, epi16_epi32)
+_(i16x8, i64x4, epi16_epi64)
+_(i32x4, i64x4, epi32_epi64)
+_(i8x16, i16x16, epi8_epi16)
+_(i8x16, i32x8, epi8_epi32)
+_(i8x16, i64x4, epi8_epi64)
+#undef _
+/* *INDENT-ON* */
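+/* Illustrative sketch only (values hypothetical): the epu* variants
+   zero-extend, the epi* variants sign-extend, e.g.
+
+     u16x8 a = { 0xffff, 1, 2, 3, 4, 5, 6, 7 };
+     u32x8 b = u32x8_from_u16x8 (a);    -> b[0] == 0x0000ffff
+     i16x8 c = { -1, 1, 2, 3, 4, 5, 6, 7 };
+     i32x8 d = i32x8_from_i16x8 (c);    -> d[0] == -1
+*/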
+
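+/* the byte_swap and shuffle helpers below are built on
+   _mm256_shuffle_epi8 (VPSHUFB), which permutes each 128-bit lane
+   independently; this is why each 16-byte index pattern is repeated
+   for both lanes */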
+static_always_inline u64x4
+u64x4_byte_swap (u64x4 v)
+{
+ u8x32 swap = {
+ 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
+ };
+ return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
+}
+
+static_always_inline u32x8
+u32x8_byte_swap (u32x8 v)
+{
+ u8x32 swap = {
+ 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
+ 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+ };
+ return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
+}
+
+static_always_inline u16x16
+u16x16_byte_swap (u16x16 v)
+{
+ u8x32 swap = {
+ 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
+ 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
+ };
+ return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
+}
+
+static_always_inline u8x32
+u8x32_shuffle (u8x32 v, u8x32 m)
+{
+ return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
+}
+
+#define u8x32_align_right(a, b, imm) \
+  (u8x32) _mm256_alignr_epi8 ((__m256i) (a), (__m256i) (b), (imm))
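+/* note: as with VPSHUFB, _mm256_alignr_epi8 concatenates and shifts
+   within each 128-bit lane separately; it is not a full 256-bit byte
+   rotate */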
+
+static_always_inline u32
+u32x8_sum_elts (u32x8 sum8)
+{
+  /* rotate each 128-bit lane by 8 then 4 bytes and accumulate, so
+     every element of a lane ends up holding that lane's total */
+  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
+  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
+  /* element 0 is the lane 0 total, element 4 the lane 1 total */
+  return sum8[0] + sum8[4];
+}
+
+static_always_inline u32x8
+u32x8_hadd (u32x8 v1, u32x8 v2)
+{
+ return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
+}
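+/* note: VPHADDD interleaves results per 128-bit lane:
+   { v1[0]+v1[1], v1[2]+v1[3], v2[0]+v2[1], v2[2]+v2[3],
+     v1[4]+v1[5], v1[6]+v1[7], v2[4]+v2[5], v2[6]+v2[7] } */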
+
+static_always_inline u16x16
+u16x16_mask_last (u16x16 v, u8 n_last)
+{
+ const u16x16 masks[17] = {
+ {0},
+ {-1},
+ {-1, -1},
+ {-1, -1, -1},
+ {-1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ };
+
+ ASSERT (n_last < 17);
+
+ return v & masks[16 - n_last];
+}
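+/* e.g. u16x16_mask_last (v, 2) returns v with v[14] and v[15] zeroed;
+   n_last == 0 returns v unchanged */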
+
+/* _mm256_mask_loadu_epi8 needs AVX512VL + AVX512BW, not just AVX512F */
+#if defined(__AVX512VL__) && defined(__AVX512BW__)
+static_always_inline u8x32
+u8x32_mask_load (u8x32 a, void *p, u32 mask)
+{
+  /* byte lanes with the corresponding mask bit set are loaded from p,
+     the remaining lanes keep their value from a */
+  return (u8x32) _mm256_mask_loadu_epi8 ((__m256i) a, mask, p);
+}
+#endif
+
+static_always_inline f32x8
+f32x8_from_u32x8 (u32x8 v)