+u32x8_min (u32x8 a, u32x8 b)
+{
+ return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
+}
+
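+/* horizontal minimum: reduce the 8 lanes to 4 by taking the minimum of the
+   low and high 128-bit halves, then finish with the 128-bit scalar reduce */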
+static_always_inline u32
+u32x8_min_scalar (u32x8 v)
+{
+ return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
+ u32x8_extract_hi (v)));
+}
+
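+/* in-place transpose of an 8x8 matrix of u32 values held as 8 row vectors */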
+static_always_inline void
+u32x8_transpose (u32x8 a[8])
+{
+ u64x4 r[8], x, y;
+
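+ /* stage 1: interleave 32-bit elements of adjacent rows within each
+    128-bit lane */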
+ r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
+ r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
+ r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
+ r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
+ r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
+ r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
+ r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
+ r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);
+
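+ /* stage 2: interleave the resulting 64-bit pairs, then permute 128-bit
+    lanes to put each transposed row in place */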
+ x = u64x4_interleave_lo (r[0], r[2]);
+ y = u64x4_interleave_lo (r[4], r[6]);
+ a[0] = u32x8_permute_lanes (x, y, 0x20);
+ a[4] = u32x8_permute_lanes (x, y, 0x31);
+
+ x = u64x4_interleave_hi (r[0], r[2]);
+ y = u64x4_interleave_hi (r[4], r[6]);
+ a[1] = u32x8_permute_lanes (x, y, 0x20);
+ a[5] = u32x8_permute_lanes (x, y, 0x31);
+
+ x = u64x4_interleave_lo (r[1], r[3]);
+ y = u64x4_interleave_lo (r[5], r[7]);
+ a[2] = u32x8_permute_lanes (x, y, 0x20);
+ a[6] = u32x8_permute_lanes (x, y, 0x31);
+
+ x = u64x4_interleave_hi (r[1], r[3]);
+ y = u64x4_interleave_hi (r[5], r[7]);
+ a[3] = u32x8_permute_lanes (x, y, 0x20);
+ a[7] = u32x8_permute_lanes (x, y, 0x31);
+}
+
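+/* in-place transpose of a 4x4 matrix of u64 values held as 4 row vectors */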
+static_always_inline void
+u64x4_transpose (u64x4 a[4])
+{
+ u64x4 r[4];
+
+ r[0] = u64x4_interleave_lo (a[0], a[1]);
+ r[1] = u64x4_interleave_hi (a[0], a[1]);
+ r[2] = u64x4_interleave_lo (a[2], a[3]);
+ r[3] = u64x4_interleave_hi (a[2], a[3]);
+
+ a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
+ a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
+ a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
+ a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
+}
+
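+/* broadcast a 128-bit vector into both 128-bit lanes of a 256-bit vector */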
+static_always_inline u8x32
+u8x32_splat_u8x16 (u8x16 a)
+{
+ return (u8x32) _mm256_broadcastsi128_si256 ((__m128i) a);
+}
+
+static_always_inline u32x8
+u32x8_splat_u32x4 (u32x4 a)
+{
+ return (u32x8) _mm256_broadcastsi128_si256 ((__m128i) a);
+}
+
+static_always_inline u64x4
+u64x4_splat_u64x2 (u64x2 a)
+{
+ return (u64x4) _mm256_broadcastsi128_si256 ((__m128i) a);
+}
+
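+/* load the first n bytes (n <= 32) from data into a 32-byte vector,
+   zeroing the remaining bytes */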
+static_always_inline u8x32
+u8x32_load_partial (u8 *data, uword n)
+{
+#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
+ return u8x32_mask_load_zero (data, pow2_mask (n));
+#else
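+ /* no 256-bit masked loads available: compose the result from 16-byte
+    partial loads */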
+ u8x32 r = {};
+ if (n > 16)
+ {
+ r = u8x32_insert_lo (r, *(u8x16u *) data);
+ r = u8x32_insert_hi (r, u8x16_load_partial (data + 16, n - 16));
+ }
+ else
+ r = u8x32_insert_lo (r, u8x16_load_partial (data, n));
+ return r;
+#endif
+}
+
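+/* store the first n bytes (n <= 32) of r to data */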
+static_always_inline void
+u8x32_store_partial (u8x32 r, u8 *data, uword n)