return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}
+#define u64x4_permute(v, m0, m1, m2, m3) \
+ (u64x4) _mm256_permute4x64_epi64 ( \
+ (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))
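+
+/* Usage sketch (hypothetical helper): each selector picks the source
+ 64-bit lane for that destination lane, so (3, 2, 1, 0) reverses them. */
+static_always_inline u64x4
+u64x4_reverse (u64x4 v)
+{
+ return u64x4_permute (v, 3, 2, 1, 0);
+}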
+
/* _extract_lo, _extract_hi */
/* *INDENT-OFF* */
#define _(t1,t2) \
#undef _
/* *INDENT-ON* */
+/* 256 bit packs. */
+#define _(f, t, fn) \
+ always_inline t t##_pack (f lo, f hi) \
+ { \
+ return (t) fn ((__m256i) lo, (__m256i) hi); \
+ }
+_ (i16x16, i8x32, _mm256_packs_epi16)
+_ (i16x16, u8x32, _mm256_packus_epi16)
+_ (i32x8, i16x16, _mm256_packs_epi32)
+_ (i32x8, u16x16, _mm256_packus_epi32)
+#undef _
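+
+/* Note: like all AVX2 packs these operate per 128-bit lane, so the result
+ interleaves the two sources lane-wise. A sketch of a hypothetical helper
+ that restores sequential order with u64x4_permute: */
+static_always_inline i16x16
+i16x16_pack_ordered (i32x8 lo, i32x8 hi)
+{
+ return (i16x16) u64x4_permute ((u64x4) i16x16_pack (lo, hi), 0, 2, 1, 3);
+}
+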
static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
return _mm256_movemask_epi8 ((__m256i) v);
}
-/* _extend_to_ */
+static_always_inline u32
+i8x32_msb_mask (i8x32 v)
+{
+ return _mm256_movemask_epi8 ((__m256i) v);
+}
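+
+/* Usage sketch (hypothetical helper): combine a byte compare with
+ _msb_mask to find the first occurrence of a byte, 32 when absent. */
+static_always_inline u32
+u8x32_first_match (u8x32 v, u8 c)
+{
+ u32 m = u8x32_msb_mask ((u8x32) (v == u8x32_splat (c)));
+ return m ? count_trailing_zeros (m) : 32;
+}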
+
+/* _from_ */
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t \
-f##_extend_to_##t (f x) \
+t##_from_##f (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }
_(u16x8, u32x8, epu16_epi32)
_(u16x8, u64x4, epu16_epi64)
_(u32x4, u64x4, epu32_epi64)
-_(u8x16, u16x16, epu8_epi64)
+_ (u8x16, u16x16, epu8_epi16)
_(u8x16, u32x8, epu8_epi32)
_(u8x16, u64x4, epu8_epi64)
_(i16x8, i32x8, epi16_epi32)
_(i16x8, i64x4, epi16_epi64)
_(i32x4, i64x4, epi32_epi64)
-_(i8x16, i16x16, epi8_epi64)
+_ (i8x16, i16x16, epi8_epi16)
_(i8x16, i32x8, epi8_epi32)
_(i8x16, i64x4, epi8_epi64)
#undef _
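+
+/* Usage sketch (hypothetical helper): the unsigned variants zero-extend,
+ the signed ones sign-extend, e.g. widening bytes into overflow-safe
+ 16-bit accumulators: */
+static_always_inline u16x16
+u16x16_accumulate_u8x16 (u16x16 acc, u8x16 x)
+{
+ return acc + u16x16_from_u8x16 (x);
+}
+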
return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
+#define u8x32_align_right(a, b, imm) \
+ (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)
+
+#define u64x4_align_right(a, b, imm) \
+ (u64x4) _mm256_alignr_epi64 ((__m256i) a, (__m256i) b, imm)
+
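+/* Horizontal sum of the eight u32 lanes. _mm256_alignr_epi8 rotates
+ within each 128-bit lane, so after the two folds element 0 and element 4
+ each hold the sum of their own half; adding them gives the total. */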
+static_always_inline u32
+u32x8_sum_elts (u32x8 sum8)
+{
+ sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
+ sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
+ return sum8[0] + sum8[4];
+}
+
static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}
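+/* Horizontal xor of the eight u32 lanes: fold 256 bits onto 128, then
+ rotate-and-xor twice within the remaining half. */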
+static_always_inline u32
+u32x8_hxor (u32x8 v)
+{
+ u32x4 v4;
+ v4 = u32x8_extract_lo (v) ^ u32x8_extract_hi (v);
+ v4 ^= (u32x4) u8x16_align_right (v4, v4, 8);
+ v4 ^= (u32x4) u8x16_align_right (v4, v4, 4);
+ return v4[0];
+}
+
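+/* 3-way xor. On AVX-512 a single vpternlog does it: immediate 0x96 is
+ the 8-entry truth table of a ^ b ^ c. */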
+static_always_inline u8x32
+u8x32_xor3 (u8x32 a, u8x32 b, u8x32 c)
+{
+#if __AVX512F__
+ return (u8x32) _mm256_ternarylogic_epi32 ((__m256i) a, (__m256i) b,
+ (__m256i) c, 0x96);
+#endif
+ return a ^ b ^ c;
+}
+
+static_always_inline u8x32
+u8x32_reflect_u8x16 (u8x32 x)
+{
+ static const u8x32 mask = {
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ };
+ return (u8x32) _mm256_shuffle_epi8 ((__m256i) x, (__m256i) mask);
+}
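+
+/* _mm256_shuffle_epi8 works per 128-bit lane, hence the _u8x16 suffix:
+ each half is byte-reversed independently. A full 256-bit byte reverse
+ is a sketch away (hypothetical helper), swapping the halves afterwards: */
+static_always_inline u8x32
+u8x32_reflect (u8x32 x)
+{
+ return (u8x32) u64x4_permute ((u64x4) u8x32_reflect_u8x16 (x), 2, 3, 0, 1);
+}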
+
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
*(u32 *) p = r[index];
}
-static_always_inline u8x32
-u8x32_is_greater (u8x32 v1, u8x32 v2)
-{
- return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
-}
+#define u32x8_gather_u32(base, indices, scale) \
+ (u32x8) _mm256_i32gather_epi32 ((const int *) base, (__m256i) indices, scale)
+
+#ifdef __AVX512F__
+#define u32x8_scatter_u32(base, indices, v, scale) \
+ _mm256_i32scatter_epi32 (base, (__m256i) indices, (__m256i) v, scale)
+#else
+#define u32x8_scatter_u32(base, indices, v, scale) \
+ for (u32 i = 0; i < 8; i++) \
+ *((u32u *) ((u8 *) base + (scale) * (indices)[i])) = (v)[i];
+#endif
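+
+/* Usage sketch (hypothetical helpers): gather/scatter eight u32 table
+ entries through an index vector; scale 4 is the stride of a u32 array. */
+static_always_inline u32x8
+u32x8_gather_from_table (u32 *table, u32x8 indices)
+{
+ return u32x8_gather_u32 (table, indices, 4);
+}
+
+static_always_inline void
+u32x8_scatter_to_table (u32 *table, u32x8 indices, u32x8 v)
+{
+ u32x8_scatter_u32 (table, indices, v, 4);
+}
+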
static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
(__m256i) mask);
}
+#define u8x32_word_shift_left(a, n) \
+ (u8x32) _mm256_bslli_epi128 ((__m256i) a, n)
+#define u8x32_word_shift_right(a, n) \
+ (u8x32) _mm256_bsrli_epi128 ((__m256i) a, n)
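+
+/* n is a byte count and must be a compile-time constant; as with the
+ underlying vpslldq/vpsrldq, the shift happens within each 128-bit lane. */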
+
#define u32x8_permute_lanes(a, b, m) \
(u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}
+static_always_inline u8x32
+u8x32_splat_u8x16 (u8x16 a)
+{
+ return (u8x32) _mm256_broadcastsi128_si256 ((__m128i) a);
+}
+
+static_always_inline u32x8
+u32x8_splat_u32x4 (u32x4 a)
+{
+ return (u32x8) _mm256_broadcastsi128_si256 ((__m128i) a);
+}
+
+static_always_inline u64x4
+u64x4_splat_u64x2 (u64x2 a)
+{
+ return (u64x4) _mm256_broadcastsi128_si256 ((__m128i) a);
+}
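+
+/* Usage sketch (hypothetical helper): broadcast one 16-byte pshufb
+ pattern so it applies to both halves of a 256-bit vector. */
+static_always_inline u8x32
+u8x32_shuffle_both (u8x32 v, u8x16 pattern)
+{
+ return (u8x32) _mm256_shuffle_epi8 ((__m256i) v,
+ (__m256i) u8x32_splat_u8x16 (pattern));
+}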
+
+static_always_inline u8x32
+u8x32_load_partial (u8 *data, uword n)
+{
+#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
+ return u8x32_mask_load_zero (data, pow2_mask (n));
+#else
+ u8x32 r = {};
+ if (n > 16)
+ {
+ r = u8x32_insert_lo (r, *(u8x16u *) data);
+ r = u8x32_insert_hi (r, u8x16_load_partial (data + 16, n - 16));
+ }
+ else
+ r = u8x32_insert_lo (r, u8x16_load_partial (data, n));
+ return r;
+#endif
+}
+
+static_always_inline void
+u8x32_store_partial (u8x32 r, u8 *data, uword n)
+{
+#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
+ u8x32_mask_store (r, data, pow2_mask (n));
+#else
+ if (n > 16)
+ {
+ *(u8x16u *) data = u8x32_extract_lo (r);
+ u8x16_store_partial (u8x32_extract_hi (r), data + 16, n - 16);
+ }
+ else
+ u8x16_store_partial (u8x32_extract_lo (r), data, n);
+#endif
+}
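+
+/* Usage sketch (hypothetical helper): copy n <= 32 bytes without reading
+ or writing past either buffer. */
+static_always_inline void
+clib_memcpy_max32 (u8 *dst, u8 *src, uword n)
+{
+ u8x32_store_partial (u8x32_load_partial (src, n), dst, n);
+}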
+
#endif /* included_vector_avx2_h */
/*