return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}
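+/* permute 64-bit lanes: result lane i takes source lane mi,
+   e.g. u64x4_permute (v, 3, 2, 1, 0) reverses the four lanes */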
+#define u64x4_permute(v, m0, m1, m2, m3) \
+ (u64x4) _mm256_permute4x64_epi64 ( \
+ (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))
+
/* _extract_lo, _extract_hi */
/* *INDENT-OFF* */
#define _(t1,t2) \
#undef _
/* *INDENT-ON* */
+/* 256 bit packs: narrow elements to half width with saturation,
+   results interleaved per 128-bit lane. */
+#define _(f, t, fn) \
+ always_inline t t##_pack (f lo, f hi) \
+ { \
+ return (t) fn ((__m256i) lo, (__m256i) hi); \
+ }
+_ (i16x16, i8x32, _mm256_packs_epi16)
+_ (i16x16, u8x32, _mm256_packus_epi16)
+_ (i32x8, i16x16, _mm256_packs_epi32)
+_ (i32x8, u16x16, _mm256_packus_epi32)
+#undef _
static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}
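+/* signed-byte variant of u8x32_msb_mask */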
+static_always_inline u32
+i8x32_msb_mask (i8x32 v)
+{
+ return _mm256_movemask_epi8 ((__m256i) v);
+}
+
/* _from_ */
/* *INDENT-OFF* */
#define _(f,t,i) \
return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
-static_always_inline u8x32
-u8x32_shuffle (u8x32 v, u8x32 m)
-{
- return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
-}
-
#define u8x32_align_right(a, b, imm) \
(u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)
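+/* 64-bit element variant; _mm256_alignr_epi64 is an AVX-512VL intrinsic */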
+#define u64x4_align_right(a, b, imm) \
+ (u64x4) _mm256_alignr_epi64 ((__m256i) a, (__m256i) b, imm)
+
static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}
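+/* horizontal xor: fold all eight 32-bit lanes of v into one scalar */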
+static_always_inline u32
+u32x8_hxor (u32x8 v)
+{
+ u32x4 v4;
+ v4 = u32x8_extract_lo (v) ^ u32x8_extract_hi (v);
+ v4 ^= (u32x4) u8x16_align_right (v4, v4, 8);
+ v4 ^= (u32x4) u8x16_align_right (v4, v4, 4);
+ return v4[0];
+}
+
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
*(u32 *) p = r[index];
}
-static_always_inline u8x32
-u8x32_is_greater (u8x32 v1, u8x32 v2)
-{
- return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
-}
-
static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{