X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fvector_avx2.h;h=17271b8fcd0782edd165bc6398a8058a9b98bd5b;hb=f441b5d0e;hp=0511ec7e4e22bc40e11bf3a7944b4663e8db14af;hpb=94dbf952204e811c5ec7a39600c25f7b40387f45;p=vpp.git diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h index 0511ec7e4e2..17271b8fcd0 100644 --- a/src/vppinfra/vector_avx2.h +++ b/src/vppinfra/vector_avx2.h @@ -75,6 +75,10 @@ u32x8_permute (u32x8 v, u32x8 idx) return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx); } +#define u64x4_permute(v, m0, m1, m2, m3) \ + (u64x4) _mm256_permute4x64_epi64 ( \ + (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6)) + /* _extract_lo, _extract_hi */ /* *INDENT-OFF* */ #define _(t1,t2) \ @@ -101,8 +105,19 @@ _(u64x2, u64x4) #undef _ /* *INDENT-ON* */ +/* 256 bit packs. */ +#define _(f, t, fn) \ + always_inline t t##_pack (f lo, f hi) \ + { \ + return (t) fn ((__m256i) lo, (__m256i) hi); \ + } +_ (i16x16, i8x32, _mm256_packs_epi16) +_ (i16x16, u8x32, _mm256_packus_epi16) +_ (i32x8, i16x16, _mm256_packs_epi32) +_ (i32x8, u16x16, _mm256_packus_epi32) +#undef _ static_always_inline u32 u8x32_msb_mask (u8x32 v) @@ -110,23 +125,29 @@ u8x32_msb_mask (u8x32 v) return _mm256_movemask_epi8 ((__m256i) v); } -/* _extend_to_ */ +static_always_inline u32 +i8x32_msb_mask (i8x32 v) +{ + return _mm256_movemask_epi8 ((__m256i) v); +} + +/* _from_ */ /* *INDENT-OFF* */ #define _(f,t,i) \ static_always_inline t \ -f##_extend_to_##t (f x) \ +t##_from_##f (f x) \ { return (t) _mm256_cvt##i ((__m128i) x); } _(u16x8, u32x8, epu16_epi32) _(u16x8, u64x4, epu16_epi64) _(u32x4, u64x4, epu32_epi64) -_(u8x16, u16x16, epu8_epi64) +_ (u8x16, u16x16, epu8_epi16) _(u8x16, u32x8, epu8_epi32) _(u8x16, u64x4, epu8_epi64) _(i16x8, i32x8, epi16_epi32) _(i16x8, i64x4, epi16_epi64) _(i32x4, i64x4, epi32_epi64) -_(i8x16, i16x16, epi8_epi64) +_ (i8x16, i16x16, epi8_epi16) _(i8x16, i32x8, epi8_epi32) _(i8x16, i64x4, epi8_epi64) #undef _ @@ -162,15 +183,12 @@ u16x16_byte_swap (u16x16 v) return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap); } -static_always_inline u8x32 -u8x32_shuffle (u8x32 v, u8x32 m) -{ - return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m); -} - #define u8x32_align_right(a, b, imm) \ (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm) +#define u64x4_align_right(a, b, imm) \ + (u64x4) _mm256_alignr_epi64 ((__m256i) a, (__m256i) b, imm) + static_always_inline u32 u32x8_sum_elts (u32x8 sum8) { @@ -185,6 +203,36 @@ u32x8_hadd (u32x8 v1, u32x8 v2) return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2); } +static_always_inline u32 +u32x8_hxor (u32x8 v) +{ + u32x4 v4; + v4 = u32x8_extract_lo (v) ^ u32x8_extract_hi (v); + v4 ^= (u32x4) u8x16_align_right (v4, v4, 8); + v4 ^= (u32x4) u8x16_align_right (v4, v4, 4); + return v4[0]; +} + +static_always_inline u8x32 +u8x32_xor3 (u8x32 a, u8x32 b, u8x32 c) +{ +#if __AVX512F__ + return (u8x32) _mm256_ternarylogic_epi32 ((__m256i) a, (__m256i) b, + (__m256i) c, 0x96); +#endif + return a ^ b ^ c; +} + +static_always_inline u8x32 +u8x32_reflect_u8x16 (u8x32 x) +{ + static const u8x32 mask = { + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, + }; + return (u8x32) _mm256_shuffle_epi8 ((__m256i) x, (__m256i) mask); +} + static_always_inline u16x16 u16x16_mask_last (u16x16 v, u8 n_last) { @@ -213,14 +261,6 @@ u16x16_mask_last (u16x16 v, u8 n_last) return v & masks[16 - n_last]; } -#ifdef __AVX512F__ -static_always_inline u8x32 
-u8x32_mask_load (u8x32 a, void *p, u32 mask) -{ - return (u8x32) _mm256_mask_loadu_epi8 ((__m256i) a, mask, p); -} -#endif - static_always_inline f32x8 f32x8_from_u32x8 (u32x8 v) { @@ -295,12 +335,6 @@ u32x8_scatter_one (u32x8 r, int index, void *p) *(u32 *) p = r[index]; } -static_always_inline u8x32 -u8x32_is_greater (u8x32 v1, u8x32 v2) -{ - return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2); -} - static_always_inline u8x32 u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask) { @@ -308,6 +342,11 @@ u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask) (__m256i) mask); } +#define u8x32_word_shift_left(a, n) \ + (u8x32) _mm256_bslli_epi128 ((__m256i) a, n) +#define u8x32_word_shift_right(a, n) \ + (u8x32) _mm256_bsrli_epi128 ((__m256i) a, n) + #define u32x8_permute_lanes(a, b, m) \ (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m) #define u64x4_permute_lanes(a, b, m) \ @@ -377,6 +416,52 @@ u64x4_transpose (u64x4 a[8]) a[3] = u64x4_permute_lanes (r[1], r[3], 0x31); } +static_always_inline u8x32 +u8x32_splat_u8x16 (u8x16 a) +{ + return (u8x32) _mm256_broadcastsi128_si256 ((__m128i) a); +} + +static_always_inline u32x8 +u32x8_splat_u32x4 (u32x4 a) +{ + return (u32x8) _mm256_broadcastsi128_si256 ((__m128i) a); +} + +static_always_inline u8x32 +u8x32_load_partial (u8 *data, uword n) +{ +#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE) + return u8x32_mask_load_zero (data, pow2_mask (n)); +#else + u8x32 r = {}; + if (n > 16) + { + r = u8x32_insert_lo (r, *(u8x16u *) data); + r = u8x32_insert_hi (r, u8x16_load_partial (data + 16, n - 16)); + } + else + r = u8x32_insert_lo (r, u8x16_load_partial (data, n)); + return r; +#endif +} + +static_always_inline void +u8x32_store_partial (u8x32 r, u8 *data, uword n) +{ +#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE) + u8x32_mask_store (r, data, pow2_mask (n)); +#else + if (n > 16) + { + *(u8x16u *) data = u8x32_extract_lo (r); + u8x16_store_partial (u8x32_extract_hi (r), data + 16, n - 16); + } + else + u8x16_store_partial (u8x32_extract_lo (r), data, n); +#endif +} + #endif /* included_vector_avx2_h */ /*
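
Note (not part of the patch): a minimal usage sketch of the new u8x32_load_partial / u8x32_store_partial helpers added above. It assumes the usual vppinfra headers are included and the target is built with AVX2 so this header is active; the function name copy_le_32_sketch is hypothetical.

#include <vppinfra/clib.h>
#include <vppinfra/vector.h>

/* Copy up to 32 bytes without reading or writing past src/dst + n.
 * When CLIB_HAVE_VEC256_MASK_LOAD_STORE is defined the helpers use masked
 * loads/stores; otherwise they fall back to 128-bit partial operations. */
static_always_inline void
copy_le_32_sketch (u8 *dst, u8 *src, uword n)
{
  u8x32 v = u8x32_load_partial (src, n); /* lanes past n are expected to be zero */
  u8x32_store_partial (v, dst, n);       /* only the first n bytes are written */
}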
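
Also not part of the patch: a hedged sketch of the new 256-bit pack helpers, assuming the same includes as the sketch above. Like the underlying _mm256_packus_epi16, u8x32_pack narrows per 128-bit lane, so the packed halves of lo and hi come out interleaved by lane; the illustrative wrapper below uses the new u64x4_permute macro to restore a plain lo|hi byte order.

/* Narrow two i16x16 vectors to one u8x32 with unsigned saturation.
 * u8x32_pack yields bytes in the order lo[0..7], hi[0..7], lo[8..15],
 * hi[8..15]; permuting the 64-bit lanes as (0, 2, 1, 3) re-orders this
 * to lo[0..15] followed by hi[0..15]. */
static_always_inline u8x32
saturate_pack_sketch (i16x16 lo, i16x16 hi)
{
  u8x32 r = u8x32_pack (lo, hi);
  return (u8x32) u64x4_permute ((u64x4) r, 0, 2, 1, 3);
}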