diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index 8cc1d77d63c..f38a3bdae73 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -75,6 +75,10 @@ u32x8_permute (u32x8 v, u32x8 idx)
   return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
 }
 
+#define u64x4_permute(v, m0, m1, m2, m3) \
+  (u64x4) _mm256_permute4x64_epi64 ( \
+    (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))
+
 /* _extract_lo, _extract_hi */
 /* *INDENT-OFF* */
 #define _(t1,t2) \
@@ -101,8 +105,19 @@ _(u64x2, u64x4)
 #undef _
 /* *INDENT-ON* */
 
+/* 256 bit packs. */
+#define _(f, t, fn) \
+  always_inline t t##_pack (f lo, f hi) \
+  { \
+    return (t) fn ((__m256i) lo, (__m256i) hi); \
+  }
+_ (i16x16, i8x32, _mm256_packs_epi16)
+_ (i16x16, u8x32, _mm256_packus_epi16)
+_ (i32x8, i16x16, _mm256_packs_epi32)
+_ (i32x8, u16x16, _mm256_packus_epi32)
+#undef _
+
 static_always_inline u32
 u8x32_msb_mask (u8x32 v)
@@ -110,23 +125,29 @@ u8x32_msb_mask (u8x32 v)
   return _mm256_movemask_epi8 ((__m256i) v);
 }
 
-/* _extend_to_ */
+static_always_inline u32
+i8x32_msb_mask (i8x32 v)
+{
+  return _mm256_movemask_epi8 ((__m256i) v);
+}
+
+/* _from_ */
 /* *INDENT-OFF* */
 #define _(f,t,i) \
 static_always_inline t \
-f##_extend_to_##t (f x) \
+t##_from_##f (f x) \
 { return (t) _mm256_cvt##i ((__m128i) x); }
 
 _(u16x8, u32x8, epu16_epi32)
 _(u16x8, u64x4, epu16_epi64)
 _(u32x4, u64x4, epu32_epi64)
-_(u8x16, u16x16, epu8_epi64)
+_ (u8x16, u16x16, epu8_epi16)
 _(u8x16, u32x8, epu8_epi32)
 _(u8x16, u64x4, epu8_epi64)
 _(i16x8, i32x8, epi16_epi32)
 _(i16x8, i64x4, epi16_epi64)
 _(i32x4, i64x4, epi32_epi64)
-_(i8x16, i16x16, epi8_epi64)
+_ (i8x16, i16x16, epi8_epi16)
 _(i8x16, i32x8, epi8_epi32)
 _(i8x16, i64x4, epi8_epi64)
 #undef _
@@ -162,6 +183,23 @@ u16x16_byte_swap (u16x16 v)
   return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
 }
 
+static_always_inline u8x32
+u8x32_shuffle (u8x32 v, u8x32 m)
+{
+  return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
+}
+
+#define u8x32_align_right(a, b, imm) \
+  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)
+
+static_always_inline u32
+u32x8_sum_elts (u32x8 sum8)
+{
+  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
+  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
+  return sum8[0] + sum8[4];
+}
+
 static_always_inline u32x8
 u32x8_hadd (u32x8 v1, u32x8 v2)
 {
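
Notes on the new helpers (editorial usage sketches, not part of the patch):

u64x4_permute wraps _mm256_permute4x64_epi64, which picks each destination
qword by a 2-bit source index packed into an 8-bit immediate, so m0..m3 must
be compile-time constants in the range 0..3. A minimal sketch, reversing the
four 64-bit elements (values are hypothetical):

  u64x4 v = { 0, 1, 2, 3 };
  u64x4 r = u64x4_permute (v, 3, 2, 1, 0);   /* r = { 3, 2, 1, 0 } */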
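
The t##_pack helpers wrap the AVX2 pack intrinsics, which saturate and narrow
per 128-bit lane rather than across the whole register, so the result holds
lo's low lane, hi's low lane, lo's high lane, hi's high lane, in that order.
A sketch of restoring linear element order with the new u64x4_permute (this
pairing is my illustration, not something the patch itself does):

  /* given i16x16 lo, hi */
  i8x32 r = i8x32_pack (lo, hi);   /* qwords: lo[0-7] hi[0-7] lo[8-15] hi[8-15] */
  r = (i8x32) u64x4_permute ((u64x4) r, 0, 2, 1, 3);   /* linear: lo then hi */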
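
The widening conversions are renamed from f##_extend_to_##t to t##_from_##f,
so the destination type now reads first; the same hunk also fixes the
u8x16->u16x16 and i8x16->i16x16 entries to use the 8-to-16-bit conversion
intrinsics (epu8_epi16, epi8_epi16) where the old table mistakenly used the
8-to-64-bit ones. A sketch under the new naming (values are hypothetical):

  u8x16 b = { 1, 2, 3, 255 };         /* remaining elements are zero */
  u16x16 w = u16x16_from_u8x16 (b);   /* zero-extends 16 x u8 to 16 x u16 */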
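
u8x32_shuffle wraps _mm256_shuffle_epi8 which, like 128-bit pshufb, operates
on each 128-bit lane independently: every mask byte selects one of the 16
bytes of its own lane (low 4 bits), and a set bit 7 produces zero. A sketch
reversing the bytes within each lane (the mask is my example):

  /* given u8x32 v */
  u8x32 m = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
              15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
  u8x32 r = u8x32_shuffle (v, m);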
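
u32x8_sum_elts reduces in two in-lane rotations: the 8-byte u8x32_align_right
adds each u32 to the element two positions away inside its 128-bit lane, the
4-byte step then adds neighbours, leaving the two lane totals in elements 0
and 4, which the final scalar add combines. A plain-C reference for what it
computes (this scalar helper is mine, not in the header):

  static_always_inline u32
  u32x8_sum_elts_scalar (u32x8 v)
  {
    u32 sum = 0;
    for (int i = 0; i < 8; i++)
      sum += v[i];   /* same result as u32x8_sum_elts (v) */
    return sum;
  }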