X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fvector_avx2.h;h=0511ec7e4e22bc40e11bf3a7944b4663e8db14af;hb=9fefa89169d296bff41a815fbefed2b41b1a4bb8;hp=c857ad49c0dfe4d5f870afb4a3ea0ca196cbc187;hpb=9f7e33d60c1a807175c03028493c18be50d26e06;p=vpp.git

diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index c857ad49c0d..0511ec7e4e2 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -132,6 +132,26 @@ _(i8x16, i64x4, epi8_epi64)
 #undef _
 /* *INDENT-ON* */
 
+static_always_inline u64x4
+u64x4_byte_swap (u64x4 v)
+{
+  u8x32 swap = {
+    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
+    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
+  };
+  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
+}
+
+static_always_inline u32x8
+u32x8_byte_swap (u32x8 v)
+{
+  u8x32 swap = {
+    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
+    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+  };
+  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
+}
+
 static_always_inline u16x16
 u16x16_byte_swap (u16x16 v)
 {
@@ -142,6 +162,23 @@ u16x16_byte_swap (u16x16 v)
   return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
 }
 
+static_always_inline u8x32
+u8x32_shuffle (u8x32 v, u8x32 m)
+{
+  return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
+}
+
+#define u8x32_align_right(a, b, imm) \
+  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)
+
+static_always_inline u32
+u32x8_sum_elts (u32x8 sum8)
+{
+  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
+  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
+  return sum8[0] + sum8[4];
+}
+
 static_always_inline u32x8
 u32x8_hadd (u32x8 v1, u32x8 v2)
 {
@@ -176,6 +213,14 @@ u16x16_mask_last (u16x16 v, u8 n_last)
   return v & masks[16 - n_last];
 }
 
+#ifdef __AVX512F__
+static_always_inline u8x32
+u8x32_mask_load (u8x32 a, void *p, u32 mask)
+{
+  return (u8x32) _mm256_mask_loadu_epi8 ((__m256i) a, mask, p);
+}
+#endif
+
 static_always_inline f32x8
 f32x8_from_u32x8 (u32x8 v)
 {
@@ -188,6 +233,9 @@ u32x8_from_f32x8 (f32x8 v)
   return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
 }
 
+#define u32x8_blend(a,b,m) \
+  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)
+
 #define u16x16_blend(v1, v2, mask) \
   (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)
 
@@ -265,6 +313,19 @@ u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
 #define u64x4_permute_lanes(a, b, m) \
   (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
 
+static_always_inline u32x8
+u32x8_min (u32x8 a, u32x8 b)
+{
+  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
+}
+
+static_always_inline u32
+u32x8_min_scalar (u32x8 v)
+{
+  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
+				      u32x8_extract_hi (v)));
+}
+
 static_always_inline void
 u32x8_transpose (u32x8 a[8])
 {
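
Usage sketch (not part of the patch): the new u32x8_byte_swap and u64x4_byte_swap helpers wrap _mm256_shuffle_epi8 with a byte-permutation control that repeats for the upper 128-bit lane, because vpshufb shuffles each lane independently. The snippet below reproduces the u32x8 variant with raw AVX2 intrinsics so it builds outside the VPP tree; the file name, build line, and the bswap32-based check are illustrative assumptions, not VPP code.

/* bswap_demo.c (illustrative) -- build: cc -O2 -mavx2 bswap_demo.c */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t in[8] = { 0x11223344, 0xaabbccdd, 1, 2, 3, 4, 5, 0xdeadbeef };
  uint32_t out[8];

  /* Same control bytes as the patch's u32x8_byte_swap: reverse the four
     bytes of each 32-bit element; the pattern is duplicated for the
     second 128-bit lane. */
  const __m256i swap = _mm256_setr_epi8 (3, 2, 1, 0, 7, 6, 5, 4,
					 11, 10, 9, 8, 15, 14, 13, 12,
					 3, 2, 1, 0, 7, 6, 5, 4,
					 11, 10, 9, 8, 15, 14, 13, 12);

  __m256i v = _mm256_loadu_si256 ((const __m256i *) in);
  _mm256_storeu_si256 ((__m256i *) out, _mm256_shuffle_epi8 (v, swap));

  for (int i = 0; i < 8; i++)
    printf ("%08x -> %08x (expect %08x)\n", in[i], out[i],
	    __builtin_bswap32 (in[i]));
  return 0;
}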
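A second sketch (also not part of the patch): u32x8_sum_elts reduces eight u32 lanes by exploiting the fact that _mm256_alignr_epi8 with identical operands rotates each 128-bit lane, so two rotate-and-add steps leave the per-lane totals in elements 0 and 4. The standalone version below mirrors that sequence with raw intrinsics and checks it against a scalar loop; the helper name sum8_avx2 and the build line are made up for the example.

/* sum_demo.c (illustrative) -- build: cc -O2 -mavx2 sum_demo.c */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
sum8_avx2 (__m256i v)
{
  /* rotate each lane by 8 bytes and add: folds the two 64-bit halves */
  v = _mm256_add_epi32 (v, _mm256_alignr_epi8 (v, v, 8));
  /* rotate each lane by 4 bytes and add: folds the remaining pairs */
  v = _mm256_add_epi32 (v, _mm256_alignr_epi8 (v, v, 4));
  /* element 0 holds the low-lane total, element 4 the high-lane total */
  return (uint32_t) _mm256_extract_epi32 (v, 0) +
	 (uint32_t) _mm256_extract_epi32 (v, 4);
}

int
main (void)
{
  uint32_t in[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, ref = 0;
  for (int i = 0; i < 8; i++)
    ref += in[i];
  printf ("simd %u, scalar %u\n",
	  sum8_avx2 (_mm256_loadu_si256 ((const __m256i *) in)), ref);
  return 0;
}

The new u32x8_min_scalar takes the complementary route for its reduction: it crosses the lane boundary first via u32x8_extract_lo/u32x8_extract_hi and u32x4_min, then finishes with the existing 128-bit u32x4_min_scalar helper.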