X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fvector_avx2.h;h=482bdd515c9c0c306d7d6853360d10e04c1698ac;hb=2de9c0f92bb486072d8371a24d9b23fd85e1aa80;hp=f651392559badeed9248a514e5d83a9ac0beaa11;hpb=afe56de947822bb981bd30242f4e3c2c469f9ecc;p=vpp.git diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h index f651392559b..482bdd515c9 100644 --- a/src/vppinfra/vector_avx2.h +++ b/src/vppinfra/vector_avx2.h @@ -19,14 +19,18 @@ #include #include +/* *INDENT-OFF* */ #define foreach_avx2_vec256i \ - _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64x) + _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64) #define foreach_avx2_vec256u \ - _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64x) + _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64) #define foreach_avx2_vec256f \ _(f,32,8,ps) _(f,64,4,pd) -/* splat, load_unaligned, store_unaligned, is_all_zero, is_all_equal */ +#define _mm256_set1_epi64 _mm256_set1_epi64x + +/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal, + is_all_equal */ #define _(t, s, c, i) \ static_always_inline t##s##x##c \ t##s##x##c##_splat (t##s x) \ @@ -45,40 +49,65 @@ t##s##x##c##_is_all_zero (t##s##x##c x) \ { return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \ \ static_always_inline int \ +t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \ +{ return t##s##x##c##_is_all_zero (a ^ b); } \ +\ +static_always_inline int \ t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \ -{ return t##s##x##c##_is_all_zero (v != t##s##x##c##_splat (x)); }; \ +{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \ +\ +static_always_inline t##s##x##c \ +t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \ +{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \ \ +static_always_inline t##s##x##c \ +t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \ +{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); } \ + foreach_avx2_vec256i foreach_avx2_vec256u #undef _ - always_inline u32x8 +/* *INDENT-ON* */ + +always_inline u32x8 u32x8_permute (u32x8 v, u32x8 idx) { return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx); } -always_inline u32x4 -u32x8_extract_lo (u32x8 v) -{ - return (u32x4) _mm256_extracti128_si256 ((__m256i) v, 0); -} +/* _extract_lo, _extract_hi */ +/* *INDENT-OFF* */ +#define _(t1,t2) \ +always_inline t1 \ +t2##_extract_lo (t2 v) \ +{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \ +\ +always_inline t1 \ +t2##_extract_hi (t2 v) \ +{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \ +\ +always_inline t2 \ +t2##_insert_lo (t2 v1, t1 v2) \ +{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); }\ +\ +always_inline t2 \ +t2##_insert_hi (t2 v1, t1 v2) \ +{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }\ -always_inline u32x4 -u32x8_extract_hi (u32x8 v) -{ - return (u32x4) _mm256_extracti128_si256 ((__m256i) v, 1); -} +_(u8x16, u8x32) +_(u16x8, u16x16) +_(u32x4, u32x8) +_(u64x2, u64x4) +#undef _ +/* *INDENT-ON* */ -always_inline u32x8 -u32x8_insert_lo (u32x8 v1, u32x4 v2) -{ - return (u32x8) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); -} -always_inline u32x8 -u32x8_insert_hi (u32x8 v1, u32x4 v2) + + +static_always_inline u32 +u8x32_msb_mask (u8x32 v) { - return (u32x8) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); + return _mm256_movemask_epi8 ((__m256i) v); } /* _extend_to_ */ @@ -103,6 +132,216 @@ _(i8x16, i64x4, 
epi8_epi64) #undef _ /* *INDENT-ON* */ +static_always_inline u32x8 +u32x8_byte_swap (u32x8 v) +{ + u8x32 swap = { + 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12, + 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 + }; + return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap); +} + +static_always_inline u16x16 +u16x16_byte_swap (u16x16 v) +{ + u8x32 swap = { + 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14, + 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 + }; + return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap); +} + +static_always_inline u32x8 +u32x8_hadd (u32x8 v1, u32x8 v2) +{ + return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2); +} + +static_always_inline u16x16 +u16x16_mask_last (u16x16 v, u8 n_last) +{ + const u16x16 masks[17] = { + {0}, + {-1}, + {-1, -1}, + {-1, -1, -1}, + {-1, -1, -1, -1}, + {-1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + }; + + ASSERT (n_last < 17); + + return v & masks[16 - n_last]; +} + +static_always_inline f32x8 +f32x8_from_u32x8 (u32x8 v) +{ + return (f32x8) _mm256_cvtepi32_ps ((__m256i) v); +} + +static_always_inline u32x8 +u32x8_from_f32x8 (f32x8 v) +{ + return (u32x8) _mm256_cvttps_epi32 ((__m256) v); +} + +#define u32x8_blend(a,b,m) \ + (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m) + +#define u16x16_blend(v1, v2, mask) \ + (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask) + +static_always_inline u64x4 +u64x4_gather (void *p0, void *p1, void *p2, void *p3) +{ + u64x4 r = { + *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3 + }; + return r; +} + +static_always_inline u32x8 +u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5, + void *p6, void *p7) +{ + u32x8 r = { + *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3, + *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7, + }; + return r; +} + + +static_always_inline void +u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3) +{ + *(u64 *) p0 = r[0]; + *(u64 *) p1 = r[1]; + *(u64 *) p2 = r[2]; + *(u64 *) p3 = r[3]; +} + +static_always_inline void +u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4, + void *p5, void *p6, void *p7) +{ + *(u32 *) p0 = r[0]; + *(u32 *) p1 = r[1]; + *(u32 *) p2 = r[2]; + *(u32 *) p3 = r[3]; + *(u32 *) p4 = r[4]; + *(u32 *) p5 = r[5]; + *(u32 *) p6 = r[6]; + *(u32 *) p7 = r[7]; +} + +static_always_inline void +u64x4_scatter_one (u64x4 r, int index, void *p) +{ + *(u64 *) p = r[index]; +} + +static_always_inline void +u32x8_scatter_one (u32x8 r, int index, void *p) +{ + *(u32 *) p = r[index]; +} + +static_always_inline u8x32 +u8x32_is_greater (u8x32 v1, u8x32 v2) +{ + return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2); +} + +static_always_inline u8x32 +u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask) +{ + return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2, + (__m256i) mask); +} + +#define u32x8_permute_lanes(a, b, m) \ + (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m) +#define u64x4_permute_lanes(a, b, m) \ + 
(u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m) + +static_always_inline u32x8 +u32x8_min (u32x8 a, u32x8 b) +{ + return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b); +} + +static_always_inline u32 +u32x8_min_scalar (u32x8 v) +{ + return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v), + u32x8_extract_hi (v))); +} + +static_always_inline void +u32x8_transpose (u32x8 a[8]) +{ + u64x4 r[8], x, y; + + r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]); + r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]); + r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]); + r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]); + r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]); + r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]); + r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]); + r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]); + + x = u64x4_interleave_lo (r[0], r[2]); + y = u64x4_interleave_lo (r[4], r[6]); + a[0] = u32x8_permute_lanes (x, y, 0x20); + a[4] = u32x8_permute_lanes (x, y, 0x31); + + x = u64x4_interleave_hi (r[0], r[2]); + y = u64x4_interleave_hi (r[4], r[6]); + a[1] = u32x8_permute_lanes (x, y, 0x20); + a[5] = u32x8_permute_lanes (x, y, 0x31); + + x = u64x4_interleave_lo (r[1], r[3]); + y = u64x4_interleave_lo (r[5], r[7]); + a[2] = u32x8_permute_lanes (x, y, 0x20); + a[6] = u32x8_permute_lanes (x, y, 0x31); + + x = u64x4_interleave_hi (r[1], r[3]); + y = u64x4_interleave_hi (r[5], r[7]); + a[3] = u32x8_permute_lanes (x, y, 0x20); + a[7] = u32x8_permute_lanes (x, y, 0x31); +} + +static_always_inline void +u64x4_transpose (u64x4 a[8]) +{ + u64x4 r[4]; + + r[0] = u64x4_interleave_lo (a[0], a[1]); + r[1] = u64x4_interleave_hi (a[0], a[1]); + r[2] = u64x4_interleave_lo (a[2], a[3]); + r[3] = u64x4_interleave_hi (a[2], a[3]); + + a[0] = u64x4_permute_lanes (r[0], r[2], 0x20); + a[1] = u64x4_permute_lanes (r[1], r[3], 0x20); + a[2] = u64x4_permute_lanes (r[0], r[2], 0x31); + a[3] = u64x4_permute_lanes (r[1], r[3], 0x31); +} + #endif /* included_vector_avx2_h */ /*
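For readers skimming the patch, here is a minimal usage sketch of a few of the helpers the hunks above introduce. It is not part of the change itself: it assumes a VPP source tree compiled with AVX2 enabled (so that the umbrella header <vppinfra/vector.h> pulls in vector_avx2.h), and the demo function name is hypothetical; only u32x8_byte_swap, u8x32_msb_mask, u16x16_mask_last, u32x8_min_scalar and the pre-existing *_splat generators are taken from the code above.

/* Usage sketch only -- not part of the patch above. */
#include <vppinfra/clib.h>
#include <vppinfra/vector.h>

#ifdef __AVX2__
static_always_inline u32
avx2_helpers_demo (void)
{
  /* byte-swap eight 32-bit lanes in one shot (e.g. network -> host order) */
  u32x8 v = u32x8_splat (0x11223344);
  u32x8 swapped = u32x8_byte_swap (v);	/* every lane becomes 0x44332211 */

  /* horizontal minimum across the eight lanes */
  u32 m = u32x8_min_scalar (swapped);

  /* compress the most-significant bit of each of the 32 bytes into a u32 */
  u32 bitmap = u8x32_msb_mask ((u8x32) swapped);

  /* zero the last 5 of the sixteen 16-bit lanes, keep the first 11 */
  u16x16 trimmed = u16x16_mask_last (u16x16_splat (0xffff), 5);

  /* combine the results so nothing is optimized away */
  return bitmap ^ m ^ trimmed[0];
}
#endif /* __AVX2__ */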