X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fvector_neon.h;h=aef836573215186ae5f79a76212dd15ac3e3689e;hb=ad9d528;hp=6357d68fabb7e33db1e895de5ecb56f6d98b3892;hpb=b81831d122d59b55c7d42b39e4fd9e94b7d40aca;p=vpp.git

diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
index 6357d68fabb..aef83657321 100644
--- a/src/vppinfra/vector_neon.h
+++ b/src/vppinfra/vector_neon.h
@@ -17,109 +17,120 @@
 #define included_vector_neon_h
 #include <arm_neon.h>

-/* Splats. */
-
-#define u8x16_splat(i) vdupq_n_u8(i)
-#define u16x8_splat(i) vdupq_n_u16(i)
-#define i16x8_splat(i) vdupq_n_s16(i)
-#define u32x4_splat(i) vdupq_n_u32(i)
-#define i32x4_splat(i) vdupq_n_s32(i)
-
 /* Arithmetic */
-#define u16x8_add(a,b) vaddq_u16(a,b)
-#define i16x8_add(a,b) vaddq_s16(a,b)
 #define u16x8_sub_saturate(a,b) vsubq_u16(a,b)
 #define i16x8_sub_saturate(a,b) vsubq_s16(a,b)
+/* Dummy. Aid making uniform macros */
+#define vreinterpretq_u8_u8(a) a

-
-/* Compare operations. */
-#define u8x16_is_equal(a,b) vceqq_u8(a,b)
-#define i8x16_is_equal(a,b) vceqq_s8(a,b)
-#define u16x8_is_equal(a,b) vceqq_u16(a,b)
-#define i16x8_is_equal(a,b) vceqq_i16(a,b)
-#define u32x4_is_equal(a,b) vceqq_u32(a,b)
-#define i32x4_is_equal(a,b) vceqq_s32(a,b)
-#define i8x16_is_greater(a,b) vcgtq_s8(a,b)
-#define i16x8_is_greater(a,b) vcgtq_u8(a,b)
-#define i32x4_is_greater(a,b) vcgtq_s32(a,b)
-
-always_inline u8x16
-u8x16_is_zero (u8x16 x)
+/* Converts all ones/zeros compare mask to bitmap. */
+always_inline u32
+u8x16_compare_byte_mask (u8x16 v)
 {
-  u8x16 zero = { 0 };
-  return u8x16_is_equal (x, zero);
+  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
+  };
+  /* v --> [0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x00, ... ] */
+  uint8x16_t x = vandq_u8 (v, mask);
+  /* after v & mask,
+   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
+  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
+  /* after merge, x64 --> [0x5D, 0x.. ] */
+  return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
 }
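/* Usage sketch (illustrative, not part of the patch): the function expects
 * each byte lane to be all ones or all zeros, as produced by vceqq_u8 or a
 * GCC vector compare, and packs one bit per lane into the result:
 *
 *   u8x16 a = u8x16_splat (9);
 *   u8x16 b = { 9, 0, 9, 9 };                        // lanes 4..15 are 0
 *   u32 m = u8x16_compare_byte_mask ((u8x16) (a == b));
 *   // m == 0x000d: bits 0, 2 and 3 set, one per matching byte lane
 */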

-always_inline u16x8
-u16x8_is_zero (u16x8 x)
+/* *INDENT-OFF* */
+#define foreach_neon_vec128i \
+  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
+#define foreach_neon_vec128u \
+  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
+#define foreach_neon_vec128f \
+  _(f,32,4,f32) _(f,64,2,f64)
+
+#define _(t, s, c, i) \
+static_always_inline t##s##x##c \
+t##s##x##c##_splat (t##s x) \
+{ return (t##s##x##c) vdupq_n_##i (x); } \
+\
+static_always_inline t##s##x##c \
+t##s##x##c##_load_unaligned (void *p) \
+{ return (t##s##x##c) vld1q_##i (p); } \
+\
+static_always_inline void \
+t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
+{ vst1q_##i (p, v); } \
+\
+static_always_inline int \
+t##s##x##c##_is_all_zero (t##s##x##c x) \
+{ return !(vaddvq_##i (x)); } \
+\
+static_always_inline int \
+t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
+{ return t##s##x##c##_is_all_zero (a ^ b); } \
+\
+static_always_inline int \
+t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
+{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }; \
+\
+static_always_inline u32 \
+t##s##x##c##_zero_byte_mask (t##s##x##c x) \
+{ uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i(0), x)); \
+  return u8x16_compare_byte_mask (v); } \
+
+foreach_neon_vec128i foreach_neon_vec128u
+
+#undef _
+/* *INDENT-ON* */
+
+static_always_inline u16x8
+u16x8_byte_swap (u16x8 v)
 {
-  u16x8 zero = { 0 };
-  return u16x8_is_equal (x, zero);
+  return (u16x8) vrev16q_u8 ((u8x16) v);
 }

-always_inline u32x4
-u32x4_is_zero (u32x4 x)
+static_always_inline u8x16
+u8x16_shuffle (u8x16 v, u8x16 m)
 {
-  u32x4 zero = { 0 };
-  return u32x4_is_equal (x, zero);
+  return (u8x16) vqtbl1q_u8 (v, m);
 }

-/* Converts all ones/zeros compare mask to bitmap. */
-always_inline u32
-u8x16_compare_byte_mask (u8x16 x)
+static_always_inline u32x4
+u32x4_hadd (u32x4 v1, u32x4 v2)
 {
-  static int8_t const __attribute__ ((aligned (16))) xr[8] =
-  {
-    -7, -6, -5, -4, -3, -2, -1, 0};
-  uint8x8_t mask_and = vdup_n_u8 (0x80);
-  int8x8_t mask_shift = vld1_s8 (xr);
-
-  uint8x8_t lo = vget_low_u8 (x);
-  uint8x8_t hi = vget_high_u8 (x);
-
-  lo = vand_u8 (lo, mask_and);
-  lo = vshl_u8 (lo, mask_shift);
-
-  hi = vand_u8 (hi, mask_and);
-  hi = vshl_u8 (hi, mask_shift);
-
-  lo = vpadd_u8 (lo, lo);
-  lo = vpadd_u8 (lo, lo);
-  lo = vpadd_u8 (lo, lo);
-
-  hi = vpadd_u8 (hi, hi);
-  hi = vpadd_u8 (hi, hi);
-  hi = vpadd_u8 (hi, hi);
+  return (u32x4) vpaddq_u32 (v1, v2);
+}

-  return ((hi[0] << 8) | (lo[0] & 0xff));
+static_always_inline u64x2
+u32x4_extend_to_u64x2 (u32x4 v)
+{
+  return vmovl_u32 (vget_low_u32 (v));
 }

-always_inline u32
-u16x8_zero_byte_mask (u16x8 input)
+static_always_inline u64x2
+u32x4_extend_to_u64x2_high (u32x4 v)
 {
-  u8x16 vall_one = vdupq_n_u8 (0x0);
-  u8x16 res_values = { 0x01, 0x02, 0x04, 0x08,
-    0x10, 0x20, 0x40, 0x80,
-    0x01, 0x02, 0x04, 0x08,
-    0x10, 0x20, 0x40, 0x80
-  };
+  return vmovl_high_u32 (v);
+}
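/* Usage sketch (illustrative, not part of the patch): the two extend
 * helpers widen the low and high halves of a u32x4, e.g. to accumulate
 * 32-bit lanes into 64-bit counters without overflow:
 *
 *   u32x4 c = { 1, 2, 3, 4 };
 *   u64x2 lo = u32x4_extend_to_u64x2 (c);        // { 1, 2 }
 *   u64x2 hi = u32x4_extend_to_u64x2_high (c);   // { 3, 4 }
 *   u64x2 acc = lo + hi;                         // { 4, 6 }
 */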

-  /* input --> [0x80, 0x40, 0x01, 0xf0, ... ] */
-  u8x16 test_result =
-    vreinterpretq_u8_u16 (vceqq_u16 (input, vreinterpretq_u16_u8 (vall_one)));
-  u8x16 before_merge = vminq_u8 (test_result, res_values);
-  /*before_merge--> [0x80, 0x00, 0x00, 0x10, ... ] */
-  /* u8x16 --> [a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p] */
-  /* pair add until we have 2 uint64_t */
-  u16x8 merge1 = vpaddlq_u8 (before_merge);
-  /* u16x8--> [a+b,c+d, e+f,g+h, i+j,k+l, m+n,o+p] */
-  u32x4 merge2 = vpaddlq_u16 (merge1);
-  /* u32x4--> [a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p] */
-  u64x2 merge3 = vpaddlq_u32 (merge2);
-  /* u64x2--> [a+b+c+d+e+f+g+h, i+j+k+l+m+n+o+p] */
-  return (u32) (vgetq_lane_u64 (merge3, 1) << 8) + vgetq_lane_u64 (merge3, 0);
+/* Creates a mask made up of the MSB of each byte of the source vector */
+static_always_inline u16
+u8x16_msb_mask (u8x16 v)
+{
+  int8x16_t shift =
+    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
+  /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
+  uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
+  /* after (v & 0x80) >> shift,
+   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
+  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
+  /* after merge, x64 --> [0x5D, 0x.. ] */
+  return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
 }
+#define CLIB_HAVE_VEC128_MSB_MASK
+
+#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
+#define CLIB_VEC128_SPLAT_DEFINED

 #endif /* included_vector_neon_h */

 /*
  * fd.io coding-style-patch-verification: ON
  *
  * Local Variables:
  * eval: (c-set-style "gnu")
  * End:
  */
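/* Usage sketch (illustrative, not part of the patch): u8x16_msb_mask packs
 * the most significant bit of each byte lane into a 16-bit scalar, similar
 * to x86 PMOVMSKB:
 *
 *   u8x16 v = { 0x80, 0x7f, 0xf0, 0x01 };   // lanes 4..15 are 0
 *   u16 m = u8x16_msb_mask (v);             // m == 0x0005 (lanes 0 and 2)
 */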