X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fvector_neon.h;h=70a7bc0f11ef3187e223d4a2d3a53ba55529ba03;hb=683bdb6743e0568bbf02c1f4953d3f73e6f2b544;hp=70b05c608843f059338753de1aef85618502a087;hpb=cec484f080c06fde820de3a6695592deab21725f;p=vpp.git

diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
index 70b05c60884..70a7bc0f11e 100644
--- a/src/vppinfra/vector_neon.h
+++ b/src/vppinfra/vector_neon.h
@@ -88,12 +88,6 @@ u8x16_compare_byte_mask (u8x16 v)
     return u8x16_compare_byte_mask (v);                                      \
   }                                                                          \
                                                                              \
-  static_always_inline u##s##x##c t##s##x##c##_is_greater (t##s##x##c a,     \
-                                                           t##s##x##c b)     \
-  {                                                                          \
-    return (u##s##x##c) vcgtq_##i (a, b);                                    \
-  }                                                                          \
-                                                                             \
   static_always_inline t##s##x##c t##s##x##c##_add_saturate (t##s##x##c a,   \
                                                              t##s##x##c b)   \
   {                                                                          \
@@ -129,12 +123,6 @@ u32x4_byte_swap (u32x4 v)
   return (u32x4) vrev32q_u8 ((u8x16) v);
 }
 
-static_always_inline u8x16
-u8x16_shuffle (u8x16 v, u8x16 m)
-{
-  return (u8x16) vqtbl1q_u8 (v, m);
-}
-
 static_always_inline u32x4
 u32x4_hadd (u32x4 v1, u32x4 v2)
 {
@@ -211,6 +199,18 @@ u32x4_min_scalar (u32x4 v)
 #define u8x16_word_shift_left(x,n) vextq_u8(u8x16_splat (0), x, 16 - n)
 #define u8x16_word_shift_right(x,n) vextq_u8(x, u8x16_splat (0), n)
 
+always_inline u32x4
+u32x4_interleave_hi (u32x4 a, u32x4 b)
+{
+  return (u32x4) vzip2q_u32 (a, b);
+}
+
+always_inline u32x4
+u32x4_interleave_lo (u32x4 a, u32x4 b)
+{
+  return (u32x4) vzip1q_u32 (a, b);
+}
+
 static_always_inline u8x16
 u8x16_reflect (u8x16 v)
 {
@@ -231,6 +231,61 @@ __asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b": "=w" (r): "0" (a), "w" (b), "w" (c)
   return a ^ b ^ c;
 }
 
+static_always_inline u8x16
+u8x16_load_partial (u8 *data, uword n)
+{
+  u8x16 r = {};
+  if (n > 7)
+    {
+      u64x2 r;
+      r[1] = *(u64u *) (data + n - 8);
+      r >>= (16 - n) * 8;
+      r[0] = *(u64u *) data;
+      return (u8x16) r;
+    }
+  else if (n > 3)
+    {
+      u32x4 r = {};
+      r[1] = *(u32u *) (data + n - 4);
+      r >>= (8 - n) * 8;
+      r[0] = *(u32u *) data;
+      return (u8x16) r;
+    }
+  else if (n > 1)
+    {
+      u16x8 r = {};
+      r[1] = *(u16u *) (data + n - 2);
+      r >>= (4 - n) * 8;
+      r[0] = *(u16u *) data;
+      return (u8x16) r;
+    }
+  else if (n > 0)
+    r[0] = *data;
+  return r;
+}
+
+static_always_inline void
+u8x16_store_partial (u8x16 r, u8 *data, uword n)
+{
+  if (n > 7)
+    {
+      *(u64u *) (data + n - 8) = ((u64x2) r)[1] << ((16 - n) * 8);
+      *(u64u *) data = ((u64x2) r)[0];
+    }
+  else if (n > 3)
+    {
+      *(u32u *) (data + n - 4) = ((u32x4) r)[1] << ((8 - n) * 8);
+      *(u32u *) data = ((u32x4) r)[0];
+    }
+  else if (n > 1)
+    {
+      *(u16u *) (data + n - 2) = ((u16x8) r)[1] << ((4 - n) * 8);
+      *(u16u *) data = ((u16x8) r)[0];
+    }
+  else if (n > 0)
+    data[0] = r[0];
+}
+
 #define CLIB_HAVE_VEC128_MSB_MASK
 
 #define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
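
Reviewer note (illustrative, not part of the patch): the new u32x4_interleave_lo / u32x4_interleave_hi helpers map to vzip1q_u32 / vzip2q_u32, which give the unpacklo/unpackhi lane ordering used by the other vppinfra vector ports. A minimal sketch of the expected lane order, assuming the vppinfra vector types are in scope; the function name is hypothetical:

static_always_inline void
u32x4_interleave_example (void)
{
  u32x4 a = { 0, 1, 2, 3 };
  u32x4 b = { 4, 5, 6, 7 };
  /* vzip1q_u32 interleaves the low halves, vzip2q_u32 the high halves */
  u32x4 lo = u32x4_interleave_lo (a, b); /* { 0, 4, 1, 5 } */
  u32x4 hi = u32x4_interleave_hi (a, b); /* { 2, 6, 3, 7 } */
  (void) lo;
  (void) hi;
}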
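
Reviewer note (illustrative, not part of the patch): u8x16_load_partial / u8x16_store_partial handle buffers shorter than 16 bytes by combining two overlapping narrow accesses, so they never read or write past the first n bytes. A minimal usage sketch for copying a sub-16-byte tail, assuming a VPP translation unit that already includes vppinfra/vector.h; copy_tail is a hypothetical helper name:

static_always_inline void
copy_tail (u8 *dst, u8 *src, uword n_left)
{
  /* n_left is expected to be in [0, 16]; load_partial zero-fills the lanes
     past n_left, store_partial touches exactly the first n_left bytes. */
  u8x16 t = u8x16_load_partial (src, n_left);
  u8x16_store_partial (t, dst, n_left);
}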