#define u16x8_sub_saturate(a,b) vqsubq_u16(a,b)
#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)
-always_inline int
-u8x16_is_all_zero (u8x16 x)
-{
- return !(vaddvq_u8 (x));
-}
-
-always_inline int
-u16x8_is_all_zero (u16x8 x)
-{
- return !(vaddvq_u16 (x));
-}
-
-always_inline int
-u32x4_is_all_zero (u32x4 x)
-{
- return !(vaddvq_u32 (x));
-}
-
-always_inline int
-u64x2_is_all_zero (u64x2 x)
-{
- return !(vaddvq_u64 (x));
-}
-
/* Converts a per-byte all-ones/all-zeros compare mask to a 16-bit bitmap. */
always_inline u32
u8x16_compare_byte_mask (u8x16 x)
{
- static int8_t const __attribute__ ((aligned (16))) xr[8] =
- {
- -7, -6, -5, -4, -3, -2, -1, 0};
- uint8x8_t mask_and = vdup_n_u8 (0x80);
- int8x8_t mask_shift = vld1_s8 (xr);
-
- uint8x8_t lo = vget_low_u8 (x);
- uint8x8_t hi = vget_high_u8 (x);
-
- lo = vand_u8 (lo, mask_and);
- lo = vshl_u8 (lo, mask_shift);
-
- hi = vand_u8 (hi, mask_and);
- hi = vshl_u8 (hi, mask_shift);
-
- lo = vpadd_u8 (lo, lo);
- lo = vpadd_u8 (lo, lo);
- lo = vpadd_u8 (lo, lo);
-
- hi = vpadd_u8 (hi, hi);
- hi = vpadd_u8 (hi, hi);
- hi = vpadd_u8 (hi, hi);
-
- return ((hi[0] << 8) | (lo[0] & 0xff));
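+  /* Keep each byte's MSB, then shift it right by that byte's (negative)
+     lane count so byte n of each 8-byte half ends up holding mask bit n;
+     negative vshlq_u8 counts shift right. */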
+  int8x16_t mask_shift =
+    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
+  uint8x16_t mask_and = vdupq_n_u8 (0x80);
+  x = vandq_u8 (x, mask_and);
+  x = vshlq_u8 (x, mask_shift);
+ x = vpaddq_u8 (x, x);
+ x = vpaddq_u8 (x, x);
+ x = vpaddq_u8 (x, x);
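+  /* Three pairwise adds fold each 8-byte half into a single byte:
+     lane 0 ends up with the low mask byte, lane 1 with the high one. */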
+ return vgetq_lane_u8 (x, 0) | (vgetq_lane_u8 (x, 1) << 8);
}
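+
+/* Illustrative use with GCC vector compares (count_trailing_zeros is
+   clib's; p, c and first are hypothetical):
+     u8x16 v = u8x16_load_unaligned (p);
+     u32 mask = u8x16_compare_byte_mask ((u8x16) (v == u8x16_splat (c)));
+     if (mask) first = count_trailing_zeros (mask); */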
always_inline u32
u16x8_zero_byte_mask (u16x8 input)
{
  u8x16 vall_zero = vdupq_n_u8 (0x0);
  u8x16 res_values = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  };

  /* 0xff per byte where the 16-bit lane is zero, else 0 */
  u8x16 test_result =
    vreinterpretq_u8_u16 (vceqq_u16 (input, vreinterpretq_u16_u8 (vall_zero)));
  /* keep each matching byte's power-of-two bit from res_values */
  u8x16 before_merge = vminq_u8 (test_result, res_values);
  /* pairwise-widen adds until two u64 lanes remain */
  u16x8 merge1 = vpaddlq_u8 (before_merge);
  u32x4 merge2 = vpaddlq_u16 (merge1);
  u64x2 merge3 = vpaddlq_u32 (merge2);
  return (u32) (vgetq_lane_u64 (merge3, 1) << 8) + vgetq_lane_u64 (merge3, 0);
}
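+
+/* The wider zero-byte masks reinterpret the vector and reuse the u16x8
+   reduction above. */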
+always_inline u32
+u8x16_zero_byte_mask (u8x16 input)
+{
+ return u16x8_zero_byte_mask ((u16x8) input);
+}
+
+always_inline u32
+u32x4_zero_byte_mask (u32x4 input)
+{
+ return u16x8_zero_byte_mask ((u16x8) input);
+}
+
+always_inline u32
+u64x2_zero_byte_mask (u64x2 input)
+{
+ return u16x8_zero_byte_mask ((u16x8) input);
+}
+
+/* *INDENT-OFF* */
+#define foreach_neon_vec128i \
+ _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
+#define foreach_neon_vec128u \
+ _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
+#define foreach_neon_vec128f \
+ _(f,32,4,f32) _(f,64,2,f64)
+
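+/* _(t, s, c, i): t = type tag (i or u), s = lane width in bits,
+   c = lane count, i = NEON intrinsic suffix; each expansion defines
+   splat/load/store and zero/equality tests for one 128-bit vector type.
+   (foreach_neon_vec128f is defined but left unexpanded here.) */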
+#define _(t, s, c, i) \
+static_always_inline t##s##x##c \
+t##s##x##c##_splat (t##s x) \
+{ return (t##s##x##c) vdupq_n_##i (x); } \
+\
+static_always_inline t##s##x##c \
+t##s##x##c##_load_unaligned (void *p) \
+{ return (t##s##x##c) vld1q_##i (p); } \
+\
+static_always_inline void \
+t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
+{ vst1q_##i (p, v); } \
+\
+static_always_inline int \
+t##s##x##c##_is_all_zero (t##s##x##c x) \
+{ /* a per-lane vaddvq sum can wrap to zero; an unsigned max cannot */ \
+  return !vmaxvq_u32 ((u32x4) x); } \
+\
+static_always_inline int \
+t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
+{ return t##s##x##c##_is_all_zero (a ^ b); } \
+\
+static_always_inline int \
+t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
+{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
+
+foreach_neon_vec128i foreach_neon_vec128u
+
+#undef _
+/* *INDENT-ON* */
+
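+/* Swap the two bytes inside every 16-bit lane (e.g. 16-bit
+   network-to-host conversions); vrev16q_u8 reverses bytes within
+   each 16-bit unit. */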
+static_always_inline u16x8
+u16x8_byte_swap (u16x8 v)
+{
+ return (u16x8) vrev16q_u8 ((u8x16) v);
+}
+
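+/* Byte permute via the TBL instruction; unlike SSSE3 pshufb, any index
+   in m that is >= 16 produces 0 instead of keying off the index MSB. */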
+static_always_inline u8x16
+u8x16_shuffle (u8x16 v, u8x16 m)
+{
+ return (u8x16) vqtbl1q_u8 (v, m);
+}
+
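+/* Pairwise horizontal add: { v1[0]+v1[1], v1[2]+v1[3], v2[0]+v2[1],
+   v2[2]+v2[3] }, the same lane order as SSE3 _mm_hadd_epi32. */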
+static_always_inline u32x4
+u32x4_hadd (u32x4 v1, u32x4 v2)
+{
+ return (u32x4) vpaddq_u32 (v1, v2);
+}
+
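+/* Zero-extend u32 lanes to u64x2: the function below widens lanes 0-1,
+   the _high variant widens lanes 2-3. */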
+static_always_inline u64x2
+u32x4_extend_to_u64x2 (u32x4 v)
+{
+ return vmovl_u32 (vget_low_u32 (v));
+}
+
+static_always_inline u64x2
+u32x4_extend_to_u64x2_high (u32x4 v)
+{
+ return vmovl_high_u32 (v);
+}
+
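+/* Advertise to generic clib code that this platform has unaligned
+   128-bit loads/stores and the splat helpers defined above. */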
+#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
+#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */
/*