#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)
/* Dummy identity reinterpret; keeps the byte-mask macros below uniform. */
#define vreinterpretq_u8_u8(a) a
+/* Implement missing intrinsics so the macros below stay uniform. NEON has
+   no vminvq_u64, so emulate it with two lane extracts. */
+#define vminvq_u64(x) \
+({ \
+ u64 x0 = vgetq_lane_u64(x, 0); \
+ u64 x1 = vgetq_lane_u64(x, 1); \
+ x0 < x1 ? x0 : x1; \
+})
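+/* The compare macros below rely on vminvq: the horizontal minimum of a
+   lane-wise compare mask is nonzero only if every lane is all ones. */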
/* Converts an all-ones/all-zeros compare mask to a bitmap. */
always_inline u32
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
-{ return !(vaddvq_##i (x)); } \
+{ return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i(0), x))); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
-{ return t##s##x##c##_is_all_zero (a ^ b); } \
+{ return !!(vminvq_u##s (vceqq_##i (a, b))); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
t##s##x##c##_zero_byte_mask (t##s##x##c x) \
{ uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i(0), x)); \
return u8x16_compare_byte_mask (v); } \
+\
+static_always_inline u##s##x##c \
+t##s##x##c##_is_greater (t##s##x##c a, t##s##x##c b) \
+{ return (u##s##x##c) vcgtq_##i (a, b); } \
+\
+static_always_inline t##s##x##c \
+t##s##x##c##_blend (t##s##x##c dst, t##s##x##c src, u##s##x##c mask) \
+{ return (t##s##x##c) vbslq_##i (mask, src, dst); }
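+/* vbslq_* is a bitwise select: result bits come from src where the mask bit
+   is 1 and from dst where it is 0, so lane-wise blends need all-ones or
+   all-zeros lane masks such as those produced by is_greater. */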
foreach_neon_vec128i foreach_neon_vec128u
return (u16x8) vrev16q_u8 ((u8x16) v);
}
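+/* Swap byte order within each 32-bit lane, e.g. 0x00010203 -> 0x03020100. */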
+static_always_inline u32x4
+u32x4_byte_swap (u32x4 v)
+{
+  return (u32x4) vrev32q_u8 ((u8x16) v);
+}
+
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
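+/* Gather/scatter: assemble a vector from, or spread its lanes to,
+   independent memory locations. */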
+static_always_inline u64x2
+u64x2_gather (void *p0, void *p1)
+{
+ u64x2 r = vdupq_n_u64 (*(u64 *) p0);
+ r = vsetq_lane_u64 (*(u64 *) p1, r, 1);
+ return r;
+}
+
+static_always_inline u32x4
+u32x4_gather (void *p0, void *p1, void *p2, void *p3)
+{
+ u32x4 r = vdupq_n_u32 (*(u32 *) p0);
+ r = vsetq_lane_u32 (*(u32 *) p1, r, 1);
+ r = vsetq_lane_u32 (*(u32 *) p2, r, 2);
+ r = vsetq_lane_u32 (*(u32 *) p3, r, 3);
+ return r;
+}
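+/* Example: u32x4_gather (&a, &b, &c, &d) yields { a, b, c, d }, with the
+   value at p0 in lane 0. */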
+
+static_always_inline void
+u64x2_scatter (u64x2 r, void *p0, void *p1)
+{
+ *(u64 *) p0 = vgetq_lane_u64 (r, 0);
+ *(u64 *) p1 = vgetq_lane_u64 (r, 1);
+}
+
+static_always_inline void
+u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
+{
+ *(u32 *) p0 = vgetq_lane_u32 (r, 0);
+ *(u32 *) p1 = vgetq_lane_u32 (r, 1);
+ *(u32 *) p2 = vgetq_lane_u32 (r, 2);
+ *(u32 *) p3 = vgetq_lane_u32 (r, 3);
+}
+
+static_always_inline u32
+u32x4_min_scalar (u32x4 v)
+{
+ return vminvq_u32 (v);
+}
+
+#define u8x16_word_shift_left(x,n) vextq_u8(u8x16_splat (0), x, 16 - n)
+#define u8x16_word_shift_right(x,n) vextq_u8(x, u8x16_splat (0), n)
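+/* Shift the whole vector left/right by n bytes. n must be a compile-time
+   constant, since vextq_u8 takes an immediate. */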
+
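+/* Reverse the byte order of the whole vector: vqtbl1q_u8 picks each result
+   byte from v using the descending indices in mask. */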
+static_always_inline u8x16
+u8x16_reflect (u8x16 v)
+{
+ u8x16 mask = {
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+ };
+ return (u8x16) vqtbl1q_u8 (v, mask);
+}
+
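+/* Three-way XOR: a single EOR3 instruction where the SHA3 extension is
+   available, two plain EORs otherwise. */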
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
+ u8x16 r;
+  __asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b"
+           : "=w" (r)
+           : "0" (a), "w" (b), "w" (c));
+ return r;
+#endif
+ return a ^ b ^ c;
+}
+
#define CLIB_HAVE_VEC128_MSB_MASK
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE