/* Arithmetic */
#define u16x8_sub_saturate(a,b) vqsubq_u16(a,b)
#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)
+/* Dummy identity reinterpret; keeps the type-generic macros below uniform */
+#define vreinterpretq_u8_u8(a) a
+/* Implement the missing vminvq_u64 intrinsic so the macros can stay uniform */
+#define vminvq_u64(x) \
+({ \
+ u64 x0 = vgetq_lane_u64(x, 0); \
+ u64 x1 = vgetq_lane_u64(x, 1); \
+ x0 < x1 ? x0 : x1; \
+})
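+/* e.g. vminvq_u64 of { 7, 3 } evaluates to 3 */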
/* Converts an all-ones/all-zeros compare mask to a bitmap. */
always_inline u32
-u8x16_compare_byte_mask (u8x16 x)
+u8x16_compare_byte_mask (u8x16 v)
{
- uint8x16_t mask_shift =
- { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
- uint8x16_t mask_and = vdupq_n_u8 (0x80);
- x = vandq_u8 (x, mask_and);
- x = vshlq_u8 (x, vreinterpretq_s8_u8 (mask_shift));
- x = vpaddq_u8 (x, x);
- x = vpaddq_u8 (x, x);
- x = vpaddq_u8 (x, x);
- return vgetq_lane_u8 (x, 0) | (vgetq_lane_u8 (x, 1) << 8);
-}
-
-always_inline u32
-u16x8_zero_byte_mask (u16x8 input)
-{
- u8x16 vall_one = vdupq_n_u8 (0x0);
- u8x16 res_values = { 0x01, 0x02, 0x04, 0x08,
- 0x10, 0x20, 0x40, 0x80,
- 0x01, 0x02, 0x04, 0x08,
- 0x10, 0x20, 0x40, 0x80
+  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+                      0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
};
-
- /* input --> [0x80, 0x40, 0x01, 0xf0, ... ] */
- u8x16 test_result =
- vreinterpretq_u8_u16 (vceqq_u16 (input, vreinterpretq_u16_u8 (vall_one)));
- u8x16 before_merge = vminq_u8 (test_result, res_values);
- /*before_merge--> [0x80, 0x00, 0x00, 0x10, ... ] */
- /* u8x16 --> [a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p] */
- /* pair add until we have 2 uint64_t */
- u16x8 merge1 = vpaddlq_u8 (before_merge);
- /* u16x8--> [a+b,c+d, e+f,g+h, i+j,k+l, m+n,o+p] */
- u32x4 merge2 = vpaddlq_u16 (merge1);
- /* u32x4--> [a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p] */
- u64x2 merge3 = vpaddlq_u32 (merge2);
- /* u64x2--> [a+b+c+d+e+f+g+h, i+j+k+l+m+n+o+p] */
- return (u32) (vgetq_lane_u64 (merge3, 1) << 8) + vgetq_lane_u64 (merge3, 0);
-}
-
-always_inline u32
-u8x16_zero_byte_mask (u8x16 input)
-{
- return u16x8_zero_byte_mask ((u16x8) input);
-}
-
-always_inline u32
-u32x4_zero_byte_mask (u32x4 input)
-{
- return u16x8_zero_byte_mask ((u16x8) input);
-}
-
-always_inline u32
-u64x2_zero_byte_mask (u64x2 input)
-{
- return u16x8_zero_byte_mask ((u16x8) input);
+ /* v --> [0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x00, ... ] */
+ uint8x16_t x = vandq_u8 (v, mask);
+ /* after v & mask,
+ * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
+ uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
+ /* after merge, x64 --> [0x5D, 0x.. ] */
+ return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
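+
+/* Illustrative use (a and b are hypothetical vectors):
+ *   uint8x16_t eq = vceqq_u8 (a, b);
+ *   u32 mask = u8x16_compare_byte_mask ((u8x16) eq);
+ * bit n of mask is set iff byte n of a equals byte n of b */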
/* *INDENT-OFF* */
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
-{ return !(vaddvq_##i (x)); } \
+{ return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i(0), x))); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
-{ return t##s##x##c##_is_all_zero (a ^ b); } \
+{ return !!(vminvq_u##s (vceqq_##i (a, b))); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }; \
+\
+static_always_inline u32 \
+t##s##x##c##_zero_byte_mask (t##s##x##c x) \
+{ uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i(0), x)); \
+ return u8x16_compare_byte_mask (v); } \
+\
+static_always_inline u##s##x##c \
+t##s##x##c##_is_greater (t##s##x##c a, t##s##x##c b) \
+{ return (u##s##x##c) vcgtq_##i (a, b); } \
+\
+static_always_inline t##s##x##c \
+t##s##x##c##_blend (t##s##x##c dst, t##s##x##c src, u##s##x##c mask) \
+{ return (t##s##x##c) vbslq_##i (mask, src, dst); }
foreach_neon_vec128i foreach_neon_vec128u
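+
+/* Semantics of the generated helpers, e.g. for the u32x4 instantiation:
+ *   u32x4_is_all_zero (v)     -> 1 iff every lane is zero
+ *   u32x4_is_equal (a, b)     -> 1 iff all lanes match
+ *   u32x4_is_greater (a, b)   -> per-lane all-ones/all-zeros mask of a > b
+ *   u32x4_blend (dst, src, m) -> bits of src where m is set, else bits of dst
+ */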
return (u16x8) vrev16q_u8 ((u8x16) v);
}
+static_always_inline u32x4
+u32x4_byte_swap (u32x4 v)
+{
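+  /* reverse the bytes within each 32-bit lane, e.g. 0x11223344 -> 0x44332211 */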
+ return (u32x4) vrev32q_u8 ((u8x16) v);
+}
+
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
}
static_always_inline u64x2
-u32x4_extend_to_u64x2 (u32x4 v)
+u64x2_from_u32x4 (u32x4 v)
{
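+  /* zero-extend the two low u32 lanes, e.g. { 1, 2, 3, 4 } -> { 1, 2 } */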
return vmovl_u32 (vget_low_u32 (v));
}
static_always_inline u64x2
-u32x4_extend_to_u64x2_high (u32x4 v)
+u64x2_from_u32x4_high (u32x4 v)
+{
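+  /* zero-extend the two high u32 lanes, e.g. { 1, 2, 3, 4 } -> { 3, 4 } */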
+ return vmovl_high_u32 (v);
+}
+
+/* Creates a mask made up of the MSB of each byte of the source vector */
+static_always_inline u16
+u8x16_msb_mask (u8x16 v)
+{
+ int8x16_t shift =
+ { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
+ /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
+ uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
+ /* after (v & 0x80) >> shift,
+ * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
+ uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
+ /* after merge, x64 --> [0x5D, 0x.. ] */
+ return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
+}
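+/* Bit n of the result is the most significant bit of byte n of v */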
+
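+/* Gather: load lanes from two (or four) independent addresses */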
+static_always_inline u64x2
+u64x2_gather (void *p0, void *p1)
+{
+ u64x2 r = vdupq_n_u64 (*(u64 *) p0);
+ r = vsetq_lane_u64 (*(u64 *) p1, r, 1);
+ return r;
+}
+
+static_always_inline u32x4
+u32x4_gather (void *p0, void *p1, void *p2, void *p3)
+{
+ u32x4 r = vdupq_n_u32 (*(u32 *) p0);
+ r = vsetq_lane_u32 (*(u32 *) p1, r, 1);
+ r = vsetq_lane_u32 (*(u32 *) p2, r, 2);
+ r = vsetq_lane_u32 (*(u32 *) p3, r, 3);
+ return r;
+}
+
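+/* Scatter: store each lane to its own address */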
+static_always_inline void
+u64x2_scatter (u64x2 r, void *p0, void *p1)
+{
+ *(u64 *) p0 = vgetq_lane_u64 (r, 0);
+ *(u64 *) p1 = vgetq_lane_u64 (r, 1);
+}
+
+static_always_inline void
+u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
{
- return vmovl_high_u32 (vrev64q_u32 (v));
+ *(u32 *) p0 = vgetq_lane_u32 (r, 0);
+ *(u32 *) p1 = vgetq_lane_u32 (r, 1);
+ *(u32 *) p2 = vgetq_lane_u32 (r, 2);
+ *(u32 *) p3 = vgetq_lane_u32 (r, 3);
}
+static_always_inline u32
+u32x4_min_scalar (u32x4 v)
+{
+ return vminvq_u32 (v);
+}
+
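+/* Whole-vector shifts by n bytes, zero-filling: _left moves bytes to higher
+ * lane indices, _right to lower ones; e.g. u8x16_word_shift_right (x, 1)
+ * drops byte 0 of x */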
+#define u8x16_word_shift_left(x,n) vextq_u8(u8x16_splat (0), (x), 16 - (n))
+#define u8x16_word_shift_right(x,n) vextq_u8((x), u8x16_splat (0), (n))
+
+static_always_inline u8x16
+u8x16_reflect (u8x16 v)
+{
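+  /* reverse the byte order, e.g. { 0, 1, ..., 15 } -> { 15, 14, ..., 1, 0 } */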
+ u8x16 mask = {
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+ };
+ return (u8x16) vqtbl1q_u8 (v, mask);
+}
+
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
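+  /* emit the SHA3 EOR3 instruction directly where the guard below applies */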
+#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
+ u8x16 r;
+__asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b": "=w" (r): "0" (a), "w" (b), "w" (c):);
+ return r;
+#endif
+ return a ^ b ^ c;
+}
+
+#define CLIB_HAVE_VEC128_MSB_MASK
+
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */