return (u16x8) vrev16q_u8 ((u8x16) v);
}
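+/* swap byte order within each 32-bit element */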
+static_always_inline u32x4
+u32x4_byte_swap (u32x4 v)
+{
+ return (u32x4) vrev32q_u8 ((u8x16) v);
+}
+
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
return (u8x16) vqtbl1q_u8 (v, m);
}
-static_always_inline u8x16
-u8x16_word_shift_left (u8x16 x, const int n)
-{
- return vextq_u8 (u8x16_splat (0), x, 16 - n);
-}
-
-static_always_inline u8x16
-u8x16_word_shift_right (u8x16 x, const int n)
-{
- return vextq_u8 (x, u8x16_splat (0), n);
-}
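+/* vextq_u8 requires the shift count to be a compile-time constant, hence macros */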
+#define u8x16_word_shift_left(x,n)  vextq_u8 (u8x16_splat (0), x, 16 - n)
+#define u8x16_word_shift_right(x,n) vextq_u8 (x, u8x16_splat (0), n)
static_always_inline u8x16
u8x16_reflect (u8x16 v)