return (u16x8) vrev16q_u8 ((u8x16) v);
}
/* Byte-swap each 32-bit lane of v (endianness conversion per u32 element).
   vrev32q_u8 reverses the bytes within every 32-bit group of the vector. */
static_always_inline u32x4
u32x4_byte_swap (u32x4 v)
{
  return (u32x4) vrev32q_u8 ((u8x16) v);
}

static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
}
/* Zero-extend the two LOW 32-bit lanes of v into a vector of two u64s.
   vget_low_u32 selects lanes 0-1; vmovl_u32 widens them to 64 bits. */
static_always_inline u64x2
u64x2_from_u32x4 (u32x4 v)
{
  return vmovl_u32 (vget_low_u32 (v));
}
/* Zero-extend the two HIGH 32-bit lanes of v (lanes 2-3) into a vector of
   two u64s, using the AArch64 vmovl_high_u32 widening intrinsic. */
static_always_inline u64x2
u64x2_from_u32x4_high (u32x4 v)
{
  return vmovl_high_u32 (v);
}
return vminvq_u32 (v);
}
/* Shift the 16-byte vector left/right by n whole bytes, filling the vacated
   bytes with zeros.  These are macros rather than static inline functions
   because vextq_u8 requires its lane index to be an integer constant
   expression, which a function parameter does not guarantee on all
   compilers.  Arguments are parenthesized for macro hygiene; n is expanded
   once per use.  NOTE(review): vextq_u8 accepts indices 0..15, so n is
   presumably expected to be in 1..15 — confirm against callers. */
#define u8x16_word_shift_left(x, n)  vextq_u8 (u8x16_splat (0), (x), 16 - (n))
#define u8x16_word_shift_right(x, n) vextq_u8 ((x), u8x16_splat (0), (n))
static_always_inline u8x16
u8x16_reflect (u8x16 v)