+/* Horizontal pairwise add: lanes 0-1 hold the pairwise sums of v1, lanes 2-3 those of v2 */
+static_always_inline u32x4
+u32x4_hadd (u32x4 v1, u32x4 v2)
+{
+ return (u32x4) vpaddq_u32 (v1, v2);
+}
+
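+/* Zero-extends the low two u32 lanes of v to u64 */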
+static_always_inline u64x2
+u32x4_extend_to_u64x2 (u32x4 v)
+{
+ return vmovl_u32 (vget_low_u32 (v));
+}
+
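+/* Zero-extends the high two u32 lanes of v to u64 */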
+static_always_inline u64x2
+u32x4_extend_to_u64x2_high (u32x4 v)
+{
+ return vmovl_high_u32 (v);
+}
+
+/* Creates a mask made up of the MSB of each byte of the source vector */
+static_always_inline u16
+u8x16_msb_mask (u8x16 v)
+{
+ int8x16_t shift =
+ { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
+ /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
+ uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
+ /* after (v & 0x80) >> -shift (vshlq_u8 shifts right for negative counts),
+ * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
+ uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
+ /* after the pairwise widening adds, x64 --> [0x5D, 0x.. ] */
+ return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
+}
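+
+/* Typical use (illustrative): since these are GCC vector types, a per-byte
+ * compare such as (a == b) yields 0xff/0x00 lanes, so
+ *   u16 hits = u8x16_msb_mask (a == b);
+ * sets one bit per byte position where a and b match. */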
+
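+/* Gathers one u64 from each pointer: p0 -> lane 0, p1 -> lane 1 */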
+static_always_inline u64x2
+u64x2_gather (void *p0, void *p1)
+{
+ u64x2 r = vdupq_n_u64 (*(u64 *) p0);
+ r = vsetq_lane_u64 (*(u64 *) p1, r, 1);
+ return r;
+}
+
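+/* Gathers one u32 from each of the four pointers into a single vector */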
+static_always_inline u32x4
+u32x4_gather (void *p0, void *p1, void *p2, void *p3)