+static_always_inline u64x2
+u64x2_gather (void *p0, void *p1)
+{
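+  /* broadcast *p0 to both lanes, then insert *p1 into lane 1 */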
+  u64x2 r = vdupq_n_u64 (*(u64 *) p0);
+  r = vsetq_lane_u64 (*(u64 *) p1, r, 1);
+  return r;
+}
+
+static_always_inline u32x4
+u32x4_gather (void *p0, void *p1, void *p2, void *p3)
+{
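+  /* broadcast *p0 to all four lanes, then insert the remaining three
+     values into lanes 1-3 */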
+  u32x4 r = vdupq_n_u32 (*(u32 *) p0);
+  r = vsetq_lane_u32 (*(u32 *) p1, r, 1);
+  r = vsetq_lane_u32 (*(u32 *) p2, r, 2);
+  r = vsetq_lane_u32 (*(u32 *) p3, r, 3);
+  return r;
+}
+
+static_always_inline void
+u64x2_scatter (u64x2 r, void *p0, void *p1)
+{
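+  /* store each lane to its own destination address */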
+  *(u64 *) p0 = vgetq_lane_u64 (r, 0);
+  *(u64 *) p1 = vgetq_lane_u64 (r, 1);
+}
+
+static_always_inline void
+u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
+{
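+  /* store each of the four lanes to its own destination address */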
+  *(u32 *) p0 = vgetq_lane_u32 (r, 0);
+  *(u32 *) p1 = vgetq_lane_u32 (r, 1);
+  *(u32 *) p2 = vgetq_lane_u32 (r, 2);
+  *(u32 *) p3 = vgetq_lane_u32 (r, 3);
+}
+
+static_always_inline u32
+u32x4_min_scalar (u32x4 v)
+{
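+  /* horizontal minimum across all four lanes */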
+  return vminvq_u32 (v);
+}
+
+static_always_inline u8x16
+u8x16_word_shift_left (u8x16 x, const int n)
+{
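+  /* shift the whole vector left by n bytes, filling with zeros; n must
+     be a compile-time constant, as vextq_u8 takes an immediate */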
+  return vextq_u8 (u8x16_splat (0), x, 16 - n);
+}
+
+static_always_inline u8x16
+u8x16_word_shift_right (u8x16 x, const int n)
+{
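+  /* shift the whole vector right by n bytes, filling with zeros */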
+  return vextq_u8 (x, u8x16_splat (0), n);
+}
+
+static_always_inline u8x16
+u8x16_reflect (u8x16 v)
+{
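+  /* reverse the byte order of the vector with a table lookup */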
+  u8x16 mask = {
+    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+  };
+  return (u8x16) vqtbl1q_u8 (v, mask);
+}
+
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
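+  /* three-way XOR; on GCC 8 with SHA3 support, emit the EOR3 instruction
+     directly, otherwise fall back to two plain XORs */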
+#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
+  u8x16 r;
+  __asm__ ("eor3 %0.16b, %1.16b, %2.16b, %3.16b"
+           : "=w" (r)
+           : "0" (a), "w" (b), "w" (c));
+  return r;
+#endif
+  return a ^ b ^ c;
+}
+
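+/* A minimal usage sketch (illustrative only, not part of the change;
+   p0..p3 are hypothetical u32 pointers and u32x4_splat is the existing
+   vppinfra splat helper): gather, modify, scatter back:
+
+     u32x4 v = u32x4_gather (p0, p1, p2, p3);
+     v += u32x4_splat (1);
+     u32x4_scatter (v, p0, p1, p2, p3);
+ */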