diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
index 4e911ac..48644dd 100644
--- a/src/vppinfra/vector_neon.h
+++ b/src/vppinfra/vector_neon.h
@@ -17,9 +17,6 @@
 #define included_vector_neon_h
 #include <arm_neon.h>
 
-/* Arithmetic */
-#define u16x8_sub_saturate(a,b) vsubq_u16(a,b)
-#define i16x8_sub_saturate(a,b) vsubq_s16(a,b)
 /* Dummy. Aid making uniform macros */
 #define vreinterpretq_u8_u8(a)  a
 /* Implement the missing intrinsics to make uniform macros */
@@ -46,7 +43,6 @@ u8x16_compare_byte_mask (u8x16 v)
   return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
 }
 
-/* *INDENT-OFF* */
 #define foreach_neon_vec128i \
   _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32)  _(i,64,2,s64)
 #define foreach_neon_vec128u \
@@ -54,48 +50,64 @@ u8x16_compare_byte_mask (u8x16 v)
 #define foreach_neon_vec128f \
   _(f,32,4,f32) _(f,64,2,f64)
 
-#define _(t, s, c, i) \
-static_always_inline t##s##x##c                                                \
-t##s##x##c##_splat (t##s x)                                            \
-{ return (t##s##x##c) vdupq_n_##i (x); }                               \
-\
-static_always_inline t##s##x##c                                                \
-t##s##x##c##_load_unaligned (void *p)                                  \
-{ return (t##s##x##c) vld1q_##i (p); }                                 \
-\
-static_always_inline void                                              \
-t##s##x##c##_store_unaligned (t##s##x##c v, void *p)                   \
-{ vst1q_##i (p, v); }                                                  \
-\
-static_always_inline int                                               \
-t##s##x##c##_is_all_zero (t##s##x##c x)                                        \
-{ return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i(0), x))); }                                            \
-\
-static_always_inline int                                               \
-t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b)                     \
-{ return !!(vminvq_u##s (vceqq_##i (a, b))); }                         \
-\
-static_always_inline int                                               \
-t##s##x##c##_is_all_equal (t##s##x##c v, t##s x)                       \
-{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); };         \
-\
-static_always_inline u32                                               \
-t##s##x##c##_zero_byte_mask (t##s##x##c x)                     \
-{ uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i(0), x));  \
-  return u8x16_compare_byte_mask (v); } \
-\
-static_always_inline u##s##x##c                                                \
-t##s##x##c##_is_greater (t##s##x##c a, t##s##x##c b)                   \
-{ return (u##s##x##c) vcgtq_##i (a, b); }                              \
-\
-static_always_inline t##s##x##c                                                \
-t##s##x##c##_blend (t##s##x##c dst, t##s##x##c src, u##s##x##c mask)   \
-{ return (t##s##x##c) vbslq_##i (mask, src, dst); }
+#define _(t, s, c, i)                                                         \
+  static_always_inline t##s##x##c t##s##x##c##_splat (t##s x)                 \
+  {                                                                           \
+    return (t##s##x##c) vdupq_n_##i (x);                                      \
+  }                                                                           \
+                                                                              \
+  static_always_inline t##s##x##c t##s##x##c##_load_unaligned (void *p)       \
+  {                                                                           \
+    return (t##s##x##c) vld1q_##i (p);                                        \
+  }                                                                           \
+                                                                              \
+  static_always_inline void t##s##x##c##_store_unaligned (t##s##x##c v,       \
+                                                         void *p)            \
+  {                                                                           \
+    vst1q_##i (p, v);                                                         \
+  }                                                                           \
+                                                                              \
+  static_always_inline int t##s##x##c##_is_all_zero (t##s##x##c x)            \
+  {                                                                           \
+    return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i (0), x)));                  \
+  }                                                                           \
+                                                                              \
+  static_always_inline int t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
+  {                                                                           \
+    return !!(vminvq_u##s (vceqq_##i (a, b)));                                \
+  }                                                                           \
+  static_always_inline int t##s##x##c##_is_all_equal (t##s##x##c v, t##s x)   \
+  {                                                                           \
+    return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x));                 \
+  };                                                                          \
+                                                                              \
+  static_always_inline u32 t##s##x##c##_zero_byte_mask (t##s##x##c x)         \
+  {                                                                           \
+    uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i (0), x));    \
+    return u8x16_compare_byte_mask (v);                                       \
+  }                                                                           \
+                                                                              \
+  static_always_inline t##s##x##c t##s##x##c##_add_saturate (t##s##x##c a,    \
+                                                            t##s##x##c b)    \
+  {                                                                           \
+    return (t##s##x##c) vqaddq_##i (a, b);                                    \
+  }                                                                           \
+                                                                              \
+  static_always_inline t##s##x##c t##s##x##c##_sub_saturate (t##s##x##c a,    \
+                                                            t##s##x##c b)    \
+  {                                                                           \
+    return (t##s##x##c) vqsubq_##i (a, b);                                    \
+  }                                                                           \
+                                                                              \
+  static_always_inline t##s##x##c t##s##x##c##_blend (                        \
+    t##s##x##c dst, t##s##x##c src, u##s##x##c mask)                          \
+  {                                                                           \
+    return (t##s##x##c) vbslq_##i (mask, src, dst);                           \
+  }
 
 foreach_neon_vec128i foreach_neon_vec128u
 
 #undef _
-/* *INDENT-ON* */
 
 static_always_inline u16x8
 u16x8_byte_swap (u16x8 v)
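The _(t, s, c, i) macro above stamps out one copy of each helper (splat, unaligned load/store, equality tests, zero-byte mask, saturating add/sub, blend) for every type listed in foreach_neon_vec128i and foreach_neon_vec128u. A minimal usage sketch of the generated u16x8 variants, assuming a VPP translation unit that pulls in this header through <vppinfra/vector.h>; the function, buffer and constant names are illustrative only:

#include <vppinfra/vector.h>

/* Illustrative sketch of the macro-generated u16x8 helpers. */
static int
demo_u16x8 (u16 *src, u16 *dst)
{
  u16x8 a = u16x8_load_unaligned (src);   /* vld1q_u16 */
  u16x8 b = u16x8_splat (100);            /* vdupq_n_u16 */
  u16x8 sum = u16x8_add_saturate (a, b);  /* vqaddq_u16, clamps at 0xffff */
  u16x8 diff = u16x8_sub_saturate (a, b); /* vqsubq_u16, clamps at 0 */
  /* vbslq_u16: take bits from 'sum' where the mask bit is set, else 'diff' */
  u16x8 sel = u16x8_blend (diff, sum, u16x8_splat (0xff00));
  u16x8_store_unaligned (sel, dst);
  return u16x8_is_all_zero (diff);
}
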
@@ -103,10 +115,10 @@ u16x8_byte_swap (u16x8 v)
   return (u16x8) vrev16q_u8 ((u8x16) v);
 }
 
-static_always_inline u8x16
-u8x16_shuffle (u8x16 v, u8x16 m)
+static_always_inline u32x4
+u32x4_byte_swap (u32x4 v)
 {
-  return (u8x16) vqtbl1q_u8 (v, m);
+  return (u32x4) vrev32q_u8 ((u8x16) v);
 }
 
 static_always_inline u32x4
@@ -116,13 +128,13 @@ u32x4_hadd (u32x4 v1, u32x4 v2)
 }
 
 static_always_inline u64x2
-u32x4_extend_to_u64x2 (u32x4 v)
+u64x2_from_u32x4 (u32x4 v)
 {
   return vmovl_u32 (vget_low_u32 (v));
 }
 
 static_always_inline u64x2
-u32x4_extend_to_u64x2_high (u32x4 v)
+u64x2_from_u32x4_high (u32x4 v)
 {
   return vmovl_high_u32 (v);
 }
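The renamed widening helpers and the new u32x4_byte_swap can be exercised as in the sketch below (again assuming <vppinfra/vector.h>; the wrapper function is hypothetical):

#include <vppinfra/vector.h>

/* Illustrative sketch: widen four u32 lanes into two u64x2 vectors and
   byte-swap a vector of 32-bit words. */
static void
demo_widen_and_swap (u32x4 v, u64x2 *lo, u64x2 *hi, u32x4 *swapped)
{
  *lo = u64x2_from_u32x4 (v);      /* lanes 0-1, via vmovl_u32 */
  *hi = u64x2_from_u32x4_high (v); /* lanes 2-3, via vmovl_high_u32 */
  *swapped = u32x4_byte_swap (v);  /* vrev32q_u8 on the byte view */
}
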
@@ -160,6 +172,118 @@ u32x4_gather (void *p0, void *p1, void *p2, void *p3)
   return r;
 }
 
+static_always_inline void
+u64x2_scatter (u64x2 r, void *p0, void *p1)
+{
+  *(u64 *) p0 = vgetq_lane_u64 (r, 0);
+  *(u64 *) p1 = vgetq_lane_u64 (r, 1);
+}
+
+static_always_inline void
+u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
+{
+  *(u32 *) p0 = vgetq_lane_u32 (r, 0);
+  *(u32 *) p1 = vgetq_lane_u32 (r, 1);
+  *(u32 *) p2 = vgetq_lane_u32 (r, 2);
+  *(u32 *) p3 = vgetq_lane_u32 (r, 3);
+}
+
+static_always_inline u32
+u32x4_min_scalar (u32x4 v)
+{
+  return vminvq_u32 (v);
+}
+
+#define u8x16_word_shift_left(x,n)  vextq_u8(u8x16_splat (0), x, 16 - n)
+#define u8x16_word_shift_right(x,n) vextq_u8(x, u8x16_splat (0), n)
+
+always_inline u32x4
+u32x4_interleave_hi (u32x4 a, u32x4 b)
+{
+  return (u32x4) vzip2q_u32 (a, b);
+}
+
+always_inline u32x4
+u32x4_interleave_lo (u32x4 a, u32x4 b)
+{
+  return (u32x4) vzip1q_u32 (a, b);
+}
+
+static_always_inline u8x16
+u8x16_reflect (u8x16 v)
+{
+  u8x16 mask = {
+    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+  };
+  return (u8x16) vqtbl1q_u8 (v, mask);
+}
+
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
+  u8x16 r;
+  __asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b" : "=w" (r) : "0" (a), "w" (b), "w" (c));
+  return r;
+#endif
+  return a ^ b ^ c;
+}
+
+static_always_inline u8x16
+u8x16_load_partial (u8 *data, uword n)
+{
+  u8x16 r = {};
+  if (n > 7)
+    {
+      u64x2 r;
+      r[1] = *(u64u *) (data + n - 8);
+      r >>= (16 - n) * 8;
+      r[0] = *(u64u *) data;
+      return (u8x16) r;
+    }
+  else if (n > 3)
+    {
+      u32x4 r = {};
+      r[1] = *(u32u *) (data + n - 4);
+      r >>= (8 - n) * 8;
+      r[0] = *(u32u *) data;
+      return (u8x16) r;
+    }
+  else if (n > 1)
+    {
+      u16x8 r = {};
+      r[1] = *(u16u *) (data + n - 2);
+      r >>= (4 - n) * 8;
+      r[0] = *(u16u *) data;
+      return (u8x16) r;
+    }
+  else if (n > 0)
+    r[0] = *data;
+  return r;
+}
+
+static_always_inline void
+u8x16_store_partial (u8x16 r, u8 *data, uword n)
+{
+  if (n > 7)
+    {
+      *(u64u *) (data + n - 8) = ((u64x2) r)[1] << ((16 - n) * 8);
+      *(u64u *) data = ((u64x2) r)[0];
+    }
+  else if (n > 3)
+    {
+      *(u32u *) (data + n - 4) = ((u32x4) r)[1] << ((8 - n) * 8);
+      *(u32u *) data = ((u32x4) r)[0];
+    }
+  else if (n > 1)
+    {
+      *(u16u *) (data + n - 2) = ((u16x8) r)[1] << ((4 - n) * 8);
+      *(u16u *) data = ((u16x8) r)[0];
+    }
+  else if (n > 0)
+    data[0] = r[0];
+}
+
 #define CLIB_HAVE_VEC128_MSB_MASK
 
 #define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
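
As a rough usage sketch of the new partial load/store and scatter helpers (the wrapper functions and the tail-copy scenario are made up; only the vppinfra calls come from the patch):

#include <vppinfra/vector.h>

/* Hypothetical tail copy: u8x16_load_partial reads no byte past src + n - 1
   and u8x16_store_partial writes no byte past dst + n - 1, for n <= 16. */
static void
copy_tail (u8 *dst, u8 *src, uword n)
{
  u8x16 t = u8x16_load_partial (src, n);
  u8x16_store_partial (t, dst, n);
}

/* Hypothetical scatter: write each u32 lane to an unrelated location and
   return the smallest lane (vminvq_u32). */
static u32
scatter_and_min (u32x4 r, u32 *a, u32 *b, u32 *c, u32 *d)
{
  u32x4_scatter (r, a, b, c, d);
  return u32x4_min_scalar (r);
}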