/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>
/* Dummy, to aid making uniform macros */
#define vreinterpretq_u8_u8(a) a

/* Implement the missing intrinsics to make uniform macros */
#define vminvq_u64(x) \
  ({ \
    u64 x0 = vgetq_lane_u64 (x, 0); \
    u64 x1 = vgetq_lane_u64 (x, 1); \
    x0 < x1 ? x0 : x1; \
  })
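/* Illustrative sketch (not part of the original file): the emulated
 * vminvq_u64 above returns the smaller of the two 64-bit lanes, so it can
 * be used the same way the native vminvq_* intrinsics are used by the
 * generated helpers below, e.g. to check that both lanes of a 64-bit
 * compare result are all-ones.  The function name is hypothetical. */
static_always_inline int
u64x2_both_lanes_equal_example (uint64x2_t a, uint64x2_t b)
{
  /* vceqq_u64 yields 0xFFFFFFFFFFFFFFFF per equal lane; the horizontal
   * minimum is non-zero only if both lanes compared equal */
  return !!(vminvq_u64 (vceqq_u64 (a, b)));
}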
/* Converts an all-ones/all-zeros compare mask to a bitmap. */
static_always_inline u32
u8x16_compare_byte_mask (u8x16 v)
{
  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
                      0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  };
  /* v --> [0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x00, ... ] */
  uint8x16_t x = vandq_u8 (v, mask);
  /* after v & mask,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after the pairwise-add merge, x64 --> [0x5D, 0x.., ... ] */
  return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
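/* Illustrative usage sketch (not part of the original file): combine a
 * per-byte compare with the bitmap conversion above to locate the first
 * byte of a vector equal to a given value.  The function name is
 * hypothetical; __builtin_ctz is used purely for illustration. */
static_always_inline int
u8x16_first_match_index_example (u8x16 v, u8 needle)
{
  u32 bitmap =
    u8x16_compare_byte_mask ((u8x16) vceqq_u8 (v, vdupq_n_u8 (needle)));
  /* bit i of the bitmap is set when byte i matched; return -1 on no match */
  return bitmap ? __builtin_ctz (bitmap) : -1;
}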
#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)
#define _(t, s, c, i) \
  static_always_inline t##s##x##c t##s##x##c##_splat (t##s x) \
  { \
    return (t##s##x##c) vdupq_n_##i (x); \
  } \
  static_always_inline t##s##x##c t##s##x##c##_load_unaligned (void *p) \
  { \
    return (t##s##x##c) vld1q_##i (p); \
  } \
  static_always_inline void t##s##x##c##_store_unaligned (t##s##x##c v, \
                                                           void *p) \
  { \
    vst1q_##i (p, v); \
  } \
  static_always_inline int t##s##x##c##_is_all_zero (t##s##x##c x) \
  { \
    return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i (0), x))); \
  } \
  static_always_inline int t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
  { \
    return !!(vminvq_u##s (vceqq_##i (a, b))); \
  } \
  static_always_inline int t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
  { \
    return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); \
  } \
  static_always_inline u32 t##s##x##c##_zero_byte_mask (t##s##x##c x) \
  { \
    uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i (0), x)); \
    return u8x16_compare_byte_mask (v); \
  } \
  static_always_inline t##s##x##c t##s##x##c##_add_saturate (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) vqaddq_##i (a, b); \
  } \
  static_always_inline t##s##x##c t##s##x##c##_sub_saturate (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) vqsubq_##i (a, b); \
  } \
  static_always_inline t##s##x##c t##s##x##c##_blend ( \
    t##s##x##c dst, t##s##x##c src, u##s##x##c mask) \
  { \
    return (t##s##x##c) vbslq_##i (mask, src, dst); \
  }
foreach_neon_vec128i foreach_neon_vec128u

#undef _
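/* Illustrative note (not part of the original file): each _(t, s, c, i)
 * instantiation above token-pastes a family of helpers for one vector type.
 * For example, _(u,32,4,u32) generates u32x4_splat, u32x4_load_unaligned,
 * u32x4_store_unaligned, u32x4_is_all_zero, u32x4_is_equal,
 * u32x4_is_all_equal, u32x4_zero_byte_mask, u32x4_add_saturate,
 * u32x4_sub_saturate and u32x4_blend, each mapped to the corresponding
 * vdupq_n_u32/vld1q_u32/... intrinsic.  A hypothetical caller, assuming only
 * the generated helpers: */
static_always_inline int
u32x4_contains_value_example (u32x4 v, u32 x)
{
  /* compare every lane against a splatted scalar and test the compare mask */
  return !u32x4_is_all_zero ((u32x4) vceqq_u32 (v, u32x4_splat (x)));
}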
static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  return (u16x8) vrev16q_u8 ((u8x16) v);
}

static_always_inline u32x4
u32x4_byte_swap (u32x4 v)
{
  return (u32x4) vrev32q_u8 ((u8x16) v);
}

static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) vpaddq_u32 (v1, v2);
}
static_always_inline u64x2
u64x2_from_u32x4 (u32x4 v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

static_always_inline u64x2
u64x2_from_u32x4_high (u32x4 v)
{
  return vmovl_high_u32 (v);
}
/* Creates a mask made up of the MSB of each byte of the source vector */
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  int8x16_t shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
  uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
  /* after (v & 0x80) >> shift,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after the pairwise-add merge, x64 --> [0x5D, 0x.., ... ] */
  return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
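/* Illustrative usage sketch (not part of the original file): u8x16_msb_mask
 * packs one bit per byte, so a per-byte compare can be reduced to a 16-bit
 * mask and scanned with ordinary integer code.  The function name is
 * hypothetical; __builtin_popcount is used purely for illustration. */
static_always_inline int
u8x16_count_matches_example (u8x16 v, u8 needle)
{
  /* vceqq_u8 yields 0xFF per matching byte, so the MSB mask has one set
   * bit per match */
  u16 m = u8x16_msb_mask ((u8x16) vceqq_u8 (v, vdupq_n_u8 (needle)));
  return __builtin_popcount (m);
}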
static_always_inline u64x2
u64x2_gather (void *p0, void *p1)
{
  u64x2 r = vdupq_n_u64 (*(u64 *) p0);
  r = vsetq_lane_u64 (*(u64 *) p1, r, 1);
  return r;
}

static_always_inline u32x4
u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u32x4 r = vdupq_n_u32 (*(u32 *) p0);
  r = vsetq_lane_u32 (*(u32 *) p1, r, 1);
  r = vsetq_lane_u32 (*(u32 *) p2, r, 2);
  r = vsetq_lane_u32 (*(u32 *) p3, r, 3);
  return r;
}
static_always_inline void
u64x2_scatter (u64x2 r, void *p0, void *p1)
{
  *(u64 *) p0 = vgetq_lane_u64 (r, 0);
  *(u64 *) p1 = vgetq_lane_u64 (r, 1);
}

static_always_inline void
u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u32 *) p0 = vgetq_lane_u32 (r, 0);
  *(u32 *) p1 = vgetq_lane_u32 (r, 1);
  *(u32 *) p2 = vgetq_lane_u32 (r, 2);
  *(u32 *) p3 = vgetq_lane_u32 (r, 3);
}
static_always_inline u32
u32x4_min_scalar (u32x4 v)
{
  return vminvq_u32 (v);
}
#define u8x16_word_shift_left(x,n)  vextq_u8 (u8x16_splat (0), x, 16 - n)
#define u8x16_word_shift_right(x,n) vextq_u8 (x, u8x16_splat (0), n)
static_always_inline u32x4
u32x4_interleave_hi (u32x4 a, u32x4 b)
{
  return (u32x4) vzip2q_u32 (a, b);
}

static_always_inline u32x4
u32x4_interleave_lo (u32x4 a, u32x4 b)
{
  return (u32x4) vzip1q_u32 (a, b);
}
static_always_inline u8x16
u8x16_reflect (u8x16 v)
{
  u8x16 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
  };
  return (u8x16) vqtbl1q_u8 (v, mask);
}
static_always_inline u8x16
u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
  u8x16 r;
  /* single-instruction 3-way XOR via the SHA3 EOR3 instruction */
  __asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b": "=w" (r): "0" (a), "w" (b), "w" (c):);
  return r;
#endif
  return a ^ b ^ c;
}
static_always_inline u8x16
u8x16_load_partial (u8 *data, uword n)
{
  u8x16 r = {};
  if (n > 7)
    {
      u64x2 r = {};
      r[1] = *(u64u *) (data + n - 8);
      r >>= (16 - n) * 8;
      r[0] = *(u64u *) data;
      return (u8x16) r;
    }
  else if (n > 3)
    {
      u32x4 r = {};
      r[1] = *(u32u *) (data + n - 4);
      r >>= (8 - n) * 8;
      r[0] = *(u32u *) data;
      return (u8x16) r;
    }
  else if (n > 1)
    {
      u16x8 r = {};
      r[1] = *(u16u *) (data + n - 2);
      r >>= (4 - n) * 8;
      r[0] = *(u16u *) data;
      return (u8x16) r;
    }
  else if (n > 0)
    r[0] = data[0];
  return r;
}
static_always_inline void
u8x16_store_partial (u8x16 r, u8 *data, uword n)
{
  if (n > 7)
    {
      *(u64u *) (data + n - 8) = ((u64x2) r)[1] << ((16 - n) * 8);
      *(u64u *) data = ((u64x2) r)[0];
    }
  else if (n > 3)
    {
      *(u32u *) (data + n - 4) = ((u32x4) r)[1] << ((8 - n) * 8);
      *(u32u *) data = ((u32x4) r)[0];
    }
  else if (n > 1)
    {
      *(u16u *) (data + n - 2) = ((u16x8) r)[1] << ((4 - n) * 8);
      *(u16u *) data = ((u16x8) r)[0];
    }
  else if (n > 0)
    data[0] = r[0];
}
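/* Illustrative usage sketch (not part of the original file): the partial
 * load/store helpers above can be combined to copy a sub-16-byte tail of a
 * buffer through a vector register without touching bytes past n.  The
 * function name is hypothetical. */
static_always_inline void
u8x16_copy_tail_example (u8 *dst, u8 *src, uword n)
{
  /* n is expected to be in the range 1..16 */
  u8x16_store_partial (u8x16_load_partial (src, n), dst, n);
}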
#define CLIB_HAVE_VEC128_MSB_MASK

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */