/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>

/* Saturating subtraction: vqsubq_* clamp at the type limits,
   whereas plain vsubq_* would wrap around. */
#define u16x8_sub_saturate(a,b) vqsubq_u16(a,b)
#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)

/* Converts all ones/zeros compare mask to bitmap. */
always_inline u32
u8x16_compare_byte_mask (u8x16 x)
{
  uint8x16_t mask_shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  uint8x16_t mask_and = vdupq_n_u8 (0x80);
  x = vandq_u8 (x, mask_and);
  x = vshlq_u8 (x, vreinterpretq_s8_u8 (mask_shift));
  /* Pairwise add three times so that lane 0 collects mask bits 0-7 and
     lane 1 collects mask bits 8-15. */
  x = vpaddq_u8 (x, x);
  x = vpaddq_u8 (x, x);
  x = vpaddq_u8 (x, x);
  return vgetq_lane_u8 (x, 0) | (vgetq_lane_u8 (x, 1) << 8);
}
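
/* Usage sketch (illustrative, not part of the original header):

     u8x16 a, b;                               values to compare, set elsewhere
     u8x16 eq = (u8x16) vceqq_u8 (a, b);       each byte 0xff or 0x00
     u32 mask = u8x16_compare_byte_mask (eq);  bit i set when a[i] == b[i]
*/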

always_inline u32
u16x8_zero_byte_mask (u16x8 input)
{
  u8x16 vall_one = vdupq_n_u8 (0x0);
  u8x16 res_values = { 0x01, 0x02, 0x04, 0x08,
    0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08,
    0x10, 0x20, 0x40, 0x80
  };

  /* input --> [0x80, 0x40, 0x01, 0xf0, ... ] */
  u8x16 test_result =
    vreinterpretq_u8_u16 (vceqq_u16 (input, vreinterpretq_u16_u8 (vall_one)));
  u8x16 before_merge = vminq_u8 (test_result, res_values);
  /* before_merge --> [0x80, 0x00, 0x00, 0x10, ... ] */
  /* u8x16 --> [a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p] */
  /* pair add until we have 2 uint64_t */
  u16x8 merge1 = vpaddlq_u8 (before_merge);
  /* u16x8 --> [a+b,c+d, e+f,g+h, i+j,k+l, m+n,o+p] */
  u32x4 merge2 = vpaddlq_u16 (merge1);
  /* u32x4 --> [a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p] */
  u64x2 merge3 = vpaddlq_u32 (merge2);
  /* u64x2 --> [a+b+c+d+e+f+g+h, i+j+k+l+m+n+o+p] */
  return (u32) (vgetq_lane_u64 (merge3, 1) << 8) + vgetq_lane_u64 (merge3, 0);
}
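
/* Worked example (illustrative, assuming little-endian lane order):

     input  = { 0x0000, 0x1234, 0xffff, 0x0000, 1, 2, 3, 4 }
     result = 0x00c3

   Lane 0 is zero, so byte positions 0-1 set bits 0-1; lane 3 is zero, so
   byte positions 6-7 set bits 6-7.  One mask bit is produced per byte. */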

always_inline u32
u8x16_zero_byte_mask (u8x16 input)
{
  return u16x8_zero_byte_mask ((u16x8) input);
}

always_inline u32
u32x4_zero_byte_mask (u32x4 input)
{
  return u16x8_zero_byte_mask ((u16x8) input);
}

always_inline u32
u64x2_zero_byte_mask (u64x2 input)
{
  return u16x8_zero_byte_mask ((u16x8) input);
}
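
/* Note (illustrative, not from the original): the wrappers reinterpret the
   vector and reuse the 16-bit comparison, so a zero lane of a wider type
   sets one bit per byte of that lane (assuming little-endian layout).
   For example, with only lane 1 of a u32x4 equal to zero:

     u32x4 v = { 0x01020304, 0, 0x05060708, 0x11223344 };
     u32 m = u32x4_zero_byte_mask (v);     m == 0x00f0 (bits 4-7)
*/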

#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)
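
/* Each entry expands the _() template below with (t, s, c, i) =
   (type prefix, element size in bits, lane count, NEON intrinsic suffix);
   e.g. _(u,32,4,u32) generates the u32x4_* helpers via vdupq_n_u32,
   vld1q_u32, and so on. */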

#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) vdupq_n_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) vld1q_##i (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ vst1q_##i (p, v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return !(vaddvq_##i (x)); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); };

foreach_neon_vec128i foreach_neon_vec128u

#undef _
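
/* Usage sketch of the generated helpers (illustrative, not part of the
   original header):

     u32 buf[4] = { 1, 1, 1, 1 };
     u32x4 v = u32x4_load_unaligned (buf);
     if (u32x4_is_all_equal (v, 1))
       u32x4_store_unaligned (u32x4_splat (7), buf);
*/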

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  const u8 swap_pattern[] = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
  };
  u8x16 swap = vld1q_u8 (swap_pattern);
  return (u16x8) vqtbl1q_u8 ((u8x16) v, swap);
}
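
/* Usage sketch (illustrative): swap the bytes within each of eight 16-bit
   lanes, e.g. to convert network byte order to host order on a
   little-endian CPU.  The data pointer is hypothetical.

     u16x8 net = u16x8_load_unaligned (data);
     u16x8 host = u16x8_byte_swap (net);
*/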

static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  return (u8x16) vqtbl1q_u8 (v, m);
}
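
/* Usage sketch (illustrative): vqtbl1q_u8 picks v[m[i]] for each lane
   (indices >= 16 yield 0), so a descending index vector reverses the bytes.

     u8x16 rev = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
     u8x16 reversed = u8x16_shuffle (v, rev);
*/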

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */