/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)

#define _mm256_set1_epi64 _mm256_set1_epi64x

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm256_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); }

foreach_avx2_vec256i foreach_avx2_vec256u
#undef _

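/* permute 32-bit elements of v across the full 256-bit register, using the
   low 3 bits of each element of idx as the source position (vpermd) */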
static_always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}

/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
#define _(t1,t2) \
static_always_inline t1 \
t2##_extract_lo (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
\
static_always_inline t1 \
t2##_extract_hi (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
\
static_always_inline t2 \
t2##_insert_lo (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
static_always_inline t2 \
t2##_insert_hi (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }

_(u8x16, u8x32) _(u16x8, u16x16) _(u32x4, u32x8) _(u64x2, u64x4)
#undef _

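/* collect the most significant bit of each byte of v into a 32-bit mask
   (vpmovmskb) */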
static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

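/* widening conversions: f##_extend_to_##t zero-extends (epu) or sign-extends
   (epi) each element of a 128-bit vector into the wider 256-bit type */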
#define _(f,t,i) \
static_always_inline t \
f##_extend_to_##t (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }

_(u16x8, u32x8, epu16_epi32)
_(u16x8, u64x4, epu16_epi64)
_(u32x4, u64x4, epu32_epi64)
_(u8x16, u16x16, epu8_epi16)
_(u8x16, u32x8, epu8_epi32)
_(u8x16, u64x4, epu8_epi64)
_(i16x8, i32x8, epi16_epi32)
_(i16x8, i64x4, epi16_epi64)
_(i32x4, i64x4, epi32_epi64)
_(i8x16, i16x16, epi8_epi16)
_(i8x16, i32x8, epi8_epi32)
_(i8x16, i64x4, epi8_epi64)
#undef _

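/* reverse the byte order inside each element, e.g. for endian conversion */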
static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

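/* horizontal add of adjacent pairs of 32-bit elements from v1 and v2
   (vphaddd) */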
static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}

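/* zero the last n_last 16-bit elements of v, keeping the first 16 - n_last */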
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}

static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}

static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}

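/* immediate-mask blends: a set bit in the mask selects the corresponding
   element from the second operand; note that _mm256_blend_epi16 applies its
   8-bit mask separately to each 128-bit lane */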
#define u32x8_blend(a,b,m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)

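/* gather/scatter emulated with scalar loads and stores rather than the AVX2
   gather instructions */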
static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = { *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3 };
  return r;
}

static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
	      void *p6, void *p7)
{
  u32x8 r = { *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
	      *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7 };
  return r;
}

static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}

static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
	       void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}

static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}

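/* note: despite the unsigned element type, vpcmpgtb performs a signed byte
   compare */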
static_always_inline u8x32
u8x32_is_greater (u8x32 v1, u8x32 v2)
{
  return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
}

/* variable blend: take the byte from v2 wherever the corresponding mask byte
   has its most significant bit set (vpblendvb) */
static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
				     (__m256i) mask);
}

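/* select 128-bit lanes from a and b according to the immediate m: 0x20
   concatenates the low lanes of a and b, 0x31 the high lanes (vperm2i128) */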
#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)

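/* element-wise unsigned minimum, plus reduction of a u32x8 to a single
   scalar via the 128-bit helpers */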
static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}

static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
				      u32x8_extract_hi (v)));
}

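/* in-place transpose of an 8x8 matrix of u32 held in a[0..7]: interleave
   32-bit elements, then 64-bit pairs, then exchange 128-bit lanes */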
static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}

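/* in-place transpose of a 4x4 matrix of u64 held in a[0..3] */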
static_always_inline void
u64x4_transpose (u64x4 a[8])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}

#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */