/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)
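
/* There is no _mm256_set1_epi64 intrinsic (only _mm256_set1_epi64x), so
   alias it here to keep the _mm256_set1_##i token pasting below uniform. */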
#define _mm256_set1_epi64 _mm256_set1_epi64x

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c					\
t##s##x##c##_splat (t##s x)					\
{ return (t##s##x##c) _mm256_set1_##i (x); }			\
\
static_always_inline t##s##x##c					\
t##s##x##c##_load_unaligned (void *p)				\
{ return (t##s##x##c) _mm256_loadu_si256 (p); }			\
\
static_always_inline void					\
t##s##x##c##_store_unaligned (t##s##x##c v, void *p)		\
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); }		\
\
static_always_inline int					\
t##s##x##c##_is_all_zero (t##s##x##c x)				\
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); }	\
\
static_always_inline int					\
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b)		\
{ return t##s##x##c##_is_all_zero (a ^ b); }			\
\
static_always_inline int					\
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x)		\
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }	\
\
static_always_inline t##s##x##c					\
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b)		\
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c					\
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b)		\
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); }

foreach_avx2_vec256i foreach_avx2_vec256u
#undef _

static_always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}

#define u64x4_permute(v, m0, m1, m2, m3) \
  (u64x4) _mm256_permute4x64_epi64 ( \
    (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))
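
/* Illustrative usage (not part of the original header): m0..m3 select the
   source element placed in each destination position, so reversing the
   four 64-bit elements is:

     u64x4 rev = u64x4_permute (v, 3, 2, 1, 0);
 */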

/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
#define _(t1,t2) \
always_inline t1						\
t2##_extract_lo (t2 v)						\
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); }	\
\
always_inline t1						\
t2##_extract_hi (t2 v)						\
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); }	\
\
always_inline t2						\
t2##_insert_lo (t2 v1, t1 v2)					\
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
always_inline t2						\
t2##_insert_hi (t2 v1, t1 v2)					\
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }

_ (u8x16, u8x32)
_ (u16x8, u16x16)
_ (u32x4, u32x8)
_ (u64x2, u64x4)
#undef _

#define _(f, t, fn)                                                           \
  always_inline t t##_pack (f lo, f hi)                                       \
  {                                                                           \
    return (t) fn ((__m256i) lo, (__m256i) hi);                               \
  }

_ (i16x16, i8x32, _mm256_packs_epi16)
_ (i16x16, u8x32, _mm256_packus_epi16)
_ (i32x8, i16x16, _mm256_packs_epi32)
_ (i32x8, u16x16, _mm256_packus_epi32)

#undef _

static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

static_always_inline u32
i8x32_msb_mask (i8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

#define _(f,t,i) \
static_always_inline t						\
t##_from_##f (f x)						\
{ return (t) _mm256_cvt##i ((__m128i) x); }

_ (u16x8, u32x8, epu16_epi32)
_ (u16x8, u64x4, epu16_epi64)
_ (u32x4, u64x4, epu32_epi64)
_ (u8x16, u16x16, epu8_epi16)
_ (u8x16, u32x8, epu8_epi32)
_ (u8x16, u64x4, epu8_epi64)
_ (i16x8, i32x8, epi16_epi32)
_ (i16x8, i64x4, epi16_epi64)
_ (i32x4, i64x4, epi32_epi64)
_ (i8x16, i16x16, epi8_epi16)
_ (i8x16, i32x8, epi8_epi32)
_ (i8x16, i64x4, epi8_epi64)
#undef _

static_always_inline u64x4
u64x4_byte_swap (u64x4 v)
{
  u8x32 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

#define u8x32_align_right(a, b, imm) \
  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)

#define u64x4_align_right(a, b, imm) \
  (u64x4) _mm256_alignr_epi64 ((__m256i) a, (__m256i) b, imm)
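
/* Horizontal sum of eight u32 elements: the two per-lane byte rotations
   below fold each 128-bit lane onto itself, leaving each lane's total in
   its first element, so the result is sum8[0] + sum8[4]. */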
static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  return sum8[0] + sum8[4];
}

static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}

static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}
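
/* Illustrative usage (not part of the original header): zero the last 3 of
   the 16 elements while keeping the first 13 intact:

     u16x16 masked = u16x16_mask_last (v, 3);
 */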

static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}

static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}

#define u32x8_blend(a,b,m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)

static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = {
    *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
  };
  return r;
}

static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
	      void *p6, void *p7)
{
  u32x8 r = {
    *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
    *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
  };
  return r;
}

static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}

static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
	       void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}

static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}

static_always_inline u8x32
u8x32_is_greater (u8x32 v1, u8x32 v2)
{
  return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
}

static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
				     (__m256i) mask);
}

#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)

static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}
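
/* u32x8_min_scalar reduces by taking the element-wise minimum of the two
   128-bit lanes and handing the result to the 128-bit u32x4_min and
   u32x4_min_scalar helpers defined elsewhere in vppinfra. */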
static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
				      u32x8_extract_hi (v)));
}
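
/* In-place transpose of an 8x8 matrix of u32, one row per vector: 32-bit
   interleaves build 2x2 blocks, 64-bit interleaves build 4x4 blocks, and
   the final 128-bit lane permutes place the rows. */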
static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}
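
/* In-place transpose of the 4x4 matrix of u64 held in a[0]..a[3]; the
   remaining array entries are not touched. */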
static_always_inline void
u64x4_transpose (u64x4 a[8])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}

#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */