/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>
#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)

/* the splat macro below expands to _mm256_set1_epi64 for 64-bit lanes;
   the Intel intrinsics headers spell that _mm256_set1_epi64x, so alias it */
#define _mm256_set1_epi64 _mm256_set1_epi64x
/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm256_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); }

foreach_avx2_vec256i foreach_avx2_vec256u
#undef _
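
/* Illustrative sketch of the generated API (u32x8 shown; every type in the
   foreach lists above gets the same family of helpers):

     u32x8 v = u32x8_splat (7);          // all 8 lanes = 7
     if (!u32x8_is_all_zero (v))
       u32x8_store_unaligned (v, ptr);   // ptr may be unaligned
*/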
static_always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}

#define u64x4_permute(v, m0, m1, m2, m3) \
  (u64x4) _mm256_permute4x64_epi64 ( \
    (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))
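
/* Illustrative: m0..m3 select the source lane for each destination lane,
   so reversing the four 64-bit lanes of v is:

     u64x4 r = u64x4_permute (v, 3, 2, 1, 0);
*/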
/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
#define _(t1, t2) \
always_inline t1 \
t2##_extract_lo (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
\
always_inline t1 \
t2##_extract_hi (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
\
always_inline t2 \
t2##_insert_lo (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
always_inline t2 \
t2##_insert_hi (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }

_ (u8x16, u8x32)
_ (u16x8, u16x16)
_ (u32x4, u32x8)
_ (u64x2, u64x4)
#undef _
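
/* Illustrative: operate on the 128-bit halves of a 256-bit vector.

     u32x4 lo = u32x8_extract_lo (v), hi = u32x8_extract_hi (v);
     v = u32x8_insert_hi (u32x8_insert_lo (v, hi), lo);  // halves swapped
*/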
/* 256-bit packs with signed/unsigned saturation */
#define _(f, t, fn) \
always_inline t \
t##_pack (f lo, f hi) \
{ \
  return (t) fn ((__m256i) lo, (__m256i) hi); \
}

_ (i16x16, i8x32, _mm256_packs_epi16)
_ (i16x16, u8x32, _mm256_packus_epi16)
_ (i32x8, i16x16, _mm256_packs_epi32)
_ (i32x8, u16x16, _mm256_packus_epi32)

#undef _
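
/* Note: the AVX2 pack instructions operate per 128-bit lane, so the result
   interleaves the lanes of the two sources rather than concatenating them:

     i8x32 r = i8x32_pack (a, b);  // r = a.lane0, b.lane0, a.lane1, b.lane1
*/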
static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

static_always_inline u32
i8x32_msb_mask (i8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}
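
/* Illustrative: locate the first differing byte of two 32-byte blocks by
   moving each lane's MSB into a scalar mask (count_trailing_zeros is the
   clib helper pulled in via clib.h):

     u32 eq = u8x32_msb_mask ((u8x32) (a == b));
     if (eq != 0xffffffff)
       first_diff = count_trailing_zeros (~(u64) eq);
*/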
/* 128-bit to 256-bit zero/sign extension: <dst>_from_<src> */
#define _(f, t, i) \
static_always_inline t \
t##_from_##f (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }

_ (u16x8, u32x8, epu16_epi32)
_ (u16x8, u64x4, epu16_epi64)
_ (u32x4, u64x4, epu32_epi64)
_ (u8x16, u16x16, epu8_epi16)
_ (u8x16, u32x8, epu8_epi32)
_ (u8x16, u64x4, epu8_epi64)
_ (i16x8, i32x8, epi16_epi32)
_ (i16x8, i64x4, epi16_epi64)
_ (i32x4, i64x4, epi32_epi64)
_ (i8x16, i16x16, epi8_epi16)
_ (i8x16, i32x8, epi8_epi32)
_ (i8x16, i64x4, epi8_epi64)
#undef _
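
/* Illustrative: zero-extend 16 bytes into sixteen 16-bit lanes.

     u16x16 w = u16x16_from_u8x16 (b);
*/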
static_always_inline u64x4
u64x4_byte_swap (u64x4 v)
{
  u8x32 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
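
/* Illustrative: convert eight network-order (big-endian) u32 values to host
   order on a little-endian CPU.

     u32x8 h = u32x8_byte_swap (u32x8_load_unaligned (p));
*/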
#define u8x32_align_right(a, b, imm) \
  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)

#define u64x4_align_right(a, b, imm) \
  (u64x4) _mm256_alignr_epi64 ((__m256i) a, (__m256i) b, imm)
static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  return sum8[0] + sum8[4];
}
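
/* The two align_right steps rotate each 128-bit lane by 8 and then 4 bytes,
   so after both adds every u32 slot holds its lane's total; adding slot 0
   (low lane) and slot 4 (high lane) yields the full reduction.
   Illustrative use, e.g. to finish a vectorized accumulation:

     u32 total = u32x8_sum_elts (acc);
*/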
static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}
static_always_inline u32
u32x8_hxor (u32x8 v)
{
  u32x4 v4;
  v4 = u32x8_extract_lo (v) ^ u32x8_extract_hi (v);
  v4 ^= (u32x4) u8x16_align_right (v4, v4, 8);
  v4 ^= (u32x4) u8x16_align_right (v4, v4, 4);
  return v4[0];
}
static_always_inline u8x32
u8x32_xor3 (u8x32 a, u8x32 b, u8x32 c)
{
#if __AVX512F__
  /* 0x96 is the ternary-logic truth table for a ^ b ^ c */
  return (u8x32) _mm256_ternarylogic_epi32 ((__m256i) a, (__m256i) b,
					    (__m256i) c, 0x96);
#endif
  return a ^ b ^ c;
}
static_always_inline u8x32
u8x32_reflect_u8x16 (u8x32 x)
{
  static const u8x32 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x32) _mm256_shuffle_epi8 ((__m256i) x, (__m256i) mask);
}
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}
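
/* Illustrative: zero the last 5 of the 16 u16 lanes, keeping the first 11.

     u16x16 head = u16x16_mask_last (v, 5);
*/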
static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}

static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}
#define u32x8_blend(a,b,m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)
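
/* Illustrative: bit i of the immediate selects lane i from the second
   operand, so taking lanes 0-3 from a and lanes 4-7 from b is:

     u32x8 r = u32x8_blend (a, b, 0xf0);
*/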
static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = {
    *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
  };
  return r;
}

static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
	      void *p6, void *p7)
{
  u32x8 r = {
    *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
    *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
  };
  return r;
}
static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}

static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
	       void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}

static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}
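
/* Illustrative (hypothetical 'rec' pointer array and 'count' field): gather
   a u64 counter from four records, increment each, and scatter them back.

     u64x4 c = u64x4_gather (&rec[0]->count, &rec[1]->count,
                             &rec[2]->count, &rec[3]->count);
     u64x4_scatter (c + 1, &rec[0]->count, &rec[1]->count,
                    &rec[2]->count, &rec[3]->count);
*/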
static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
				     (__m256i) mask);
}

#define u8x32_word_shift_left(a, n) \
  (u8x32) _mm256_bslli_epi128 ((__m256i) a, n)
#define u8x32_word_shift_right(a, n) \
  (u8x32) _mm256_bsrli_epi128 ((__m256i) a, n)
#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
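
/* Illustrative: swap the two 128-bit lanes of a vector (selector 0x01 picks
   a's high lane for the low half and a's low lane for the high half).

     u64x4 r = u64x4_permute_lanes (v, v, 0x01);
*/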
static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}

static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
				      u32x8_extract_hi (v)));
}
static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}
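
/* Illustrative: treat a[] as an 8x8 u32 matrix with a[i] holding row i;
   after u32x8_transpose (a), a[i] holds what was column i, i.e. a[i][j]
   and a[j][i] are exchanged in place.
*/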
static_always_inline void
u64x4_transpose (u64x4 a[8])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}
static_always_inline u8x32
u8x32_splat_u8x16 (u8x16 a)
{
  return (u8x32) _mm256_broadcastsi128_si256 ((__m128i) a);
}

static_always_inline u32x8
u32x8_splat_u32x4 (u32x4 a)
{
  return (u32x8) _mm256_broadcastsi128_si256 ((__m128i) a);
}
static_always_inline u8x32
u8x32_load_partial (u8 *data, uword n)
{
#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
  return u8x32_mask_load_zero (data, pow2_mask (n));
#else
  u8x32 r = {};
  if (n > 16)
    {
      r = u8x32_insert_lo (r, *(u8x16u *) data);
      r = u8x32_insert_hi (r, u8x16_load_partial (data + 16, n - 16));
    }
  else
    r = u8x32_insert_lo (r, u8x16_load_partial (data, n));
  return r;
#endif
}
static_always_inline void
u8x32_store_partial (u8x32 r, u8 *data, uword n)
{
#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
  u8x32_mask_store (r, data, pow2_mask (n));
#else
  if (n > 16)
    {
      *(u8x16u *) data = u8x32_extract_lo (r);
      u8x16_store_partial (u8x32_extract_hi (r), data + 16, n - 16);
    }
  else
    u8x16_store_partial (u8x32_extract_lo (r), data, n);
#endif
}
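
/* Illustrative: copy an n-byte (n <= 32) trailer without reading or writing
   past the end of either buffer.

     u8x32_store_partial (u8x32_load_partial (src, n), dst, n);
*/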
#endif /* included_vector_avx2_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */