/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)

#define _mm256_set1_epi64 _mm256_set1_epi64x

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm256_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); }

foreach_avx2_vec256i foreach_avx2_vec256u
#undef _

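/* permute the eight 32-bit elements of v according to the low 3 bits of
   each element of idx (vpermd) */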
static_always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}

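/* m0..m3 select the source 64-bit element for each destination element,
   e.g. u64x4_permute (v, 3, 2, 1, 0) reverses the element order */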
#define u64x4_permute(v, m0, m1, m2, m3) \
  (u64x4) _mm256_permute4x64_epi64 ( \
    (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))

/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
#define _(t1,t2) \
always_inline t1 \
t2##_extract_lo (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
\
always_inline t1 \
t2##_extract_hi (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
\
always_inline t2 \
t2##_insert_lo (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
always_inline t2 \
t2##_insert_hi (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }

_(u8x16, u8x32)
_(u16x8, u16x16)
_(u32x4, u32x8)
_(u64x2, u64x4)
#undef _

/* 256 bit packs. */
#define _(f, t, fn) \
always_inline t t##_pack (f lo, f hi) \
{ return (t) fn ((__m256i) lo, (__m256i) hi); }

_ (i16x16, i8x32, _mm256_packs_epi16)
_ (i16x16, u8x32, _mm256_packus_epi16)
_ (i32x8, i16x16, _mm256_packs_epi32)
_ (i32x8, u16x16, _mm256_packus_epi32)
#undef _

static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

static_always_inline u32
i8x32_msb_mask (i8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

/* _from_ - zero- or sign-extending conversions from 128-bit vectors */
#define _(f,t,i) \
static_always_inline t \
t##_from_##f (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }

_(u16x8, u32x8, epu16_epi32)
_(u16x8, u64x4, epu16_epi64)
_(u32x4, u64x4, epu32_epi64)
_(u8x16, u16x16, epu8_epi16)
_(u8x16, u32x8, epu8_epi32)
_(u8x16, u64x4, epu8_epi64)
_(i16x8, i32x8, epi16_epi32)
_(i16x8, i64x4, epi16_epi64)
_(i32x4, i64x4, epi32_epi64)
_(i8x16, i16x16, epi8_epi16)
_(i8x16, i32x8, epi8_epi32)
_(i8x16, i64x4, epi8_epi64)
#undef _

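/* per-element byte swaps, implemented as a byte shuffle (vpshufb) with a
   constant index vector */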
static_always_inline u64x4
u64x4_byte_swap (u64x4 v)
{
  u8x32 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

#define u8x32_align_right(a, b, imm) \
  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)

#define u64x4_align_right(a, b, imm) \
  (u64x4) _mm256_alignr_epi64 ((__m256i) a, (__m256i) b, imm)

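/* horizontal sum of the eight 32-bit elements: fold within each 128-bit
   lane using byte shifts, then add the two per-lane results */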
static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  return sum8[0] + sum8[4];
}

static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}

static_always_inline u32
u32x8_hxor (u32x8 v)
{
  u32x4 v4;
  v4 = u32x8_extract_lo (v) ^ u32x8_extract_hi (v);
  v4 ^= (u32x4) u8x16_align_right (v4, v4, 8);
  v4 ^= (u32x4) u8x16_align_right (v4, v4, 4);
  return v4[0];
}

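/* a ^ b ^ c in a single instruction when AVX-512 ternary logic is
   available, plain xors otherwise */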
static_always_inline u8x32
u8x32_xor3 (u8x32 a, u8x32 b, u8x32 c)
{
#if __AVX512F__
  return (u8x32) _mm256_ternarylogic_epi32 ((__m256i) a, (__m256i) b,
					    (__m256i) c, 0x96);
#endif
  return a ^ b ^ c;
}

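/* clear the last n_last 16-bit elements of v, keeping the first
   16 - n_last elements intact */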
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}

static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}

static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}

#define u32x8_blend(a, b, m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)

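/* gather/scatter emulated with scalar loads and stores rather than the
   AVX2 gather instructions */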
static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = {
    *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
  };
  return r;
}

static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
	      void *p6, void *p7)
{
  u32x8 r = {
    *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
    *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
  };
  return r;
}

static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}

static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
	       void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}

static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}

static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
				     (__m256i) mask);
}

#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)

static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}

static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
				      u32x8_extract_hi (v)));
}

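/* in-place transpose of an 8x8 matrix of 32-bit elements: interleave pairs
   of rows, then pairs of 64-bit results, then fix up the 128-bit lanes
   with cross-lane permutes */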
static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}

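/* in-place transpose of a 4x4 matrix of 64-bit elements; despite the a[8]
   prototype only a[0..3] are read and written */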
static_always_inline void
u64x4_transpose (u64x4 a[8])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}

static_always_inline u8x32
u8x32_splat_u8x16 (u8x16 a)
{
  return (u8x32) _mm256_broadcastsi128_si256 ((__m128i) a);
}

#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */