/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>
#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)

/* there is no _mm256_set1_epi64 intrinsic; alias the epi64x variant */
#define _mm256_set1_epi64 _mm256_set1_epi64x
/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm256_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); }

foreach_avx2_vec256i foreach_avx2_vec256u
#undef _
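/* Example use of the generated functions (illustrative sketch; `p' is
   assumed to be any readable/writable 32-byte buffer):

     u32x8 v = u32x8_load_unaligned (p);
     v ^= u32x8_splat (0xdeadbeef);
     if (!u32x8_is_all_zero (v))
       u32x8_store_unaligned (v, p);
*/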
static_always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}
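/* result[i] = v[idx[i] & 7]; e.g. reversing the lane order (illustrative):
     u32x8 idx = { 7, 6, 5, 4, 3, 2, 1, 0 };
     r = u32x8_permute (v, idx);
*/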
#define u64x4_permute(v, m0, m1, m2, m3) \
  (u64x4) _mm256_permute4x64_epi64 ( \
    (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))
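/* m0..m3 select which source lane feeds each result lane, e.g.
   u64x4_permute (v, 3, 2, 1, 0) reverses the four 64-bit lanes */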
/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
#define _(t1, t2) \
static_always_inline t1 \
t2##_extract_lo (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
\
static_always_inline t1 \
t2##_extract_hi (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
\
static_always_inline t2 \
t2##_insert_lo (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
static_always_inline t2 \
t2##_insert_hi (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }

_ (u8x16, u8x32)
_ (u16x8, u16x16)
_ (u32x4, u32x8)
_ (u64x2, u64x4)
#undef _
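/* e.g. u32x8_extract_lo (v) returns lanes 0-3 as a u32x4, and
   u32x8_insert_hi (v, x) replaces lanes 4-7 with the u32x4 x */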
/* 256-bit packs with signed/unsigned saturation */
#define _(f, t, fn) \
always_inline t t##_pack (f lo, f hi) \
{ \
  return (t) fn ((__m256i) lo, (__m256i) hi); \
}

_ (i16x16, i8x32, _mm256_packs_epi16)
_ (i16x16, u8x32, _mm256_packus_epi16)
_ (i32x8, i16x16, _mm256_packs_epi32)
_ (i32x8, u16x16, _mm256_packus_epi32)

#undef _
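/* Note: like the underlying VPACK* instructions, packing works per
   128-bit lane, so e.g. i8x32_pack produces the element order
   { lo[0..7], hi[0..7], lo[8..15], hi[8..15] }, not a plain
   concatenation of lo and hi. */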
static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

static_always_inline u32
i8x32_msb_mask (i8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}
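/* Bit i of the result is the most significant bit of byte lane i, e.g.
   turning a byte compare into a 32-bit bitmask (illustrative):

     u8x32 eq = (u8x32) (a == b);      // 0xff where bytes are equal
     u32 mask = u8x32_msb_mask (eq);   // bit i set where a[i] == b[i]
*/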
/* zero- and sign-extending conversions, named <to>_from_<from> */
#define _(f, t, i) \
static_always_inline t \
t##_from_##f (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }

_ (u16x8, u32x8, epu16_epi32)
_ (u16x8, u64x4, epu16_epi64)
_ (u32x4, u64x4, epu32_epi64)
_ (u8x16, u16x16, epu8_epi16)
_ (u8x16, u32x8, epu8_epi32)
_ (u8x16, u64x4, epu8_epi64)
_ (i16x8, i32x8, epi16_epi32)
_ (i16x8, i64x4, epi16_epi64)
_ (i32x4, i64x4, epi32_epi64)
_ (i8x16, i16x16, epi8_epi16)
_ (i8x16, i32x8, epi8_epi32)
_ (i8x16, i64x4, epi8_epi64)
#undef _
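/* e.g. u32x8_from_u8x16 (x) zero-extends the first 8 bytes of x into
   eight u32 lanes (VPMOVZXBD) */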
static_always_inline u64x4
u64x4_byte_swap (u64x4 v)
{
  u8x32 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
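/* These reverse the byte order within each element, e.g. converting
   network (big-endian) u32 values to host order (illustrative):

     u32x8 host = u32x8_byte_swap (net);
*/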
static_always_inline u8x32
u8x32_shuffle (u8x32 v, u8x32 m)
{
  return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
}
#define u8x32_align_right(a, b, imm) \
  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)

#define u64x4_align_right(a, b, imm) \
  (u64x4) _mm256_alignr_epi64 ((__m256i) a, (__m256i) b, imm)
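/* Note: like VPALIGNR, u8x32_align_right operates on each 128-bit lane
   independently; with a == b it rotates each lane right by imm bytes,
   which is what u32x8_sum_elts below relies on. */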
static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  /* rotate each 128-bit lane by 8 and then 4 bytes, accumulating, so
     every element ends up holding the sum of its 128-bit half */
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  /* add the totals of the low half (lane 0) and high half (lane 4) */
  return sum8[0] + sum8[4];
}
static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}
/* zero the last n_last elements of v */
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}
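/* e.g. zeroing the unused tail of a vector when only the first n_valid
   elements are meaningful (illustrative; n_valid <= 16):

     v = u16x16_mask_last (v, 16 - n_valid);
*/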
static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}

static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  /* truncating conversion (rounds toward zero) */
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}
#define u32x8_blend(a, b, m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)
static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = {
    *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
  };
  return r;
}

static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
	      void *p6, void *p7)
{
  u32x8 r = {
    *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
    *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
  };
  return r;
}
static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}

static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
	       void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}
static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}
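/* These gathers/scatters compile to scalar loads/stores rather than
   VPGATHER* instructions; e.g. collecting a u64 field from four objects
   (pointers illustrative):

     u64x4 v = u64x4_gather (&a->x, &b->x, &c->x, &d->x);
*/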
static_always_inline u8x32
u8x32_is_greater (u8x32 v1, u8x32 v2)
{
  /* note: signed byte compare (VPCMPGTB) */
  return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
}

static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  /* per-byte select: v2 where the mask byte MSB is set, else v1 */
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
				     (__m256i) mask);
}
#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}

static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
				      u32x8_extract_hi (v)));
}
static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  /* stage 1: interleave 32-bit elements of adjacent rows */
  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  /* stage 2: interleave 64-bit pairs, then recombine 128-bit lanes */
  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}
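/* In the lane permutes above, selector 0x20 combines the low 128-bit
   lanes of the two sources and 0x31 the high lanes, placing rows 0-3
   and 4-7 of the transposed matrix into the correct halves. */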
static_always_inline void
u64x4_transpose (u64x4 a[8])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}
#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */