/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>
#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)
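/* The 64-bit splat intrinsic is named _mm256_set1_epi64x in the x86
   intrinsics headers; alias it so the generator macro below can emit
   _mm256_set1_##i uniformly for every element width. */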
#define _mm256_set1_epi64 _mm256_set1_epi64x
/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm256_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); }
foreach_avx2_vec256i foreach_avx2_vec256u
#undef _
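/* Usage sketch (illustrative only): the foreach macros above expand the
   generator for every 256-bit integer vector type, so e.g.:

     u32x8 v = u32x8_splat (7);        // all 8 lanes = 7
     if (u32x8_is_all_equal (v, 7))
       u32x8_store_unaligned (v, p);   // p is any void *
*/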
static_always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}
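/* Example (illustrative): u32x8_permute selects lanes of v by index, so an
   idx of { 7, 6, 5, 4, 3, 2, 1, 0 } reverses the 8 u32 lanes. */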
/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
#define _(t1,t2) \
static_always_inline t1 \
t2##_extract_lo (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
\
static_always_inline t1 \
t2##_extract_hi (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
\
static_always_inline t2 \
t2##_insert_lo (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
static_always_inline t2 \
t2##_insert_hi (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }

_(u8x16, u8x32)
_(u16x8, u16x16)
_(u32x4, u32x8)
_(u64x2, u64x4)
#undef _
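/* Example (illustrative): split a 256-bit vector into its 128-bit halves
   and put them back together:

     u32x4 lo = u32x8_extract_lo (v);
     u32x4 hi = u32x8_extract_hi (v);
     v = u32x8_insert_hi (u32x8_insert_lo (v, lo), hi);   // v unchanged
*/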
static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}
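/* u8x32_msb_mask packs the most significant bit of each of the 32 byte
   lanes into a 32-bit scalar, so the result of a byte compare becomes a
   bitmask with one bit per lane. */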
/* _extend_to_ */
#define _(f,t,i) \
static_always_inline t \
f##_extend_to_##t (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }
_(u16x8, u32x8, epu16_epi32)
_(u16x8, u64x4, epu16_epi64)
_(u32x4, u64x4, epu32_epi64)
_(u8x16, u16x16, epu8_epi16)
_(u8x16, u32x8, epu8_epi32)
_(u8x16, u64x4, epu8_epi64)
_(i16x8, i32x8, epi16_epi32)
_(i16x8, i64x4, epi16_epi64)
_(i32x4, i64x4, epi32_epi64)
_(i8x16, i16x16, epi8_epi16)
_(i8x16, i32x8, epi8_epi32)
_(i8x16, i64x4, epi8_epi64)
#undef _
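/* Example (illustrative, assuming the 128-bit helpers from vector_sse42.h
   are in scope, as they are when this file is included via
   vppinfra/vector.h): widen 16 unsigned bytes to 16 unsigned shorts,
   zero-extending each lane:

     u8x16 b = u8x16_splat (0xff);
     u16x16 s = u8x16_extend_to_u16x16 (b);   // each u16 lane == 0x00ff
*/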
static_always_inline u64x4
u64x4_byte_swap (u64x4 v)
{
  u8x32 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
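/* The byte-swap helpers above reverse byte order within each element:
   vpshufb shuffles bytes independently inside each 128-bit lane, and the
   swap pattern is repeated for both lanes, so e.g. u32x8_byte_swap turns
   eight little-endian u32 values into big-endian (network) order. */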
static_always_inline u8x32
u8x32_shuffle (u8x32 v, u8x32 m)
{
  return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
}
#define u8x32_align_right(a, b, imm) \
  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)
static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  return sum8[0] + sum8[4];
}
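/* u32x8_sum_elts reduces within each 128-bit lane first: after the two
   rotate-and-add steps, element 0 holds the sum of elements 0-3 and
   element 4 the sum of elements 4-7, hence the final sum8[0] + sum8[4]. */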
static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}
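/* Example (illustrative): u16x16_mask_last (v, 3) zeroes the last 3 of the
   16 u16 lanes; masks[16 - n_last] has all-ones in its first 16 - n_last
   entries and zeros in the remaining n_last. */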
#ifdef CLIB_HAVE_VEC256_MASK_LOAD_STORE
static_always_inline u8x32
u8x32_mask_load (u8x32 a, void *p, u32 mask)
{
  return (u8x32) _mm256_mask_loadu_epi8 ((__m256i) a, mask, p);
}
#endif
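/* Note: _mm256_mask_loadu_epi8 is an AVX-512VL/BW intrinsic, hence the
   guard above; each set bit in mask loads the corresponding byte from p,
   while cleared bits keep the byte from a. */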
static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}
static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}
#define u32x8_blend(a,b,m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)
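/* The blend immediate selects per lane: bit i set picks lane i of the
   second operand, bit i clear keeps the first. Example (illustrative):
   u32x8_blend (a, b, 0x0f) takes lanes 0-3 from b and lanes 4-7 from a. */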
static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = {
    *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
  };
  return r;
}
static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
	      void *p6, void *p7)
{
  u32x8 r = {
    *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
    *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
  };
  return r;
}
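/* Despite the name, the gathers above are built from independent scalar
   loads rather than the vpgatherdd/vpgatherqq instructions; for
   pointer-based access this is often at least as fast, since the compiler
   is free to schedule the eight loads. */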
static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}
static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
	       void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}
static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}
static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}
static_always_inline u8x32
u8x32_is_greater (u8x32 v1, u8x32 v2)
{
  return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
}
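/* Caveat: vpcmpgtb performs a signed byte compare, so lanes with the top
   bit set (values >= 0x80) compare as negative even though the operands
   are typed u8x32. */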
static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
				     (__m256i) mask);
}
#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
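/* The permute2x128 immediate picks one 128-bit lane per nibble from the
   concatenation of a (selectors 0-1) and b (selectors 2-3): 0x20 pairs the
   low lanes of a and b, 0x31 the high lanes. The transposes below rely on
   exactly these two selections. */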
static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}
static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
				      u32x8_extract_hi (v)));
}
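/* u32x8_transpose treats a[8] as an 8x8 u32 matrix and transposes it in
   place: interleave_lo/hi build 2x2 blocks within 128-bit lanes, the u64x4
   interleaves merge them into 4-element rows, and the final permute_lanes
   (0x20 / 0x31) exchange the 128-bit halves across rows. */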
static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}
static_always_inline void
u64x4_transpose (u64x4 a[8])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}
#endif /* included_vector_avx2_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */