/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)
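
/* Each _() entry is (sign prefix, element bits, lane count, intrinsic
   suffix): e.g. _(u,32,16,epi32) describes u32x16 vectors handled with the
   _mm512_*_epi32 intrinsics. */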

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
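
/* Expanding the lists above instantiates the block for every integer
   element type; e.g. _(u,32,16,epi32) yields u32x16_splat,
   u32x16_load_aligned, u32x16_is_all_zero, u32x16_is_zero_mask,
   u32x16_interleave_lo and so on. */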

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
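
/* _mm512_shuffle_epi8 (VPSHUFB) permutes bytes within each 128-bit lane
   independently, so the 16-byte swap patterns below are repeated once per
   lane. */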

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

/* extract the lower or upper 256-bit half of a 512-bit vector */
#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
                                      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}
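
/* Each u64 index in mask selects an element from the 16-element
   concatenation of a (indices 0-7) and b (indices 8-15). */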

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
                                            (__m512i) b);
}
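
/* The immediate d is an 8-bit truth table: bit (a << 2 | b << 1 | c) of d
   is the result bit for every (a, b, c) input-bit triple. */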

#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)
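
/* 0x96 is the truth table of a ^ b ^ c, so the 3-way XOR folds into a
   single VPTERNLOG instruction. */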

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
                                            (__m512i) c, 0x96);
}

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

static_always_inline u8x64
u8x64_shuffle (u8x64 v, u8x64 m)
{
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)
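
/* Horizontal sum: the two rotate-and-add steps leave each 128-bit lane's
   partial sum in its first u32; folding the upper 256 bits onto the lower
   256 bits then leaves the remaining partials in sum8[0] and sum8[4]. */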

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_load (t a, void *p, m mask) \
  { \
    return (t) p##_mask_loadu_##e ((i) a, mask, p); \
  } \
  static_always_inline t t##_mask_load_zero (void *p, m mask) \
  { \
    return (t) p##_maskz_loadu_##e (mask, p); \
  } \
  static_always_inline void t##_mask_store (t a, void *p, m mask) \
  { \
    p##_mask_storeu_##e (p, mask, (i) a); \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
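
/* AVX-512 masked loads/stores suppress faults on masked-off lanes, so
   these are safe for partial (tail) vectors.  A typical use (assuming
   pow2_mask from vppinfra): u32x16_mask_load_zero (p, pow2_mask (n))
   loads the first n elements and zeroes the rest. */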

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}
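
/* Per-lane equality compare returning a bit mask (one bit per lane)
   instead of a vector of all-ones / all-zeros lanes. */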

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }

_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _
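
/* Type conversions: cvtusepi32_epi16 narrows with unsigned saturation,
   cvtepi16_epi32 widens with sign extension, cvtepu32_epi64 widens with
   zero extension. */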

#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _
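
/* compress packs the elements selected by mask contiguously into the low
   lanes and zeroes the rest; expand is the inverse, scattering consecutive
   low elements back into the masked positions; compress_store writes only
   the selected elements to memory. */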

#define _(vt, mt, p, it, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) p##_maskz_compress_##epi (mask, (it) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) p##_maskz_expand_##epi (mask, (it) a); \
  } \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  { \
    p##_mask_compressstoreu_##epi (p, mask, (it) v); \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _

#define CLIB_HAVE_VEC256_COMPRESS
#define CLIB_HAVE_VEC512_COMPRESS
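
/* 8- and 16-bit element compress needs AVX512_VBMI2; without it the
   16-bit variants are emulated by widening to 32 bits, compressing and
   narrowing back. */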

#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif
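
/* 16x16 transpose of 32-bit elements, done in three stages: 32-bit
   unpacklo/unpackhi within 128-bit lanes, 64-bit unpacklo/unpackhi, then
   two rounds of cross-lane permutes using the index vectors pm1-pm4. */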

static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}
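
/* 8x8 transpose of 64-bit elements: same scheme as above without the
   32-bit unpack stage. */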

static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */