2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #ifndef included_vector_avx512_h
17 #define included_vector_avx512_h
19 #include <vppinfra/clib.h>
20 #include <x86intrin.h>
/* X-macro lists: each entry is _(sign, bits, lane_count, intrinsic_suffix).
   A caller defines _() and expands one of these lists to generate a family
   of wrappers for every 512-bit vector type. */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
/* Unsigned variants reuse the same epi* intrinsic suffixes. */
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
/* Float variants: 8 x f32 (ps) and 4 x f64 (pd). */
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)
/* Generated for every 512-bit integer vector type via the foreach lists
   above: splat, load/store (aligned and unaligned), is_all_zero, is_equal,
   is_all_equal, is_zero_mask, interleave_lo, interleave_hi.
   NOTE(review): blank continuation lines and the trailing #undef _ are not
   visible in this chunk of the file. */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); } \
foreach_avx512_vec512i foreach_avx512_vec512u
/* Return a 32-bit mask whose bit i is the most-significant bit of the
   i-th 16-bit lane of v (vpmovw2m).
   NOTE(review): the function's braces are not visible in this chunk. */
static_always_inline u32
u16x32_msb_mask (u16x32 v)
  return (u32) _mm512_movepi16_mask ((__m512i) v);
/* NOTE(review): the #define header line of this macro is not visible in
   this chunk.  It generates narrowing pack helpers: each instantiation
   combines two wide vectors into one narrower one using signed-saturating
   (packs) or unsigned-saturating (packus) intrinsics. */
always_inline t t##_pack (f lo, f hi) \
return (t) fn ((__m512i) lo, (__m512i) hi); \
_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
/* Byte-swap each 64-bit lane.  The shuffle table reverses bytes within
   every 8-byte group of each 128-bit lane (vpshufb operates per 128-bit
   lane, so the 16-byte pattern repeats four times).
   NOTE(review): the opening brace and the 'swap' table declaration line
   are not visible in this chunk. */
static_always_inline u64x8
u64x8_byte_swap (u64x8 v)
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  return (u64x8) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
/* Byte-swap each 32-bit lane: the table reverses bytes within every
   4-byte group, repeated per 128-bit lane for vpshufb.
   NOTE(review): brace and table declaration lines not visible here. */
static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
/* Byte-swap each 16-bit lane: the table swaps adjacent byte pairs,
   repeated per 128-bit lane for vpshufb.
   NOTE(review): brace and table declaration lines not visible here. */
static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
/* Fragment of a macro generating f##_extract_lo / f##_extract_hi:
   return the low (index 0) or high (index 1) 256-bit half of a 512-bit
   vector.  NOTE(review): the #define header and instantiation lines are
   not visible in this chunk. */
static_always_inline t f##_extract_lo (f v) \
  return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
static_always_inline t f##_extract_hi (f v) \
  return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
/* Horizontal minimum over all 16 u32 lanes: take the lane-wise min of
   the two 256-bit halves, then defer to the 256-bit reduction helper. */
static_always_inline u32
u32x16_min_scalar (u32x16 v)
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
/* Replace the low 256-bit half of r with v; other lanes unchanged. */
static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
/* Replace the high 256-bit half of r with v; other lanes unchanged. */
static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
/* Two-source 64-bit lane permute: each lane of mask selects a lane from
   the concatenation of a and b (vpermt2q).
   NOTE(review): the line carrying the final '(__m512i) b' argument is not
   visible in this chunk. */
static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
/* (a OP b OP c) per 32-bit lane, where OP is selected by the 8-bit
   truth-table immediate d (vpternlogd). */
#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) (a), (__m512i) (b), \
				      (__m512i) (c), (d))

/* Insert a 128-bit vector into 128-bit lane n of a 512-bit vector. */
#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), (n))

/* Extract 128-bit lane n of a 512-bit vector. */
#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), (n))

/* Byte shift within each 128-bit lane; n is a byte count immediate.
   Macro arguments are parenthesized so expressions passed as a/n parse
   correctly (a cast binds tighter than most operators). */
#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) (a), (n))
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) (a), (n))
/* Three-way XOR in a single vpternlogd instruction.
   NOTE(review): the line carrying the third operand and the truth-table
   immediate (presumably 0x96 == a ^ b ^ c) is not visible in this chunk
   for either function — confirm against the full file. */
static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
static_always_inline u64x8
u64x8_xor3 (u64x8 a, u64x8 b, u64x8 c)
  return (u64x8) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
/* Reverse the byte order within each 128-bit lane independently: the
   vpshufb mask 15..0 repeats for all four lanes. */
static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
/* Concatenate b:a per 128-bit lane and shift right by imm bytes
   (vpalignr).  Arguments are parenthesized so expressions passed as
   a/b parse correctly under the cast. */
#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) (a), (__m512i) (b), (imm))

/* Concatenate b:a and shift right by imm 64-bit elements (valignq). */
#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) (a), (__m512i) (b), (imm))
/* Horizontal sum of all 16 u32 lanes: log2 reduction within each 128-bit
   lane via byte rotations, then fold the two 256-bit halves and add the
   two surviving partial sums (lanes 0 and 4).
   NOTE(review): the opening brace and the 'sum8' declaration line are not
   visible in this chunk. */
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
/* Masked load/store wrappers, instantiated below for 128/256/512-bit
   unsigned vector types: _mask_load merges loaded lanes into a,
   _mask_load_zero zeroes inactive lanes, _mask_store writes only active
   lanes.  Parameters: t = vector type, m = mask type, p = intrinsic
   prefix, i = raw intrinsic type, e = element suffix.
   NOTE(review): braces and the trailing #undef _ are not visible in this
   chunk. */
#define _(t, m, p, i, e) \
static_always_inline t t##_mask_load (t a, void *p, m mask) \
  return (t) p##_mask_loadu_##e ((i) a, mask, p); \
static_always_inline t t##_mask_load_zero (void *p, m mask) \
  return (t) p##_maskz_loadu_##e (mask, p); \
static_always_inline void t##_mask_store (t a, void *p, m mask) \
  p##_mask_storeu_##e (p, mask, (i) a); \
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
/* Masked bitwise operations: for active mask lanes compute a OP b, for
   inactive lanes keep the corresponding lane of a (first operand doubles
   as the merge source).  Instantiated for 32/64-bit lane types below.
   NOTE(review): braces and the trailing #undef _ are not visible in this
   chunk. */
#define _(t, m, p, i, e) \
static_always_inline t t##_mask_and (t a, t b, m mask) \
  return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b); \
static_always_inline t t##_mask_andnot (t a, t b, m mask) \
  return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b); \
static_always_inline t t##_mask_xor (t a, t b, m mask) \
  return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b); \
static_always_inline t t##_mask_or (t a, t b, m mask) \
  return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b); \
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
/* Advertise masked load/store and masked bitwise ops for each vector
   width the build supports, so generic code can feature-test.
   NOTE(review): the matching #endif lines are not visible in this
   chunk. */
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
/* Broadcast a 128-bit vector into all four 128-bit lanes of a 512-bit
   vector (vbroadcasti64x2).  NOTE(review): braces are not visible in this
   chunk. */
static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
/* Same broadcast for a u32x4, yielding the 4-element pattern repeated
   four times. */
static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
/* Per-lane select: result lane i is b[i] where mask bit i is set, else
   a[i].  NOTE(review): braces are not visible in this chunk. */
static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
/* Byte-granularity variant: one mask bit per byte lane. */
static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
/* Full 64-byte cross-lane byte permute (vpermb, AVX512-VBMI): result
   byte i is a[idx[i] & 63]. */
static_always_inline u8x64
u8x64_permute (u8x64 idx, u8x64 a)
  return (u8x64) _mm512_permutexvar_epi8 ((__m512i) idx, (__m512i) a);
/* Two-source byte permute (vpermt2b): idx selects from the 128-byte
   concatenation of a and b.  NOTE(review): the line carrying the final
   '(__m512i) b' argument is not visible in this chunk. */
static_always_inline u8x64
u8x64_permute2 (u8x64 idx, u8x64 a, u8x64 b)
  return (u8x64) _mm512_permutex2var_epi8 ((__m512i) a, (__m512i) idx,
/* Per-lane equality compare producing a bitmask (one bit per lane),
   generated for every unsigned vector type at 128/256/512 bits.
   NOTE(review): braces and the trailing #undef _ are not visible in this
   chunk. */
#define _(t, m, e, p, it) \
static_always_inline m t##_is_equal_mask (t a, t b) \
  return p##_cmpeq_##e##_mask ((it) a, (it) b); \
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)
_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)
_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
/* Per-lane inequality compare producing a bitmask; mirrors the
   is_equal_mask family above but uses cmpneq.
   NOTE(review): braces and the trailing #undef _ are not visible in this
   chunk. */
#define _(t, m, e, p, it) \
static_always_inline m t##_is_not_equal_mask (t a, t b) \
  return p##_cmpneq_##e##_mask ((it) a, (it) b); \
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)
_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)
_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
/* Lane-width conversions t##_from_##f: sign/zero extension (cvtepi/cvtepu)
   when widening, saturating narrowing (cvtusepi) when shrinking.
   NOTE(review): the trailing #undef _ is not visible in this chunk. */
#define _(f, t, fn, it) \
static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
/* Compress / expand / compress-store wrappers: _compress packs the
   mask-selected lanes to the front (rest zeroed), _expand scatters
   consecutive source lanes to the mask positions, _compress_store writes
   only the selected lanes contiguously to memory.  16/8-bit lane variants
   need AVX512-VBMI2.  NOTE(review): braces, the trailing #undef _ and the
   #endif for __AVX512VBMI2__ are not visible in this chunk. */
#define _(vt, mt, p, it, epi) \
static_always_inline vt vt##_compress (vt a, mt mask) \
  return (vt) p##_maskz_compress_##epi (mask, (it) a); \
static_always_inline vt vt##_expand (vt a, mt mask) \
  return (vt) p##_maskz_expand_##epi (mask, (it) a); \
static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  p##_mask_compressstoreu_##epi (p, mask, (it) v); \
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
/* Advertise compress support per vector width; the U8_U16 variants
   additionally require AVX512-VBMI2.
   NOTE(review): the matching #endif lines are not visible in this
   chunk. */
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
/* Fallback 16-bit compress when AVX512-VBMI2 is unavailable: widen the
   lanes to 32 bits, compress there, then narrow back.
   NOTE(review): braces and the closing #endif are not visible in this
   chunk. */
#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
/* NOTE(review): the line naming this function is missing from this view.
   The visible body folds a u64x8 by XORing it with rotations of itself
   (by 4 then 2 elements), consistent with a horizontal-XOR reduction to a
   single u64 — confirm against the full file. */
static_always_inline u64
  v ^= u64x8_align_right (v, v, 4);
  v ^= u64x8_align_right (v, v, 2);
/* In-place transpose of a 16x16 matrix of u32 held as 16 row vectors.
   Stage 1: 32-bit unpacks pair adjacent rows; stage 2: 64-bit unpacks;
   stage 3: cross-lane 64-bit permutes assemble the transposed rows.
   NOTE(review): the opening brace and blank separator lines are not
   visible in this chunk; statement order is load-bearing — do not
   reorder. */
static_always_inline void
u32x16_transpose (u32x16 m[16])
  __m512i r[16], a, b, c, d, x, y;
  /* Permute selectors: pm1/pm2 interleave 128-bit pairs from two
     sources; pm3/pm4 select the low/high 256-bit halves. */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* Stage 1: interleave low/high 32-bit halves of each row pair. */
  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);
  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);
  /* Stages 2+3: 64-bit unpack then permute, writing 4 output rows per
     group of 8 intermediates. */
  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
/* In-place transpose of an 8x8 matrix of u64 held as 8 row vectors.
   Same unpack-then-permute network as u32x16_transpose, one level
   shallower since lanes are already 64-bit.
   NOTE(review): the opening brace and the r/x/y declaration lines are
   not visible in this chunk; statement order is load-bearing. */
static_always_inline void
u64x8_transpose (u64x8 m[8])
  /* pm1/pm2 interleave 128-bit pairs; pm3/pm4 select low/high halves. */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);
  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
579 #endif /* included_vector_avx512_h */
581 * fd.io coding-style-patch-verification: ON
584 * eval: (c-set-style "gnu")