/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

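/* Each _() tuple above is (sign prefix, element bits, lane count, intrinsic
   suffix); e.g. _(u,32,16,epi32) instantiates the u32x16_* helpers below on
   top of the _mm512_*_epi32 intrinsics. */
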
/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, is_zero_mask */
#define _(t, s, c, i) \
static_always_inline t##s##x##c t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return (_mm512_cmpneq_epi64_mask ((__m512i) a, (__m512i) b) == 0); } \
\
static_always_inline int t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c t##s##x##c##_interleave_lo (t##s##x##c a, \
							    t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c t##s##x##c##_interleave_hi (t##s##x##c a, \
							    t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _

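/* Illustrative usage sketch (hypothetical values; assumes an AVX-512 build
   where this header is in effect, i.e. CLIB_HAVE_VEC512 is defined):

     u32x16 v = u32x16_splat (7);
     u32x16 w = u32x16_load_unaligned (p);   // p points at >= 64 readable bytes
     if (u32x16_is_all_equal (w, 7))
       ;                                     // every lane of w equals 7
     u32x16_store_unaligned (v, p);
 */
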
static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}

#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi) \
  { \
    return (t) fn ((__m512i) lo, (__m512i) hi); \
  }

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _

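/* Note: like their SSE/AVX2 counterparts, the 512-bit pack intrinsics used
   above saturate and pack within each 128-bit lane, so i16x32_pack (lo, hi)
   interleaves lo/hi per lane rather than concatenating them end to end. */
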
static_always_inline u64x8
u64x8_byte_swap (u64x8 v)
{
  u8x64 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
  };
  return (u64x8) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

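/* Illustrative sketch: the byte swaps above are the bulk equivalent of
   ntohl/ntohs, e.g. converting 16 big-endian u32 values in place on a
   little-endian host (hypothetical buffer name):

     u32x16 v = u32x16_load_unaligned (net_counters);
     u32x16_store_unaligned (u32x16_byte_swap (v), net_counters);
 */
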
#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}

#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

static_always_inline u64x8
u64x8_xor3 (u64x8 a, u64x8 b, u64x8 c)
{
  return (u64x8) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

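/* The ternary-logic immediate is the truth table of the result for each
   (a, b, c) bit combination; 0x96 == 0b10010110 encodes a ^ b ^ c, which is
   what the *_xor3 helpers above rely on. */
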
static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}

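/* Reduction sketch: the two align_right steps above fold each 128-bit lane
   onto itself (by 8 then 4 bytes), leaving each lane's 4-element sum in that
   lane's first u32; adding the extracted 256-bit halves and summing elements
   0 and 4 of the result combines the four lane sums into one scalar. */
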
#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_load (t a, void *p, m mask) \
  { \
    return (t) p##_mask_loadu_##e ((i) a, mask, p); \
  } \
  static_always_inline t t##_mask_load_zero (void *p, m mask) \
  { \
    return (t) p##_maskz_loadu_##e (mask, p); \
  } \
  static_always_inline void t##_mask_store (t a, void *p, m mask) \
  { \
    p##_mask_storeu_##e (p, mask, (i) a); \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _

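/* Illustrative usage sketch (hypothetical names): handle a partial tail of n
   elements without touching memory past the end of the buffer.

     u32 n = 5;                              // 0 < n < 16 elements remaining
     u16 mask = (1 << n) - 1;                // lowest n bits selected
     u32x16 v = u32x16_mask_load_zero (src, mask);
     u32x16_mask_store (v, dst, mask);       // only the n selected lanes are written
 */
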
#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_and (t a, t b, m mask) \
  { \
    return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_andnot (t a, t b, m mask) \
  { \
    return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_xor (t a, t b, m mask) \
  { \
    return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_or (t a, t b, m mask) \
  { \
    return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b); \
  }

_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_permute (u8x64 idx, u8x64 a)
{
  return (u8x64) _mm512_permutexvar_epi8 ((__m512i) idx, (__m512i) a);
}

static_always_inline u8x64
u8x64_permute2 (u8x64 idx, u8x64 a, u8x64 b)
{
  return (u8x64) _mm512_permutex2var_epi8 ((__m512i) a, (__m512i) idx,
					   (__m512i) b);
}

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }

_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_not_equal_mask (t a, t b) \
  { \
    return p##_cmpneq_##e##_mask ((it) a, (it) b); \
  }

_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _

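/* Note: the narrowing conversions above (u16x16_from_u32x16, u16x8_from_u32x8)
   use the unsigned-saturating cvtusepi32 forms, so out-of-range u32 values
   clamp to 0xffff rather than being truncated. */
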
#define _(vt, mt, p, it, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) p##_maskz_compress_##epi (mask, (it) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) p##_maskz_expand_##epi (mask, (it) a); \
  } \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  { \
    p##_mask_compressstoreu_##epi (p, mask, (it) v); \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _

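/* Illustrative usage sketch (hypothetical values): keep only the lanes
   selected by a mask, packed towards element 0.

     u16 keep = 0x00f3;                          // lanes 0,1,4,5,6,7 selected
     u32x16 packed = u32x16_compress (v, keep);  // selected lanes move to the front
     u32x16_compress_store (v, keep, out);       // stores just those lanes at out
 */
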
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif
#endif

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif
#endif

#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif

static_always_inline u64
u64x8_hxor (u64x8 v)
{
  v ^= u64x8_align_right (v, v, 4);
  v ^= u64x8_align_right (v, v, 2);
  return v[0] ^ v[1];
}

static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}

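/* Illustrative usage sketch: m[] is a 16x16 matrix of u32, one row per vector;
   after the call m[i][j] holds what was m[j][i].

     u32x16 rows[16];
     for (int r = 0; r < 16; r++)
       rows[r] = u32x16_load_unaligned (mtx + 16 * r);  // mtx: hypothetical u32 *
     u32x16_transpose (rows);                           // rows[] now holds columns
 */
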
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

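/* Same idea as u32x16_transpose, for an 8x8 matrix of u64: the call exchanges
   m[i][j] and m[j][i] in place, reusing the same pm1..pm4 index vectors. */
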
#endif /* included_vector_avx512_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */