/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)
/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, is_zero_mask */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); }
foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
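/* Usage sketch (illustrative only, not part of the original header): the
   macro above expands once per element type listed in the foreach macros,
   so e.g. the u32x16 flavour provides u32x16_splat, u32x16_load_unaligned,
   u32x16_is_all_equal and friends.  Assuming hypothetical pointers src/dst:

     u32x16 v = u32x16_load_unaligned (src);        // no 64-byte alignment needed
     if (u32x16_is_all_equal (v, 0))                // all 16 lanes equal 0
       return;
     u32x16_store_aligned (u32x16_splat (1), dst);  // dst must be 64-byte aligned
*/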
static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
#define _(f, t, fn) \
always_inline t t##_pack (f lo, f hi) \
{ \
  return (t) fn ((__m512i) lo, (__m512i) hi); \
}

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _
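/* Sketch (illustrative, not from the original source): the generated pack
   helpers narrow two 512-bit vectors into one with saturation, e.g.:

     i8x64 packed = i16x32_pack (lo, hi);   // signed saturation to 8 bit

   As with the underlying SSE/AVX pack instructions, elements of lo and hi
   are interleaved per 128-bit lane rather than concatenated, so a permute
   is usually needed afterwards when element order matters. */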
static_always_inline u64x8
u64x8_byte_swap (u64x8 v)
{
  u8x64 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x8) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
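/* Sketch (illustrative, assumed use case): the byte swaps above reverse the
   byte order inside every element, which is the vector equivalent of
   ntohl()/ntohs() for 16 or 32 values at a time.  Assuming a hypothetical
   pointer 'hdrs' to packed big-endian 32-bit fields:

     u32x16 net = u32x16_load_unaligned (hdrs);
     u32x16 host = u32x16_byte_swap (net);   // big-endian -> little-endian
*/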
#define _(f, t) \
static_always_inline t f##_extract_lo (f v) \
{ \
  return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
} \
\
static_always_inline t f##_extract_hi (f v) \
{ \
  return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
}

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _
static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}
static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}
static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}
#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)
static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}
static_always_inline u64x8
u64x8_xor3 (u64x8 a, u64x8 b, u64x8 c)
{
  return (u64x8) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}
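/* Sketch (illustrative): 0x96 is the ternary-logic truth table of
   a ^ b ^ c, so the helpers above fold two XORs into a single vpternlog
   instruction, which is handy when accumulating parity/CRC-style state
   over several vectors:

     u64x8 acc = u64x8_xor3 (acc, d0, d1);   // acc ^= d0 ^ d1, hypothetical d0/d1
*/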
static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}
#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
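/* Sketch (illustrative): u32x16_sum_elts performs the horizontal reduction
   that typically finishes a vectorized accumulation loop.  Assuming a
   hypothetical u32 array 'data' whose length n is a multiple of 16:

     u32x16 acc = u32x16_splat (0);
     for (u32 i = 0; i < n; i += 16)
       acc += u32x16_load_unaligned (data + i);
     u32 total = u32x16_sum_elts (acc);
*/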
#define _(t, m, p, i, e) \
static_always_inline t t##_mask_load (t a, void *p, m mask) \
{ \
  return (t) p##_mask_loadu_##e ((i) a, mask, p); \
} \
static_always_inline t t##_mask_load_zero (void *p, m mask) \
{ \
  return (t) p##_maskz_loadu_##e (mask, p); \
} \
static_always_inline void t##_mask_store (t a, void *p, m mask) \
{ \
  p##_mask_storeu_##e (p, mask, (i) a); \
}
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
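/* Sketch (illustrative): masked loads and stores make it possible to handle
   a partial (tail) vector without a scalar loop.  Assuming clib's
   pow2_mask() helper and hypothetical src/dst pointers with n_left < 16
   u32 elements remaining:

     u16 m = pow2_mask (n_left);
     u32x16 v = u32x16_mask_load_zero (src, m);   // unselected lanes read as 0
     u32x16_mask_store (v, dst, m);               // only n_left lanes are written
*/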
#define _(t, m, p, i, e) \
static_always_inline t t##_mask_and (t a, t b, m mask) \
{ \
  return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b); \
} \
static_always_inline t t##_mask_andnot (t a, t b, m mask) \
{ \
  return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b); \
} \
static_always_inline t t##_mask_xor (t a, t b, m mask) \
{ \
  return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b); \
} \
static_always_inline t t##_mask_or (t a, t b, m mask) \
{ \
  return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b); \
}
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif
static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}
static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}
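/* Sketch (illustrative): a mask blend is a per-lane select, i.e. a
   branch-free conditional move across the whole vector:

     u32x16 r = u32x16_mask_blend (a, b, 0x00ff);   // lanes 0-7 from b, 8-15 from a
*/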
static_always_inline u8x64
u8x64_permute (u8x64 v, u8x64 idx)
{
  /* _mm512_permutexvar_epi8 takes the index vector first */
  return (u8x64) _mm512_permutexvar_epi8 ((__m512i) idx, (__m512i) v);
}
#define _(t, m, e, p, it) \
static_always_inline m t##_is_equal_mask (t a, t b) \
{ \
  return p##_cmpeq_##e##_mask ((it) a, (it) b); \
}
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _
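/* Sketch (illustrative): the comparisons above return a bitmask rather than
   a vector of lane results, so matches can be counted or located directly.
   Assuming a hypothetical vector 'keys' and scalar 'needle':

     u16 hits = u32x16_is_equal_mask (keys, u32x16_splat (needle));
     int n_hits = __builtin_popcount (hits);
     int first = hits ? __builtin_ctz (hits) : -1;   // lane index of first match
*/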
#define _(t, m, e, p, it) \
static_always_inline m t##_is_not_equal_mask (t a, t b) \
{ \
  return p##_cmpneq_##e##_mask ((it) a, (it) b); \
}
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _
#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _
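/* Sketch (illustrative): these conversions widen or narrow whole vectors,
   e.g. widening 16-bit values before accumulating them in 32-bit lanes,
   or narrowing 32-bit results back down with unsigned saturation:

     u32x16 wide = u32x16_from_u16x16 (v16);     // 16 x u16 -> 16 x u32
     u16x16 narrow = u16x16_from_u32x16 (v32);   // saturating 32 -> 16 bit
*/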
#define _(vt, mt, p, it, epi) \
static_always_inline vt vt##_compress (vt a, mt mask) \
{ \
  return (vt) p##_maskz_compress_##epi (mask, (it) a); \
} \
static_always_inline vt vt##_expand (vt a, mt mask) \
{ \
  return (vt) p##_maskz_expand_##epi (mask, (it) a); \
} \
static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
{ \
  p##_mask_compressstoreu_##epi (p, mask, (it) v); \
}
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
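/* Sketch (illustrative): compress packs the lanes selected by the mask to
   the front of the result (expand is the inverse), which is the usual
   building block for vectorized filtering.  Assuming hypothetical 'values',
   'x' and an output pointer 'out':

     u16 m = u32x16_is_equal_mask (values, u32x16_splat (x));
     u32x16_compress_store (values, m, out);      // writes popcount(m) elements
     out += __builtin_popcount (m);
*/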
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif
#endif

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif
#endif
#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);
  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}
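/* Sketch (illustrative): u32x16_transpose converts 16 row vectors into 16
   column vectors in place, e.g. to turn 16 loaded records of 16 u32 fields
   each into one vector per field.  Assuming a hypothetical array 'rec' of
   16 pointers to such records:

     u32x16 m[16];
     for (int i = 0; i < 16; i++)
       m[i] = u32x16_load_unaligned (rec[i]);
     u32x16_transpose (m);   // m[j] now holds field j of all 16 records
*/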
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}
#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */