/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
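
/* Saturating packs: the macro below generates i8x64_pack, u8x64_pack,
   i16x32_pack and u16x32_pack, each narrowing two 512-bit vectors into one.
   As with the underlying VPACK* instructions, the lo/hi results are
   interleaved per 128-bit lane. */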

#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi) \
  { \
    return (t) fn ((__m512i) lo, (__m512i) hi); \
  }

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
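
/* extract_lo / extract_hi return the lower / upper 256-bit half of a
   512-bit vector. */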

#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}

#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)
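
/* Note: the byte shifts above use VPSLLDQ/VPSRLDQ semantics, i.e. each
   128-bit lane is shifted by n bytes independently. */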

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)
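
/* Horizontal sum of all 16 32-bit elements. */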

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
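
/* Masked loads/stores: for each (vector type, mask type) pair below this
   generates t_mask_load (merge load under mask), t_mask_load_zero (inactive
   elements zeroed) and t_mask_store. */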

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_load (t a, void *p, m mask) \
  { \
    return (t) p##_mask_loadu_##e ((i) a, mask, p); \
  } \
  static_always_inline t t##_mask_load_zero (void *p, m mask) \
  { \
    return (t) p##_maskz_loadu_##e (mask, p); \
  } \
  static_always_inline void t##_mask_store (t a, void *p, m mask) \
  { \
    p##_mask_storeu_##e (p, mask, (i) a); \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
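
/* A minimal usage sketch (illustrative only; src, dst and n are hypothetical
   caller-provided values): copy n bytes, n <= 64, with one masked load/store
   pair instead of a scalar tail loop:

     u64 mask = (n == 64) ? ~(u64) 0 : (((u64) 1 << n) - 1);
     u8x64 v = u8x64_mask_load_zero (src, mask);
     u8x64_mask_store (v, dst, mask);
*/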

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_and (t a, t b, m mask) \
  { \
    return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_andnot (t a, t b, m mask) \
  { \
    return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_xor (t a, t b, m mask) \
  { \
    return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_or (t a, t b, m mask) \
  { \
    return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b); \
  }

_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif

#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}
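
/* Per-element compare-equal: t_is_equal_mask (a, b) returns a bitmask with
   bit i set when element i of a equals element i of b. */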

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }

_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_not_equal_mask (t a, t b) \
  { \
    return p##_cmpneq_##e##_mask ((it) a, (it) b); \
  }

_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _
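
/* Compress/expand: vt_compress packs the elements selected by mask into the
   low elements (the rest are zeroed), vt_expand is the inverse operation, and
   vt_compress_store compresses directly to memory. */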

#define _(vt, mt, p, it, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) p##_maskz_compress_##epi (mask, (it) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) p##_maskz_expand_##epi (mask, (it) a); \
  } \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  { \
    p##_mask_compressstoreu_##epi (p, mask, (it) v); \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif
#endif

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif
#endif
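
/* Without AVX512-VBMI2 there is no native 16-bit compress, so it is emulated
   below by widening to 32 bits, compressing, and narrowing back. */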

#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif
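
/* In-place transpose of a 16x16 matrix of 32-bit elements held in 16 row
   vectors: 32-bit unpacks build 2x2 blocks, then 64-bit two-source permutes
   (pm1..pm4) gather the final rows. */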

static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}
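
/* In-place transpose of an 8x8 matrix of 64-bit elements, using the same
   unpack + two-source permute approach as above. */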

static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */