/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>
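/* Each tuple below is (type prefix, lane width in bits, lane count,
   intrinsic suffix); the foreach macros expand _() once per 512-bit
   integer vector type. */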
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)
/* splat, load/store (aligned and unaligned), is_all_zero, is_equal,
   is_all_equal, is_zero_mask, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); }
foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
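/* Illustrative sketch of the generated helpers (not additional API):
     u32x16 v = u32x16_splat (7);
     if (u32x16_is_all_equal (v, 7))
       ...
   Equivalent functions exist for the other 512-bit unsigned and signed
   vector types. */

/* Collect the most significant bit of each 16-bit lane into a 32-bit
   mask. */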
static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
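/* Saturating packs: each helper narrows two input vectors into one
   vector with lanes of half the width.  The underlying pack intrinsics
   operate within 128-bit lanes, so results are interleaved per lane
   rather than concatenated across the register. */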
#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi) \
  { \
    return (t) fn ((__m512i) lo, (__m512i) hi); \
  }

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _
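/* Per-element byte swaps (endian conversion) implemented with a byte
   shuffle. */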
static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _
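/* Horizontal minimum of all 16 u32 lanes, built on the 256-bit
   u32x8_min / u32x8_min_scalar helpers. */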
static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}
static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}
static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}
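/* _mm512_ternarylogic_epi32: the last operand is an 8-bit truth table
   applied bitwise to (a, b, c); e.g. 0x96 computes a ^ b ^ c. */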
#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)
static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}
static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}
static_always_inline u8x64
u8x64_shuffle (u8x64 v, u8x64 m)
{
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}
#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)
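/* Horizontal sum of all 16 u32 lanes: fold each 128-bit lane with byte
   rotates, add the upper and lower 256-bit halves, then combine the two
   remaining per-lane sums. */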
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
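/* Masked loads and stores: lanes whose mask bit is clear keep the value
   from a (_mask_load), are zeroed (_mask_load_zero), or are not written
   to memory (_mask_store). */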
#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_load (t a, void *p, m mask) \
  { \
    return (t) p##_mask_loadu_##e ((i) a, mask, p); \
  } \
  static_always_inline t t##_mask_load_zero (void *p, m mask) \
  { \
    return (t) p##_maskz_loadu_##e (mask, p); \
  } \
  static_always_inline void t##_mask_store (t a, void *p, m mask) \
  { \
    p##_mask_storeu_##e (p, mask, (i) a); \
  }
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
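/* Illustrative use (assumes pow2_mask() from vppinfra): load only the
   first n u32 elements, zeroing the rest of the vector:
     u32x16 v = u32x16_mask_load_zero (ptr, pow2_mask (n));  */

/* Masked bitwise ops: lanes whose mask bit is clear keep the value from
   a; lanes whose bit is set receive a OP b. */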
#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_and (t a, t b, m mask) \
  { \
    return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_andnot (t a, t b, m mask) \
  { \
    return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_xor (t a, t b, m mask) \
  { \
    return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_or (t a, t b, m mask) \
  { \
    return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b); \
  }
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif

#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif
static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}
static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}
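/* Per-lane equality compares that return a bitmask with one bit per
   lane. */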
#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _
#define _(t, m, e, p, it) \
  static_always_inline m t##_is_not_equal_mask (t a, t b) \
  { \
    return p##_cmpneq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _
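/* Lane-width conversions; the cvtusepi variants narrow with unsigned
   saturation, the cvtep* variants widen with sign or zero extension. */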
#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _
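/* compress packs the lanes selected by mask into the low lanes of the
   result (remaining lanes zeroed), expand is the inverse, and
   compress_store writes only the selected lanes contiguously to
   memory. */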
#define _(vt, mt, p, it, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) p##_maskz_compress_##epi (mask, (it) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) p##_maskz_expand_##epi (mask, (it) a); \
  } \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  { \
    p##_mask_compressstoreu_##epi (p, mask, (it) v); \
  }
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif
#endif

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif
#endif
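/* Without AVX512VBMI2 there is no native 16-bit compress, so emulate it
   by widening to 32 bits, compressing, and narrowing back. */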
#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif
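/* 16x16 transpose of 32-bit elements: interleave row pairs with
   unpacklo/unpackhi, then place the resulting 128-bit blocks with two
   rounds of 64-bit cross-lane permutes (permutex2var). */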
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);
  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}
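/* 8x8 transpose of 64-bit elements using the same two-round permute
   scheme as above. */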
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);
  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}
#endif /* included_vector_avx512_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */