/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
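
/* Each _(t, s, c, i) entry above stamps out one full set of wrappers for
   the vector type t##s##x##c.  As a sketch of what the preprocessor
   generates, _(u,8,64,epi8) yields, among others:

     static_always_inline u8x64
     u8x64_splat (u8 x)
     { return (u8x64) _mm512_set1_epi8 (x); }

   plus u8x64_load_aligned, u8x64_store_unaligned, u8x64_is_equal, etc.,
   so every integer vector flavor gets the same surface API. */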

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
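
/* u16x32_msb_mask collects the most significant bit of each 16-bit lane
   into a 32-bit scalar: bit n of the result is the sign bit of lane n.
   Illustrative use (variable names here are hypothetical):

     u16x32 gt = (a > b);               - lanes become all-ones or zero
     u32 hits = u16x32_msb_mask (gt);   - one result bit per lane */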

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
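
/* The two byte-swap shuffle masks above reverse byte order within each
   32-bit (or 16-bit) lane, i.e. the vector equivalent of ntohl ()/ntohs ()
   applied to 16 (or 32) values at once.  _mm512_shuffle_epi8 only shuffles
   within each 128-bit lane, which is why the 16-byte pattern is repeated
   four times. */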

static_always_inline u32x8
u32x16_extract_lo (u32x16 v)
{
  return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
}

static_always_inline u32x8
u32x16_extract_hi (u32x16 v)
{
  return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
}

static_always_inline u8x32
u8x64_extract_lo (u8x64 v)
{
  return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
}

static_always_inline u8x32
u8x64_extract_hi (u8x64 v)
{
  return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
}

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}

#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
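
/* The last argument to _mm512_ternarylogic_epi32 is an 8-bit truth table:
   for each bit position, the bit triple (a, b, c) selects bit
   (a * 4 + b * 2 + c) of the immediate.  Worked example for 3-way XOR:

     a b c   a^b^c   table index
     0 0 0     0          0
     0 0 1     1          1
     0 1 0     1          2
     0 1 1     0          3
     1 0 0     1          4
     1 0 1     0          5
     1 1 0     0          6
     1 1 1     1          7

   Reading the output column from index 7 down to 0 gives 10010110b, i.e.
   0x96, the constant used by u8x64_xor3 below. */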

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)
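
/* Caveat: _mm512_bslli_epi128 / _mm512_bsrli_epi128 shift bytes within
   each of the four 128-bit lanes independently; bytes never carry across
   lane boundaries.  E.g. u8x64_word_shift_left (v, 1) shifts each 16-byte
   quarter of v by one byte, inserting a zero byte per lane. */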

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}
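
/* 0x96 is the a ^ b ^ c truth table derived above, so the three-way XOR
   costs a single instruction instead of two. */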

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

static_always_inline u8x64
u8x64_shuffle (u8x64 v, u8x64 m)
{
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)
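
/* Like the byte shifts above, _mm512_alignr_epi8 concatenates and shifts
   within each 128-bit lane, not across the full 512-bit register.  That
   per-lane behavior is exactly what u32x16_sum_elts below relies on to
   rotate partial sums inside every lane at once. */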

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
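
/* Reduction sketch: after the two align-right adds, element 0 of each
   128-bit lane holds that lane's 4-element sum.  Adding the high and low
   256-bit halves folds four lane sums into two (at sum8[0] and sum8[4]),
   and the final scalar add yields the total of all 16 elements. */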

static_always_inline u8x64
u8x64_mask_load (u8x64 a, void *p, u64 mask)
{
  return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
}

static_always_inline void
u8x64_mask_store (u8x64 a, void *p, u64 mask)
{
  _mm512_mask_storeu_epi8 (p, mask, (__m512i) a);
}
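
/* Masked loads/stores touch only the bytes whose mask bit is set, which
   makes partial-vector copies cheap.  A minimal sketch, assuming n <= 64
   and pow2_mask () from vppinfra (builds a value with n low bits set):

     u64 m = pow2_mask (n);
     u8x64 t = u8x64_mask_load (u8x64_splat (0), src, m);
     u8x64_mask_store (t, dst, m);

   Bytes of src and dst beyond n are neither read nor written. */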

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8
u64x8_mask_is_equal (u64x8 a, u64x8 b)
{
  return _mm512_cmpeq_epu64_mask ((__m512i) a, (__m512i) b);
}
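
/* The two transpose routines below work in rounds: 32-bit (or 64-bit)
   unpacks pair up neighboring rows, then two passes of
   _mm512_permutex2var_epi64 driven by the pm1..pm4 index vectors gather
   the matching quadwords into whole transposed rows.  Usage sketch: load
   the 16 rows of a 16x16 u32 matrix into m[0..15], call
   u32x16_transpose (m), and m[i] then holds former column i. */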

static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}

static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */