/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2005 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef included_vector_sse2_h
#define included_vector_sse2_h

#include <vppinfra/error_bootstrap.h>	/* for ASSERT */
#include <x86intrin.h>
#define foreach_sse42_vec128i \
  _(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64x)
#define foreach_sse42_vec128u \
  _(u,8,16,epi8) _(u,16,8,epi16) _(u,32,4,epi32) _(u,64,2,epi64x)
#define foreach_sse42_vec128f \
  _(f,32,4,ps) _(f,64,2,pd)
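/* Each tuple is (type prefix, lane bits, lane count, intrinsic suffix);
   e.g. _(u,32,4,epi32) generates the u32x4 operations on top of the
   _mm_*_epi32 intrinsic family. */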
/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal */
#define _(t, s, c, i) \
static_always_inline t##s##x##c					\
t##s##x##c##_splat (t##s x)					\
{ return (t##s##x##c) _mm_set1_##i (x); }			\
\
static_always_inline t##s##x##c					\
t##s##x##c##_load_unaligned (void *p)				\
{ return (t##s##x##c) _mm_loadu_si128 (p); }			\
\
static_always_inline void					\
t##s##x##c##_store_unaligned (t##s##x##c v, void *p)		\
{ _mm_storeu_si128 ((__m128i *) p, (__m128i) v); }		\
\
static_always_inline int					\
t##s##x##c##_is_all_zero (t##s##x##c x)				\
{ return _mm_testz_si128 ((__m128i) x, (__m128i) x); }		\
\
static_always_inline int					\
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b)		\
{ return t##s##x##c##_is_all_zero (a ^ b); }			\
\
static_always_inline int					\
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x)		\
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }; \

foreach_sse42_vec128i foreach_sse42_vec128u
#undef _
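/* Example of the generated names (illustrative):
 *
 *   u32x4 v = u32x4_splat (7);
 *   u32x4 w = u32x4_load_unaligned (p);
 *   if (u32x4_is_equal (v, w))
 *     ...
 */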
/* min, max */
#define _(t, s, c, i) \
static_always_inline t##s##x##c					\
t##s##x##c##_min (t##s##x##c a, t##s##x##c b)			\
{ return (t##s##x##c) _mm_min_##i ((__m128i) a, (__m128i) b); }	\
\
static_always_inline t##s##x##c					\
t##s##x##c##_max (t##s##x##c a, t##s##x##c b)			\
{ return (t##s##x##c) _mm_max_##i ((__m128i) a, (__m128i) b); }	\

_(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64)
_(u,8,16,epu8) _(u,16,8,epu16) _(u,32,4,epu32) _(u,64,2,epu64)
#undef _
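/* Note: the 8/16/32-bit min/max intrinsics are SSE2/SSE4.1, while the
   epi64/epu64 variants only exist as AVX-512VL extensions, so the 64-bit
   functions above are usable only on targets that provide them. */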
#define CLIB_VEC128_SPLAT_DEFINED
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
/* 128 bit interleaves. */
always_inline u8x16
u8x16_interleave_hi (u8x16 a, u8x16 b)
{
  return (u8x16) _mm_unpackhi_epi8 ((__m128i) a, (__m128i) b);
}

always_inline u8x16
u8x16_interleave_lo (u8x16 a, u8x16 b)
{
  return (u8x16) _mm_unpacklo_epi8 ((__m128i) a, (__m128i) b);
}

always_inline u16x8
u16x8_interleave_hi (u16x8 a, u16x8 b)
{
  return (u16x8) _mm_unpackhi_epi16 ((__m128i) a, (__m128i) b);
}

always_inline u16x8
u16x8_interleave_lo (u16x8 a, u16x8 b)
{
  return (u16x8) _mm_unpacklo_epi16 ((__m128i) a, (__m128i) b);
}

always_inline u32x4
u32x4_interleave_hi (u32x4 a, u32x4 b)
{
  return (u32x4) _mm_unpackhi_epi32 ((__m128i) a, (__m128i) b);
}

always_inline u32x4
u32x4_interleave_lo (u32x4 a, u32x4 b)
{
  return (u32x4) _mm_unpacklo_epi32 ((__m128i) a, (__m128i) b);
}

always_inline u64x2
u64x2_interleave_hi (u64x2 a, u64x2 b)
{
  return (u64x2) _mm_unpackhi_epi64 ((__m128i) a, (__m128i) b);
}

always_inline u64x2
u64x2_interleave_lo (u64x2 a, u64x2 b)
{
  return (u64x2) _mm_unpacklo_epi64 ((__m128i) a, (__m128i) b);
}
/* 128 bit packs. */
#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi)			\
  {							\
    return (t) fn ((__m128i) lo, (__m128i) hi);		\
  }

_ (i16x8, i8x16, _mm_packs_epi16)
_ (i16x8, u8x16, _mm_packus_epi16)
_ (i32x4, i16x8, _mm_packs_epi32)
_ (i32x4, u16x8, _mm_packus_epi32)

#undef _
#define _signed_binop(n,m,f,g)						\
  /* Unsigned */							\
  always_inline u##n##x##m						\
  u##n##x##m##_##f (u##n##x##m x, u##n##x##m y)				\
  { return (u##n##x##m) _mm_##g##_epu##n ((__m128i) x, (__m128i) y); }	\
									\
  /* Signed */								\
  always_inline i##n##x##m						\
  i##n##x##m##_##f (i##n##x##m x, i##n##x##m y)				\
  { return (i##n##x##m) _mm_##g##_epi##n ((__m128i) x, (__m128i) y); }

/* Addition/subtraction with saturation. */
_signed_binop (8, 16, add_saturate, adds)
_signed_binop (16, 8, add_saturate, adds)
_signed_binop (8, 16, sub_saturate, subs)
_signed_binop (16, 8, sub_saturate, subs)
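/* Example: u8x16_add_saturate clamps each lane to [0, 255] and
   i8x16_add_saturate to [-128, 127], so 250 +sat 10 yields 255. */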
/* Multiplication. */
always_inline i16x8
i16x8_mul_lo (i16x8 x, i16x8 y)
{
  return (i16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
}

always_inline u16x8
u16x8_mul_lo (u16x8 x, u16x8 y)
{
  return (u16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
}

always_inline i16x8
i16x8_mul_hi (i16x8 x, i16x8 y)
{
  /* signed high half needs the signed (epi16) multiply */
  return (i16x8) _mm_mulhi_epi16 ((__m128i) x, (__m128i) y);
}

always_inline u16x8
u16x8_mul_hi (u16x8 x, u16x8 y)
{
  return (u16x8) _mm_mulhi_epu16 ((__m128i) x, (__m128i) y);
}
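/* mul_lo returns the low 16 bits of each 32-bit lane product (the low
   half is the same for signed and unsigned); mul_hi returns the high
   16 bits, where signedness matters. */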
/* 128 bit shifts. */

#define _(p,a,b,c,f)							\
  always_inline p##a##x##b p##a##x##b##_ishift_##c (p##a##x##b x, int i) \
  { return (p##a##x##b) _mm_##f##i_epi##a ((__m128i) x, i); }		\
									\
  always_inline p##a##x##b p##a##x##b##_shift_##c (p##a##x##b x, p##a##x##b y) \
  { return (p##a##x##b) _mm_##f##_epi##a ((__m128i) x, (__m128i) y); }

_(u, 16, 8, left, sll)
_(u, 32, 4, left, sll)
_(u, 64, 2, left, sll)
_(u, 16, 8, right, srl)
_(u, 32, 4, right, srl)
_(u, 64, 2, right, srl)
_(i, 16, 8, left, sll)
_(i, 32, 4, left, sll)
_(i, 64, 2, left, sll)
_(i, 16, 8, right, sra)
_(i, 32, 4, right, sra)

#undef _
#define u8x16_word_shift_left(a,n)  (u8x16) _mm_slli_si128((__m128i) a, n)
#define u8x16_word_shift_right(a,n) (u8x16) _mm_srli_si128((__m128i) a, n)

#define i8x16_word_shift_left(a,n) \
  ((i8x16) u8x16_word_shift_left((u8x16) (a), (n)))
#define i8x16_word_shift_right(a,n) \
  ((i8x16) u8x16_word_shift_right((u8x16) (a), (n)))

#define u16x8_word_shift_left(a,n) \
  ((u16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
#define i16x8_word_shift_left(a,n) \
  ((i16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
#define u16x8_word_shift_right(a,n) \
  ((u16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))
#define i16x8_word_shift_right(a,n) \
  ((i16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))

#define u32x4_word_shift_left(a,n) \
  ((u32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
#define i32x4_word_shift_left(a,n) \
  ((i32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
#define u32x4_word_shift_right(a,n) \
  ((u32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))
#define i32x4_word_shift_right(a,n) \
  ((i32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))

#define u64x2_word_shift_left(a,n) \
  ((u64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
#define i64x2_word_shift_left(a,n) \
  ((i64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
#define u64x2_word_shift_right(a,n) \
  ((u64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
#define i64x2_word_shift_right(a,n) \
  ((i64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
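/* These shift the whole 128-bit register by n elements (bytes for u8x16,
   n * sizeof (element) bytes otherwise).  They expand to
   _mm_slli_si128/_mm_srli_si128, so n must be a compile-time constant.
   Example: u32x4_word_shift_right (x, 1) moves lane i+1 into lane i and
   zero-fills the top lane. */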
/* SSE2 has no rotate instructions: use shifts to simulate them. */
#define _(t,n,lr1,lr2)					\
  always_inline t##x##n					\
  t##x##n##_irotate_##lr1 (t##x##n w, int i)		\
  {							\
    ASSERT (i >= 0 && i <= BITS (t));			\
    return (t##x##n##_ishift_##lr1 (w, i)		\
	    | t##x##n##_ishift_##lr2 (w, BITS (t) - i)); \
  }							\
							\
  always_inline t##x##n					\
  t##x##n##_rotate_##lr1 (t##x##n w, t##x##n i)		\
  {							\
    t##x##n j = t##x##n##_splat (BITS (t));		\
    return (t##x##n##_shift_##lr1 (w, i)		\
	    | t##x##n##_shift_##lr2 (w, j - i));	\
  }

_(u16, 8, left, right);
_(u16, 8, right, left);
_(u32, 4, left, right);
_(u32, 4, right, left);
_(u64, 2, left, right);
_(u64, 2, right, left);

#undef _
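/* Example: u32x4_irotate_left (v, 8) rotates each 32-bit lane left by
   8 bits.  The i == 0 and i == BITS (t) edge cases rely on the SSE
   logical shifts producing zero when the count reaches the lane width. */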
always_inline u8
u8x16_max_scalar (u8x16 x)
{
  x = u8x16_max (x, u8x16_word_shift_right (x, 8));
  x = u8x16_max (x, u8x16_word_shift_right (x, 4));
  x = u8x16_max (x, u8x16_word_shift_right (x, 2));
  x = u8x16_max (x, u8x16_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}

always_inline u8
u8x16_min_scalar (u8x16 x)
{
  x = u8x16_min (x, u8x16_word_shift_right (x, 8));
  x = u8x16_min (x, u8x16_word_shift_right (x, 4));
  x = u8x16_min (x, u8x16_word_shift_right (x, 2));
  x = u8x16_min (x, u8x16_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}

always_inline i16
i16x8_max_scalar (i16x8 x)
{
  x = i16x8_max (x, i16x8_word_shift_right (x, 4));
  x = i16x8_max (x, i16x8_word_shift_right (x, 2));
  x = i16x8_max (x, i16x8_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0);
}

always_inline i16
i16x8_min_scalar (i16x8 x)
{
  x = i16x8_min (x, i16x8_word_shift_right (x, 4));
  x = i16x8_min (x, i16x8_word_shift_right (x, 2));
  x = i16x8_min (x, i16x8_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0);
}
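/* The reductions above fold the vector in halves: each step takes the
   max/min against a shifted copy, so log2(lanes) steps leave the result
   in lane 0. */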
#define u8x16_align_right(a, b, imm) \
  (u8x16) _mm_alignr_epi8 ((__m128i) a, (__m128i) b, imm)

static_always_inline u32
u32x4_min_scalar (u32x4 v)
{
  v = u32x4_min (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = u32x4_min (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}

static_always_inline u32
u32x4_max_scalar (u32x4 v)
{
  v = u32x4_max (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = u32x4_max (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}

static_always_inline i32
i32x4_min_scalar (i32x4 v)
{
  v = i32x4_min (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = i32x4_min (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}

static_always_inline i32
i32x4_max_scalar (i32x4 v)
{
  v = i32x4_max (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = i32x4_max (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  return _mm_movemask_epi8 ((__m128i) v);
}

static_always_inline u16
i8x16_msb_mask (i8x16 v)
{
  return _mm_movemask_epi8 ((__m128i) v);
}

#define CLIB_HAVE_VEC128_MSB_MASK
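/* msb_mask packs the most significant bit of each byte lane into the low
   16 bits of the result (pmovmskb), which is handy for turning compare
   results (all-ones / all-zeros lanes) into a scalar bitmask. */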
static_always_inline u32x4
u32x4_byte_swap (u32x4 v)
{
  u8x16 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x4) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  u8x16 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x8) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}

static_always_inline u8x16
u8x16_reflect (u8x16 v)
{
  u8x16 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
  };
  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) mask);
}
static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
}

static_always_inline u32 __clib_unused
u32x4_sum_elts (u32x4 sum4)
{
  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 8);
  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 4);
  return sum4[0];
}
/* zero- and sign-extensions to wider lane types */
#define _(f,t,i) \
static_always_inline t					\
f##_extend_to_##t (f x)					\
{ return (t) _mm_cvt##i ((__m128i) x); }

_(u8x16, u16x8, epu8_epi16)
_(u8x16, u32x4, epu8_epi32)
_(u8x16, u64x2, epu8_epi64)
_(u16x8, u32x4, epu16_epi32)
_(u16x8, u64x2, epu16_epi64)
_(u32x4, u64x2, epu32_epi64)

_(i8x16, i16x8, epi8_epi16)
_(i8x16, i32x4, epi8_epi32)
_(i8x16, i64x2, epi8_epi64)
_(i16x8, i32x4, epi16_epi32)
_(i16x8, i64x2, epi16_epi64)
_(i32x4, i64x2, epi32_epi64)

#undef _
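/* These expand to the SSE4.1 pmovzx/pmovsx conversions, e.g.
   u8x16_extend_to_u16x8 zero-extends the low 8 bytes into eight 16-bit
   lanes. */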
static_always_inline u64x2
u64x2_gather (void *p0, void *p1)
{
  u64x2 r = { *(u64 *) p0, *(u64 *) p1 };
  return r;
}

static_always_inline u32x4
u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u32x4 r = { *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3 };
  return r;
}

static_always_inline void
u64x2_scatter (u64x2 r, void *p0, void *p1)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
}

static_always_inline void
u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
}

static_always_inline void
u64x2_scatter_one (u64x2 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x4_scatter_one (u32x4 r, int index, void *p)
{
  *(u32 *) p = r[index];
}
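/* SSE has no gather/scatter instructions, so the functions above are
   scalar load/store sequences behind a vector-friendly interface. */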
static_always_inline u8x16
u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
{
  return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}

static_always_inline u8x16
u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __AVX512F__
  /* single ternary-logic op; 0x96 is the truth table of three-way XOR */
  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
					 (__m128i) c, 0x96);
#endif
  return a ^ b ^ c;
}
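/* xor3 computes a ^ b ^ c; with AVX-512 a single vpternlogd suffices,
   otherwise the compiler emits two XORs from the fallback. */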
static_always_inline u8x16
u8x16_load_partial (u8 *data, uword n)
{
  u8x16 r = {};
#if defined(CLIB_HAVE_VEC128_MASK_LOAD_STORE)
  return u8x16_mask_load_zero (data, pow2_mask (n));
#else
  /* Branch thresholds and the small-n tail below are reconstructed;
     they are chosen so every lane shift stays strictly below the lane
     width (n is assumed to be <= 16). */
  if (n > 8)
    {
      u64x2 v = {};
      v[1] = *(u64u *) (data + n - 8);
      v >>= (16 - n) * 8;	/* drop bytes also covered by v[0] */
      v[0] = *(u64u *) data;
      return (u8x16) v;
    }
  else if (n > 4)
    {
      u32x4 v = {};
      v[1] = *(u32u *) (data + n - 4);
      v >>= (8 - n) * 8;
      v[0] = *(u32u *) data;
      return (u8x16) v;
    }
  else if (n > 2)
    {
      u16x8 v = {};
      v[1] = *(u16u *) (data + n - 2);
      v >>= (4 - n) * 8;
      v[0] = *(u16u *) data;
      return (u8x16) v;
    }
  if (n > 1)
    r[1] = data[1];
  if (n > 0)
    r[0] = data[0];
  return r;
#endif
}
static_always_inline void
u8x16_store_partial (u8x16 r, u8 *data, uword n)
{
#if defined(CLIB_HAVE_VEC128_MASK_LOAD_STORE)
  u8x16_mask_store (r, data, pow2_mask (n));
#else
  /* Store the tail first with an overlapping access, then overwrite the
     low bytes.  Branch thresholds and the small-n tail are reconstructed
     so that no shift reaches the lane width (n assumed <= 16). */
  if (n > 8)
    {
      *(u64u *) (data + n - 8) = ((u64x2) r)[1] << ((16 - n) * 8);
      *(u64u *) data = ((u64x2) r)[0];
    }
  else if (n > 4)
    {
      *(u32u *) (data + n - 4) = ((u32x4) r)[1] << ((8 - n) * 8);
      *(u32u *) data = ((u32x4) r)[0];
    }
  else if (n > 2)
    {
      *(u16u *) (data + n - 2) = ((u16x8) r)[1] << ((4 - n) * 8);
      *(u16u *) data = ((u16x8) r)[0];
    }
  else
    {
      if (n > 1)
	data[1] = r[1];
      if (n > 0)
	data[0] = r[0];
    }
#endif
}
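/* Both partial load and store rely on the same trick on the non-masked
   path: one access covers the low bytes and a second, possibly
   overlapping access covers bytes [n - k, n), so no byte outside [0, n)
   is ever touched; the shifts zero or discard the overlapping bytes. */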
#endif /* included_vector_sse2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */