/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2005 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef included_vector_sse2_h
#define included_vector_sse2_h

#include <vppinfra/error_bootstrap.h>	/* for ASSERT */
#include <x86intrin.h>
#define foreach_sse42_vec128i \
  _(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64x)
#define foreach_sse42_vec128u \
  _(u,8,16,epi8) _(u,16,8,epi16) _(u,32,4,epi32) _(u,64,2,epi64x)
#define foreach_sse42_vec128f \
  _(f,32,4,ps) _(f,64,2,pd)
/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm_loadu_si128 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm_storeu_si128 ((__m128i *) p, (__m128i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm_testz_si128 ((__m128i) x, (__m128i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }
foreach_sse42_vec128i foreach_sse42_vec128u
#undef _
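/* Illustrative usage sketch (hypothetical helper, not part of the API):
   round-trip a splat through an unaligned buffer and verify all lanes.
   Assumes the generic vector types (u32x4 etc.) from vppinfra/vector.h. */
static_always_inline int __clib_unused
example_u32x4_splat_roundtrip (void)
{
  u32 tmp[4];
  u32x4_store_unaligned (u32x4_splat (7), tmp);	/* no alignment required */
  return u32x4_is_all_equal (u32x4_load_unaligned (tmp), 7);	/* 1 */
}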
/* min, max */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_min (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm_min_##i ((__m128i) a, (__m128i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_max (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm_max_##i ((__m128i) a, (__m128i) b); }
_(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64)
_(u,8,16,epu8) _(u,16,8,epu16) _(u,32,4,epu32) _(u,64,2,epu64)
#undef _
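/* Usage sketch (hypothetical helper): clamp each byte lane to [lo, hi]
   with the generated unsigned min/max. */
static_always_inline u8x16 __clib_unused
example_u8x16_clamp (u8x16 v, u8 lo, u8 hi)
{
  return u8x16_min (u8x16_max (v, u8x16_splat (lo)), u8x16_splat (hi));
}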
#define CLIB_VEC128_SPLAT_DEFINED
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
/* 128 bit interleaves. */
always_inline u8x16
u8x16_interleave_hi (u8x16 a, u8x16 b)
{
  return (u8x16) _mm_unpackhi_epi8 ((__m128i) a, (__m128i) b);
}

always_inline u8x16
u8x16_interleave_lo (u8x16 a, u8x16 b)
{
  return (u8x16) _mm_unpacklo_epi8 ((__m128i) a, (__m128i) b);
}

always_inline u16x8
u16x8_interleave_hi (u16x8 a, u16x8 b)
{
  return (u16x8) _mm_unpackhi_epi16 ((__m128i) a, (__m128i) b);
}

always_inline u16x8
u16x8_interleave_lo (u16x8 a, u16x8 b)
{
  return (u16x8) _mm_unpacklo_epi16 ((__m128i) a, (__m128i) b);
}

always_inline u32x4
u32x4_interleave_hi (u32x4 a, u32x4 b)
{
  return (u32x4) _mm_unpackhi_epi32 ((__m128i) a, (__m128i) b);
}

always_inline u32x4
u32x4_interleave_lo (u32x4 a, u32x4 b)
{
  return (u32x4) _mm_unpacklo_epi32 ((__m128i) a, (__m128i) b);
}

always_inline u64x2
u64x2_interleave_hi (u64x2 a, u64x2 b)
{
  return (u64x2) _mm_unpackhi_epi64 ((__m128i) a, (__m128i) b);
}

always_inline u64x2
u64x2_interleave_lo (u64x2 a, u64x2 b)
{
  return (u64x2) _mm_unpacklo_epi64 ((__m128i) a, (__m128i) b);
}
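/* Semantics sketch (hypothetical helper): interleave_lo pairs the low
   halves of a and b element by element; interleave_hi does the same for
   the high halves. */
static_always_inline u8x16 __clib_unused
example_u8x16_interleave (void)
{
  u8x16 a = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
  u8x16 b = { 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 };
  /* yields { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } */
  return u8x16_interleave_lo (a, b);
}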
/* 64 bit interleaves. */
always_inline u8x8
u8x8_interleave_hi (u8x8 a, u8x8 b)
{
  return (u8x8) _m_punpckhbw ((__m64) a, (__m64) b);
}

always_inline u8x8
u8x8_interleave_lo (u8x8 a, u8x8 b)
{
  return (u8x8) _m_punpcklbw ((__m64) a, (__m64) b);
}

always_inline u16x4
u16x4_interleave_hi (u16x4 a, u16x4 b)
{
  return (u16x4) _m_punpckhwd ((__m64) a, (__m64) b);
}

always_inline u16x4
u16x4_interleave_lo (u16x4 a, u16x4 b)
{
  return (u16x4) _m_punpcklwd ((__m64) a, (__m64) b);
}

always_inline u32x2
u32x2_interleave_hi (u32x2 a, u32x2 b)
{
  return (u32x2) _m_punpckhdq ((__m64) a, (__m64) b);
}

always_inline u32x2
u32x2_interleave_lo (u32x2 a, u32x2 b)
{
  return (u32x2) _m_punpckldq ((__m64) a, (__m64) b);
}
/* 128 bit packs. */
always_inline u8x16
u16x8_pack (u16x8 lo, u16x8 hi)
{
  return (u8x16) _mm_packus_epi16 ((__m128i) lo, (__m128i) hi);
}

always_inline i8x16
i16x8_pack (i16x8 lo, i16x8 hi)
{
  return (i8x16) _mm_packs_epi16 ((__m128i) lo, (__m128i) hi);
}

always_inline u16x8
u32x4_pack (u32x4 lo, u32x4 hi)
{
  return (u16x8) _mm_packs_epi32 ((__m128i) lo, (__m128i) hi);
}

/* 64 bit packs. */
always_inline u8x8
u16x4_pack (u16x4 lo, u16x4 hi)
{
  return (u8x8) _m_packuswb ((__m64) lo, (__m64) hi);
}

always_inline i8x8
i16x4_pack (i16x4 lo, i16x4 hi)
{
  return (i8x8) _m_packsswb ((__m64) lo, (__m64) hi);
}

always_inline u16x4
u32x2_pack (u32x2 lo, u32x2 hi)
{
  return (u16x4) _m_packssdw ((__m64) lo, (__m64) hi);
}

always_inline i16x4
i32x2_pack (i32x2 lo, i32x2 hi)
{
  return (i16x4) _m_packssdw ((__m64) lo, (__m64) hi);
}
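/* Saturation sketch (hypothetical helper): packing narrows each element
   with saturation, so values above 255 clip rather than wrap. */
static_always_inline u8x16 __clib_unused
example_u16x8_pack (void)
{
  u16x8 lo = u16x8_splat (300);	/* > 255: each result byte saturates to 255 */
  u16x8 hi = u16x8_splat (7);	/* fits: passes through unchanged */
  /* low 8 result bytes come from lo, high 8 from hi */
  return u16x8_pack (lo, hi);
}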
always_inline u64x2
u64x2_read_lo (u64x2 x, u64 * a)
{
  return (u64x2) _mm_loadl_pi ((__m128) x, (__m64 *) a);
}

always_inline u64x2
u64x2_read_hi (u64x2 x, u64 * a)
{
  return (u64x2) _mm_loadh_pi ((__m128) x, (__m64 *) a);
}

always_inline void
u64x2_write_lo (u64x2 x, u64 * a)
{
  _mm_storel_pi ((__m64 *) a, (__m128) x);
}

always_inline void
u64x2_write_hi (u64x2 x, u64 * a)
{
  _mm_storeh_pi ((__m64 *) a, (__m128) x);
}
/* Note: the signed variants previously expanded to the unsigned epu
   intrinsics; the macro now pastes the correct epi/epu suffix. */
#define _signed_binop(n,m,f,g) \
  /* Unsigned */ \
  always_inline u##n##x##m \
  u##n##x##m##_##f (u##n##x##m x, u##n##x##m y) \
  { return (u##n##x##m) _mm_##g##_epu##n ((__m128i) x, (__m128i) y); } \
  \
  /* Signed */ \
  always_inline i##n##x##m \
  i##n##x##m##_##f (i##n##x##m x, i##n##x##m y) \
  { return (i##n##x##m) _mm_##g##_epi##n ((__m128i) x, (__m128i) y); }

/* Addition/subtraction with saturation. */
_signed_binop (8, 16, add_saturate, adds)
_signed_binop (16, 8, add_saturate, adds)
_signed_binop (8, 16, sub_saturate, subs)
_signed_binop (16, 8, sub_saturate, subs)
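/* Saturation sketch (hypothetical helper): unsigned lanes stick at the
   type bounds instead of wrapping. */
static_always_inline int __clib_unused
example_u8x16_add_saturate (void)
{
  u8x16 a = u8x16_splat (250);
  u8x16 b = u8x16_splat (10);
  /* 250 + 10 saturates to 255 in every lane (modular add would give 4) */
  return u8x16_is_all_equal (u8x16_add_saturate (a, b), 255);	/* 1 */
}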
/* Multiplication. */
always_inline i16x8
i16x8_mul_lo (i16x8 x, i16x8 y)
{
  return (i16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
}

always_inline u16x8
u16x8_mul_lo (u16x8 x, u16x8 y)
{
  return (u16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
}

always_inline i16x8
i16x8_mul_hi (i16x8 x, i16x8 y)
{
  /* signed high half must use the epi16 form; epu16 is wrong for
     negative inputs */
  return (i16x8) _mm_mulhi_epi16 ((__m128i) x, (__m128i) y);
}

always_inline u16x8
u16x8_mul_hi (u16x8 x, u16x8 y)
{
  return (u16x8) _mm_mulhi_epu16 ((__m128i) x, (__m128i) y);
}
/* 128 bit shifts. */

#define _(p,a,b,c,f) \
  always_inline p##a##x##b p##a##x##b##_ishift_##c (p##a##x##b x, int i) \
  { return (p##a##x##b) _mm_##f##i_epi##a ((__m128i) x, i); } \
  \
  always_inline p##a##x##b p##a##x##b##_shift_##c (p##a##x##b x, p##a##x##b y) \
  { return (p##a##x##b) _mm_##f##_epi##a ((__m128i) x, (__m128i) y); }

_(u, 16, 8, left, sll)
_(u, 32, 4, left, sll)
_(u, 64, 2, left, sll)
_(u, 16, 8, right, srl)
_(u, 32, 4, right, srl)
_(u, 64, 2, right, srl)
_(i, 16, 8, left, sll)
_(i, 32, 4, left, sll)
_(i, 64, 2, left, sll)
_(i, 16, 8, right, sra)
_(i, 32, 4, right, sra)

#undef _
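/* Count-semantics sketch (hypothetical helper): the immediate forms shift
   every lane by a scalar; the register forms read a single count from the
   low 64 bits of the count vector, not one count per lane. */
static_always_inline u32x4 __clib_unused
example_u32x4_shifts (u32x4 x)
{
  u32x4 c = { 3, 0, 0, 0 };	/* count lives in the low 64 bits */
  x = u32x4_ishift_left (x, 3);	/* every lane <<= 3 */
  return u32x4_shift_right (x, c);	/* every lane >>= 3 */
}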
/* 64 bit shifts. */
always_inline u16x4
u16x4_shift_left (u16x4 x, u16x4 i)
{
  return (u16x4) _m_psllw ((__m64) x, (__m64) i);
}

always_inline u32x2
u32x2_shift_left (u32x2 x, u32x2 i)
{
  return (u32x2) _m_pslld ((__m64) x, (__m64) i);
}

always_inline u16x4
u16x4_shift_right (u16x4 x, u16x4 i)
{
  return (u16x4) _m_psrlw ((__m64) x, (__m64) i);
}

always_inline u32x2
u32x2_shift_right (u32x2 x, u32x2 i)
{
  return (u32x2) _m_psrld ((__m64) x, (__m64) i);
}

always_inline i16x4
i16x4_shift_left (i16x4 x, i16x4 i)
{
  return (i16x4) _m_psllw ((__m64) x, (__m64) i);
}

always_inline i32x2
i32x2_shift_left (i32x2 x, i32x2 i)
{
  return (i32x2) _m_pslld ((__m64) x, (__m64) i);
}

always_inline i16x4
i16x4_shift_right (i16x4 x, i16x4 i)
{
  return (i16x4) _m_psraw ((__m64) x, (__m64) i);
}

always_inline i32x2
i32x2_shift_right (i32x2 x, i32x2 i)
{
  return (i32x2) _m_psrad ((__m64) x, (__m64) i);
}
#define u8x16_word_shift_left(a,n)  (u8x16) _mm_slli_si128((__m128i) a, n)
#define u8x16_word_shift_right(a,n) (u8x16) _mm_srli_si128((__m128i) a, n)

#define i8x16_word_shift_left(a,n) \
  ((i8x16) u8x16_word_shift_left((u8x16) (a), (n)))
#define i8x16_word_shift_right(a,n) \
  ((i8x16) u8x16_word_shift_right((u8x16) (a), (n)))

#define u16x8_word_shift_left(a,n) \
  ((u16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
#define i16x8_word_shift_left(a,n) \
  ((i16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
#define u16x8_word_shift_right(a,n) \
  ((u16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))
#define i16x8_word_shift_right(a,n) \
  ((i16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))

#define u32x4_word_shift_left(a,n) \
  ((u32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
#define i32x4_word_shift_left(a,n) \
  ((i32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
#define u32x4_word_shift_right(a,n) \
  ((u32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))
#define i32x4_word_shift_right(a,n) \
  ((i32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))

#define u64x2_word_shift_left(a,n) \
  ((u64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
#define i64x2_word_shift_left(a,n) \
  ((i64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
#define u64x2_word_shift_right(a,n) \
  ((u64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
#define i64x2_word_shift_right(a,n) \
  ((i64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
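/* Element-shift sketch (hypothetical helper): word shifts move whole
   elements through the register, filling with zeros. */
static_always_inline int __clib_unused
example_u32x4_word_shift (void)
{
  u32x4 v = { 1, 2, 3, 4 };
  u32x4 l = u32x4_word_shift_left (v, 1);	/* { 0, 1, 2, 3 } */
  u32x4 r = u32x4_word_shift_right (v, 1);	/* { 2, 3, 4, 0 } */
  return l[1] == 1 && r[0] == 2;	/* 1 */
}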
/* SSE2 has no rotate instructions: use shifts to simulate them. */
#define _(t,n,lr1,lr2) \
  always_inline t##x##n \
  t##x##n##_irotate_##lr1 (t##x##n w, int i) \
  { \
    ASSERT (i >= 0 && i <= BITS (t)); \
    return (t##x##n##_ishift_##lr1 (w, i) \
	    | t##x##n##_ishift_##lr2 (w, BITS (t) - i)); \
  } \
  \
  always_inline t##x##n \
  t##x##n##_rotate_##lr1 (t##x##n w, t##x##n i) \
  { \
    t##x##n j = t##x##n##_splat (BITS (t)); \
    return (t##x##n##_shift_##lr1 (w, i) \
	    | t##x##n##_shift_##lr2 (w, j - i)); \
  }
_(u16, 8, left, right);
_(u16, 8, right, left);
_(u32, 4, left, right);
_(u32, 4, right, left);
_(u64, 2, left, right);
_(u64, 2, right, left);

#undef _
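/* Rotate sketch (hypothetical helper): a rotate is the OR of two opposing
   shifts, as defined above. */
static_always_inline int __clib_unused
example_u32x4_irotate (void)
{
  u32x4 v = u32x4_splat (0x80000001);
  /* left-rotating by 1 carries the top bit around: 0x80000001 -> 0x3 */
  return u32x4_is_all_equal (u32x4_irotate_left (v, 1), 3);	/* 1 */
}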
#define _(t,n,lr1,lr2) \
  always_inline t##x##n \
  t##x##n##_word_rotate2_##lr1 (t##x##n w0, t##x##n w1, int i) \
  { \
    int m = sizeof (t##x##n) / sizeof (t); \
    ASSERT (i >= 0 && i < m); \
    return (t##x##n##_word_shift_##lr1 (w0, i) \
	    | t##x##n##_word_shift_##lr2 (w1, m - i)); \
  } \
  \
  always_inline t##x##n \
  t##x##n##_word_rotate_##lr1 (t##x##n w0, int i) \
  { return t##x##n##_word_rotate2_##lr1 (w0, w0, i); }
_(u8, 16, left, right);
_(u8, 16, right, left);
_(u16, 8, left, right);
_(u16, 8, right, left);
_(u32, 4, left, right);
_(u32, 4, right, left);
_(u64, 2, left, right);
_(u64, 2, right, left);

#undef _
#define u32x4_select(A,MASK) \
({ \
  u32x4 _x, _y; \
  _x = (A); \
  asm volatile ("pshufd %[mask], %[x], %[y]" \
		: /* outputs */ [y] "=x" (_y) \
		: /* inputs */ [x] "x" (_x), [mask] "i" (MASK)); \
  _y; \
})

#define u32x4_splat_word(x,i) \
  u32x4_select ((x), (((i) << (2*0)) \
		      | ((i) << (2*1)) \
		      | ((i) << (2*2)) \
		      | ((i) << (2*3))))
/* Extract low order 32 bit word. */
always_inline u32
u32x4_get0 (u32x4 x)
{
  u32 result;
  asm volatile ("movd %[x], %[result]"
		: /* outputs */ [result] "=r" (result)
		: /* inputs */ [x] "x" (x));
  return result;
}

always_inline u32x4
u32x4_set0 (u32 x)
{
  u32x4 result;
  asm volatile ("movd %[x], %[result]"
		: /* outputs */ [result] "=x" (result)
		: /* inputs */ [x] "r" (x));
  return result;
}

always_inline i32x4
i32x4_set0 (i32 x)
{
  return (i32x4) u32x4_set0 ((u32) x);
}

always_inline i32
i32x4_get0 (i32x4 x)
{
  return (i32) u32x4_get0 ((u32x4) x);
}
/* Converts all ones/zeros compare mask to bitmap. */
always_inline u32
u8x16_compare_byte_mask (u8x16 x)
{
  return _mm_movemask_epi8 ((__m128i) x);
}

extern u8 u32x4_compare_word_mask_table[256];

always_inline u32
u32x4_compare_word_mask (u32x4 x)
{
  u32 m = u8x16_compare_byte_mask ((u8x16) x);
  return (u32x4_compare_word_mask_table[(m >> 0) & 0xff]
	  | (u32x4_compare_word_mask_table[(m >> 8) & 0xff] << 2));
}
always_inline u32
u8x16_zero_byte_mask (u8x16 x)
{
  u8x16 zero = { 0 };
  return u8x16_compare_byte_mask (x == zero);
}

always_inline u32
u16x8_zero_byte_mask (u16x8 x)
{
  u16x8 zero = { 0 };
  return u8x16_compare_byte_mask ((u8x16) (x == zero));
}

always_inline u32
u32x4_zero_byte_mask (u32x4 x)
{
  u32x4 zero = { 0 };
  return u8x16_compare_byte_mask ((u8x16) (x == zero));
}
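/* Bitmap sketch (hypothetical helper): bit i of the result is set iff
   byte i of the input is zero. */
static_always_inline int __clib_unused
example_u8x16_zero_byte_mask (void)
{
  u8x16 v = { 0, 1, 0, 2, 0, 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
  /* zero bytes at indices 0, 2, 4 and 5 -> bits 0x01|0x04|0x10|0x20 */
  return u8x16_zero_byte_mask (v) == 0x35;	/* 1 */
}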
always_inline u8
u8x16_max_scalar (u8x16 x)
{
  x = u8x16_max (x, u8x16_word_shift_right (x, 8));
  x = u8x16_max (x, u8x16_word_shift_right (x, 4));
  x = u8x16_max (x, u8x16_word_shift_right (x, 2));
  x = u8x16_max (x, u8x16_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}

always_inline u8
u8x16_min_scalar (u8x16 x)
{
  x = u8x16_min (x, u8x16_word_shift_right (x, 8));
  x = u8x16_min (x, u8x16_word_shift_right (x, 4));
  x = u8x16_min (x, u8x16_word_shift_right (x, 2));
  x = u8x16_min (x, u8x16_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}

always_inline i16
i16x8_max_scalar (i16x8 x)
{
  x = i16x8_max (x, i16x8_word_shift_right (x, 4));
  x = i16x8_max (x, i16x8_word_shift_right (x, 2));
  x = i16x8_max (x, i16x8_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0);
}

always_inline i16
i16x8_min_scalar (i16x8 x)
{
  x = i16x8_min (x, i16x8_word_shift_right (x, 4));
  x = i16x8_min (x, i16x8_word_shift_right (x, 2));
  x = i16x8_min (x, i16x8_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0);
}
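/* Reduction sketch (hypothetical helper): the shift-and-compare ladder
   above folds 16 lanes down to a single scalar. */
static_always_inline int __clib_unused
example_u8x16_minmax_scalar (void)
{
  u8x16 v = { 9, 1, 3, 7, 2, 8, 4, 6, 5, 0, 11, 10, 13, 12, 15, 14 };
  return u8x16_max_scalar (v) == 15 && u8x16_min_scalar (v) == 0;	/* 1 */
}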
#define u8x16_align_right(a, b, imm) \
  (u8x16) _mm_alignr_epi8 ((__m128i) a, (__m128i) b, imm)
static_always_inline u32
u32x4_min_scalar (u32x4 v)
{
  v = u32x4_min (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = u32x4_min (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}

static_always_inline u32
u32x4_max_scalar (u32x4 v)
{
  v = u32x4_max (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = u32x4_max (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}

static_always_inline i32
i32x4_min_scalar (i32x4 v)
{
  v = i32x4_min (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = i32x4_min (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}

static_always_inline i32
i32x4_max_scalar (i32x4 v)
{
  v = i32x4_max (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = i32x4_max (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  return _mm_movemask_epi8 ((__m128i) v);
}

#define CLIB_HAVE_VEC128_MSB_MASK
static_always_inline u32x4
u32x4_byte_swap (u32x4 v)
{
  u8x16 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x4) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  u8x16 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x8) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}
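/* Endianness sketch (hypothetical helper): on little-endian x86 a per-lane
   byte swap converts between network and host byte order. */
static_always_inline u32x4 __clib_unused
example_u32x4_ntohl (u32x4 net)
{
  return u32x4_byte_swap (net);	/* e.g. 0x0a000001 <-> 0x0100000a */
}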
static_always_inline u8x16
u8x16_reflect (u8x16 v)
{
  u8x16 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
  };
  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) mask);
}
static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
}
static_always_inline u32 __clib_unused
u32x4_sum_elts (u32x4 sum4)
{
  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 8);
  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 4);
  return sum4[0];
}
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) m);
}
static_always_inline u32x4
u32x4_shuffle (u32x4 v, const int a, const int b, const int c, const int d)
{
#if defined(__clang__) || !__OPTIMIZE__
  u32x4 r = { v[a], v[b], v[c], v[d] };
  return r;
#else
  return (u32x4) _mm_shuffle_epi32 ((__m128i) v,
				    a | b << 2 | c << 4 | d << 6);
#endif
}
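/* Shuffle sketch (hypothetical helper): the four index arguments select
   source lanes, so (3, 2, 1, 0) reverses element order. */
static_always_inline u32x4 __clib_unused
example_u32x4_reverse (u32x4 v)
{
  return u32x4_shuffle (v, 3, 2, 1, 0);
}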
/* _extend_to_ */
#define _(f,t,i) \
static_always_inline t \
f##_extend_to_##t (f x) \
{ return (t) _mm_cvt##i ((__m128i) x); }

_(u8x16, u16x8, epu8_epi16)
_(u8x16, u32x4, epu8_epi32)
_(u8x16, u64x2, epu8_epi64)
_(u16x8, u32x4, epu16_epi32)
_(u16x8, u64x2, epu16_epi64)
_(u32x4, u64x2, epu32_epi64)

_(i8x16, i16x8, epi8_epi16)
_(i8x16, i32x4, epi8_epi32)
_(i8x16, i64x2, epi8_epi64)
_(i16x8, i32x4, epi16_epi32)
_(i16x8, i64x2, epi16_epi64)
_(i32x4, i64x2, epi32_epi64)

#undef _
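/* Widening sketch (hypothetical helper): the epu conversions zero-extend,
   the epi conversions sign-extend, always from the low elements. */
static_always_inline int __clib_unused
example_extend (void)
{
  u32x4 w = u8x16_extend_to_u32x4 (u8x16_splat (200));
  i32x4 s = i8x16_extend_to_i32x4 (i8x16_splat (-1));
  return w[0] == 200	/* zero-extended */
    && s[0] == -1;	/* sign-extended */
}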
static_always_inline u64x2
u64x2_gather (void *p0, void *p1)
{
  u64x2 r = { *(u64 *) p0, *(u64 *) p1 };
  return r;
}

static_always_inline u32x4
u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u32x4 r = { *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3 };
  return r;
}

static_always_inline void
u64x2_scatter (u64x2 r, void *p0, void *p1)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
}

static_always_inline void
u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
}

static_always_inline void
u64x2_scatter_one (u64x2 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x4_scatter_one (u32x4 r, int index, void *p)
{
  *(u32 *) p = r[index];
}
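/* Gather/scatter sketch (hypothetical helper): collect four non-contiguous
   words into one vector, then write them back out. */
static_always_inline void __clib_unused
example_u32x4_gather_scatter (u32 * src, u32 * dst)
{
  u32x4 v = u32x4_gather (src + 0, src + 4, src + 8, src + 12);
  u32x4_scatter (v, dst + 0, dst + 4, dst + 8, dst + 12);
}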
static_always_inline u8x16
u8x16_is_greater (u8x16 v1, u8x16 v2)
{
  return (u8x16) _mm_cmpgt_epi8 ((__m128i) v1, (__m128i) v2);
}

static_always_inline u8x16
u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
{
  return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}
static_always_inline u8x16
u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __AVX512F__
  /* 0x96 is the truth table for a three-way XOR */
  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
					 (__m128i) c, 0x96);
#endif
  return a ^ b ^ c;
}
/* _mm_mask_loadu_epi8 requires AVX512BW + AVX512VL, so guard the
   definition accordingly. */
#if defined(__AVX512BW__) && defined(__AVX512VL__)
static_always_inline u8x16
u8x16_mask_load (u8x16 a, void *p, u16 mask)
{
  return (u8x16) _mm_mask_loadu_epi8 ((__m128i) a, mask, p);
}
#endif
#endif /* included_vector_sse2_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */