/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64x)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64x)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm256_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }

foreach_avx2_vec256i foreach_avx2_vec256u
#undef _
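
/* The macro above expands once per type listed in foreach_avx2_vec256i
   and foreach_avx2_vec256u, e.g. for u32x8 it generates u32x8_splat,
   u32x8_load_unaligned, u32x8_store_unaligned, u32x8_is_all_zero,
   u32x8_is_equal and u32x8_is_all_equal.  A minimal usage sketch
   (hypothetical caller-side code, not part of this header):

     u32 buf[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };
     u32x8 v = u32x8_load_unaligned (buf);
     if (u32x8_is_all_equal (v, 1))
       u32x8_store_unaligned (v + u32x8_splat (1), buf);

   Loads and stores tolerate unaligned pointers; is_all_zero maps to
   vptest, so the equality helpers avoid a lane-by-lane reduction.  */
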
static_always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}
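
/* Each destination lane of the result is the source lane selected by
   the corresponding index in idx; unlike in-lane shuffles this crosses
   the 128-bit lane boundary.  E.g. reversing the eight u32 lanes
   (illustrative sketch):

     u32x8 rev = u32x8_permute (v, (u32x8) { 7, 6, 5, 4, 3, 2, 1, 0 });  */
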
/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
#define _(t1,t2) \
static_always_inline t1 \
t2##_extract_lo (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
\
static_always_inline t1 \
t2##_extract_hi (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
\
static_always_inline t2 \
t2##_insert_lo (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
static_always_inline t2 \
t2##_insert_hi (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }

_(u8x16, u8x32) _(u16x8, u16x16) _(u32x4, u32x8) _(u64x2, u64x4)
#undef _
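
/* Each pair maps a 128-bit half type to its 256-bit counterpart, so
   e.g. u32x8_extract_lo / u32x8_extract_hi return the low / high four
   u32 lanes as a u32x4 and the insert variants write one back.  A
   sketch that swaps the two halves of v (illustrative only):

     u32x4 lo = u32x8_extract_lo (v);
     u32x4 hi = u32x8_extract_hi (v);
     v = u32x8_insert_hi (u32x8_insert_lo (v, hi), lo);  */
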
static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}
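
/* u8x32_msb_mask gathers the most significant bit of each byte lane
   into one u32, bit n corresponding to lane n.  Combined with a vector
   compare this finds matching bytes in bulk (illustrative sketch; the
   cast is needed because vector comparisons yield a signed vector type):

     u32 mask = u8x32_msb_mask ((u8x32) (v == u8x32_splat (0x2a)));
     if (mask)
       i = __builtin_ctz (mask);	first matching lane  */
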
/* _extend_to_ */
#define _(f,t,i) \
static_always_inline t \
f##_extend_to_##t (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }

_(u16x8, u32x8, epu16_epi32)
_(u16x8, u64x4, epu16_epi64)
_(u32x4, u64x4, epu32_epi64)
_(u8x16, u16x16, epu8_epi16)
_(u8x16, u32x8, epu8_epi32)
_(u8x16, u64x4, epu8_epi64)
_(i16x8, i32x8, epi16_epi32)
_(i16x8, i64x4, epi16_epi64)
_(i32x4, i64x4, epi32_epi64)
_(i8x16, i16x16, epi8_epi16)
_(i8x16, i32x8, epi8_epi32)
_(i8x16, i64x4, epi8_epi64)
#undef _
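
/* Each expansion widens the low lanes of a 128-bit vector into a full
   256-bit vector with the matching zero-extending (epu) or
   sign-extending (epi) conversion, e.g. u16x8_extend_to_u32x8 maps to
   _mm256_cvtepu16_epi32.  Illustrative sketch:

     u16x8 narrow = { 0, 1, 2, 3, 4, 5, 6, 7 };
     u32x8 wide = u16x8_extend_to_u32x8 (narrow);  */
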
#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */