2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #ifndef included_vector_avx512_h
17 #define included_vector_avx512_h
19 #include <vppinfra/clib.h>
20 #include <x86intrin.h>
/* Iterator macros over the 512-bit vector types.  Each expands the
 * user-supplied macro _ once per type as _(sign, bits, lanes, suffix):
 *   sign   - type prefix (i = signed int, u = unsigned int, f = float)
 *   bits   - element width in bits
 *   lanes  - element count (bits * lanes == 512)
 *   suffix - Intel intrinsic name suffix for that element type */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)
/* splat, load_unaligned, store_unaligned
 *
 * Generator macro: for each vector type t##s##x##c it emits
 *   t##s##x##c##_splat (x)              - broadcast scalar x to all c lanes
 *   t##s##x##c##_load_unaligned (p)     - load 64 bytes from unaligned p
 *   t##s##x##c##_store_unaligned (v, p) - store v (64 bytes) to unaligned p
 * Loads/stores go through the integer _mm512_{loadu,storeu}_si512
 * intrinsics regardless of element type. */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
45 foreach_avx512_vec512i foreach_avx512_vec512u
/* Return a 32-bit mask whose bit i is the most significant (sign) bit of
 * 16-bit lane i of v (vpmovw2m via _mm512_movepi16_mask). */
static_always_inline u32
u16x32_msb_mask (u16x32 v)
return (u32) _mm512_movepi16_mask ((__m512i) v);
55 #endif /* included_vector_avx512_h */
57 * fd.io coding-style-patch-verification: ON
60 * eval: (c-set-style "gnu")