/* SPDX-License-Identifier: Apache-2.0
 * Copyright(c) 2021 Cisco Systems, Inc.
 */

#ifndef included_vector_funcs_h
#define included_vector_funcs_h
#include <vppinfra/clib.h>
#include <vppinfra/memcpy.h>

static_always_inline u64
clib_mask_compare_u16_x64 (u16 v, u16 *a, u32 n_elts)
{
  u64 mask = 0;
#if defined(CLIB_HAVE_VEC512)
  u16x32 v32 = u16x32_splat (v);
  u16x32u *av = (u16x32u *) a;
  mask = ((u64) u16x32_is_equal_mask (av[0], v32) |
          (u64) u16x32_is_equal_mask (av[1], v32) << 32);
#elif defined(CLIB_HAVE_VEC256)
  u16x16 v16 = u16x16_splat (v);
  u16x16u *av = (u16x16u *) a;
  i8x32 x;

  x = i8x32_pack (v16 == av[0], v16 == av[1]);
  mask = i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3));
  x = i8x32_pack (v16 == av[2], v16 == av[3]);
  mask |= (u64) i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3)) << 32;
#elif defined(CLIB_HAVE_VEC128) && defined(__ARM_NEON)
  u16x8 v8 = u16x8_splat (v);
  u16x8 m = { 1, 2, 4, 8, 16, 32, 64, 128 };
  u16x8u *av = (u16x8u *) a;

  /* Compare each u16 element with v8; the result gives 0xffff in each
     element of the resulting vector where the comparison is true.
     Bitwise AND with m leaves a single bit set per matching element, and
     the offset of that bit encodes the element index. Finally, vaddvq_u16()
     sums all elements of the vector, which yields an 8-bit bitmap. */

  for (int i = 0; i < 8; i++)
    mask |= (u64) vaddvq_u16 ((av[i] == v8) & m) << (i * 8);
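
  /* Worked example (hypothetical values, not from the original source): with
     v = 7, a vector holding { 7, 0, 7, 0, 0, 0, 0, 7 } compares equal in
     lanes 0, 2 and 7; ANDing with m leaves { 1, 0, 4, 0, 0, 0, 0, 128 }, and
     vaddvq_u16() sums the lanes to 0x85, i.e. bits 0, 2 and 7 set in this
     vector's 8-bit slice of the mask. */
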
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u16x8 v8 = u16x8_splat (v);
  u16x8u *av = (u16x8u *) a;
  mask = ((u64) i8x16_msb_mask (i8x16_pack (v8 == av[0], v8 == av[1])) |
          (u64) i8x16_msb_mask (i8x16_pack (v8 == av[2], v8 == av[3])) << 16 |
          (u64) i8x16_msb_mask (i8x16_pack (v8 == av[4], v8 == av[5])) << 32 |
          (u64) i8x16_msb_mask (i8x16_pack (v8 == av[6], v8 == av[7])) << 48);
#else
  for (int i = 0; i < n_elts; i++)
    if (a[i] == v)
      mask |= 1ULL << i;
#endif
  return mask;
}

/** \brief Compare 16-bit elements with provided value and return bitmap

    @param v value to compare elements with
    @param a array of u16 elements
    @param mask array of u64 where resulting mask will be stored
    @param n_elts number of elements in the array
*/

static_always_inline void
clib_mask_compare_u16 (u16 v, u16 *a, u64 *mask, u32 n_elts)
{
  while (n_elts >= 64)
    {
      mask++[0] = clib_mask_compare_u16_x64 (v, a, 64);
      n_elts -= 64;
      a += 64;
    }

  if (PREDICT_TRUE (n_elts == 0))
    return;

  mask[0] = clib_mask_compare_u16_x64 (v, a, n_elts) & pow2_mask (n_elts);
}
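
/* Usage sketch (not part of the original header): build a match bitmap over
   an array of 16-bit values and count the matches. The function name, the
   compared value 0x0800 and the caller-provided bitmap sizing are
   illustrative assumptions; note that the vector code paths above read the
   source array in full 64-element chunks. */
static_always_inline u32
clib_mask_compare_u16_example (u16 *values, u32 n_values, u64 *bitmap)
{
  u32 n_matches = 0;

  /* bit i of bitmap is set when values[i] == 0x0800, one u64 per 64 elts */
  clib_mask_compare_u16 (0x0800, values, bitmap, n_values);

  for (u32 i = 0; i < (n_values + 63) / 64; i++)
    n_matches += count_set_bits (bitmap[i]);

  return n_matches;
}
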
static_always_inline u64
clib_mask_compare_u32_x64 (u32 v, u32 *a, u32 n_elts)
{
  u64 mask = 0;
#if defined(CLIB_HAVE_VEC512)
  u32x16 v16 = u32x16_splat (v);
  u32x16u *av = (u32x16u *) a;
  mask = ((u64) u32x16_is_equal_mask (av[0], v16) |
          (u64) u32x16_is_equal_mask (av[1], v16) << 16 |
          (u64) u32x16_is_equal_mask (av[2], v16) << 32 |
          (u64) u32x16_is_equal_mask (av[3], v16) << 48);
#elif defined(CLIB_HAVE_VEC256)
  u32x8 v8 = u32x8_splat (v);
  u32x8u *av = (u32x8u *) a;
  u32x8 m = { 0, 4, 1, 5, 2, 6, 3, 7 };
  i8x32 c;

  c = i8x32_pack (i16x16_pack ((i32x8) (v8 == av[0]), (i32x8) (v8 == av[1])),
                  i16x16_pack ((i32x8) (v8 == av[2]), (i32x8) (v8 == av[3])));
  mask = i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m));

  c = i8x32_pack (i16x16_pack ((i32x8) (v8 == av[4]), (i32x8) (v8 == av[5])),
                  i16x16_pack ((i32x8) (v8 == av[6]), (i32x8) (v8 == av[7])));
  mask |= (u64) i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m)) << 32;

#elif defined(CLIB_HAVE_VEC128) && defined(__ARM_NEON)
  u32x4 v4 = u32x4_splat (v);
  u32x4 m = { 1, 2, 4, 8 };
  u32x4u *av = (u32x4u *) a;

  /* Compare each u32 element with v4; the result gives -1 in each element
     of the resulting vector where the comparison is true.
     Bitwise AND with m leaves a single bit set per matching element, and
     the offset of that bit encodes the element index. Finally, vaddvq_u32()
     sums all elements of the vector, which yields a 4-bit bitmap. */

  for (int i = 0; i < 16; i++)
    mask |= (u64) vaddvq_u32 ((av[i] == v4) & m) << (i * 4);
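
  /* Here each vector covers only 4 elements, so every iteration contributes
     a 4-bit nibble and 16 iterations assemble the full 64-bit mask.
     Hypothetical example: with v = 5, lanes { 5, 9, 5, 5 } match in lanes
     0, 2 and 3, so ANDing with m = { 1, 2, 4, 8 } and summing gives 0xd. */
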
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u32x4 v4 = u32x4_splat (v);
  u32x4u *av = (u32x4u *) a;

  for (int i = 0; i < 4; i++)
    {
      i16x8 p1 = i16x8_pack (v4 == av[0], v4 == av[1]);
      i16x8 p2 = i16x8_pack (v4 == av[2], v4 == av[3]);
      mask |= (u64) i8x16_msb_mask (i8x16_pack (p1, p2)) << (i * 16);
      av += 4;
    }

#else
  for (int i = 0; i < n_elts; i++)
    if (a[i] == v)
      mask |= 1ULL << i;
#endif
  return mask;
}

/** \brief Compare 32-bit elements with provided value and return bitmap

    @param v value to compare elements with
    @param a array of u32 elements
    @param mask array of u64 where resulting mask will be stored
    @param n_elts number of elements in the array
*/

static_always_inline void
clib_mask_compare_u32 (u32 v, u32 *a, u64 *bitmap, u32 n_elts)
{
  while (n_elts >= 64)
    {
      bitmap++[0] = clib_mask_compare_u32_x64 (v, a, 64);
      n_elts -= 64;
      a += 64;
    }

  if (PREDICT_TRUE (n_elts == 0))
    return;

  bitmap[0] = clib_mask_compare_u32_x64 (v, a, n_elts) & pow2_mask (n_elts);
}

static_always_inline u32 *
clib_compress_u32_x64 (u32 *dst, u32 *src, u64 mask)
{
#if defined(CLIB_HAVE_VEC512_COMPRESS)
  u32x16u *sv = (u32x16u *) src;
  for (int i = 0; i < 4; i++)
    {
      int cnt = _popcnt32 ((u16) mask);
      u32x16_compress_store (sv[i], mask, dst);
      dst += cnt;
      mask >>= 16;
    }

#elif defined(CLIB_HAVE_VEC256_COMPRESS)
  u32x8u *sv = (u32x8u *) src;
  for (int i = 0; i < 8; i++)
    {
      int cnt = _popcnt32 ((u8) mask);
      u32x8_compress_store (sv[i], mask, dst);
      dst += cnt;
      mask >>= 8;
    }
#else
  while (mask)
    {
      u16 bit = count_trailing_zeros (mask);
      mask = clear_lowest_set_bit (mask);
      dst++[0] = src[bit];
    }
#endif
  return dst;
}

/** \brief Compress array of 32-bit elements into destination array based on
 * mask

    @param dst destination array of u32 elements
    @param src source array of u32 elements
    @param mask array of u64 values representing compress mask
    @param n_elts number of elements in the source array
    @return number of elements stored in destination array
*/

static_always_inline u32
clib_compress_u32 (u32 *dst, u32 *src, u64 *mask, u32 n_elts)
{
  u32 *dst0 = dst;
  while (n_elts >= 64)
    {
      if (mask[0] == ~0ULL)
        {
          clib_memcpy_u32 (dst, src, 64);
          dst += 64;
        }
      else
        dst = clib_compress_u32_x64 (dst, src, mask[0]);

      mask++;
      src += 64;
      n_elts -= 64;
    }

  if (PREDICT_TRUE (n_elts == 0))
    return dst - dst0;

  return clib_compress_u32_x64 (dst, src, mask[0] & pow2_mask (n_elts)) - dst0;
}
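
/* Usage sketch (not part of the original header): combine
   clib_mask_compare_u32 with clib_compress_u32 to gather, in order, the
   entries of one array whose parallel key equals a given value. The function
   name, the two-array layout and the 256-element cap are illustrative
   assumptions. */
static_always_inline u32
clib_gather_by_key_u32_example (u32 *dst, u32 *values, u32 *keys, u32 key,
                                u32 n_elts)
{
  u64 bitmap[4] = {}; /* masks for up to 4 * 64 = 256 elements (sketch cap) */

  /* bit i of bitmap is set when keys[i] == key ... */
  clib_mask_compare_u32 (key, keys, bitmap, n_elts);

  /* ... and compression copies values[i] for exactly those i, preserving
     order; the return value is the number of elements written to dst */
  return clib_compress_u32 (dst, values, bitmap, n_elts);
}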