/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef included_clib_string_h
#define included_clib_string_h

#include <vppinfra/clib.h>  /* for CLIB_LINUX_KERNEL */
#include <vppinfra/vector.h>

#ifdef CLIB_LINUX_KERNEL
#include <linux/string.h>
#endif

#ifdef CLIB_UNIX
#include <string.h>
#endif

#ifdef CLIB_STANDALONE
#include <vppinfra/standalone_string.h>
#endif

#if _x86_64_
#include <x86intrin.h>
#endif

/* Exchanges source and destination. */
void clib_memswap (void *_a, void *_b, uword bytes);
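
/*
 * Illustrative usage sketch (not part of this header); the buffer names
 * below are hypothetical.
 *
 *   u8 mac_a[6] = { 0 };
 *   u8 mac_b[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *   clib_memswap (mac_a, mac_b, 6);   // swaps the 6 bytes in place
 */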

/*
 * The vector unit memcpy variants confuse Coverity,
 * so don't let it anywhere near them.
 */
#ifndef __COVERITY__
#if __AVX512F__
#include <vppinfra/memcpy_avx512.h>
#elif __AVX2__
#include <vppinfra/memcpy_avx2.h>
#elif __SSSE3__
#include <vppinfra/memcpy_sse3.h>
#else
#define clib_memcpy(a,b,c) memcpy(a,b,c)
#endif
#else /* __COVERITY__ */
#define clib_memcpy(a,b,c) memcpy(a,b,c)
#endif

/*
 * Copy 64 bytes of data to 4 destinations.
 * This function is typically used in the quad-loop case, when a whole
 * cacheline needs to be copied to 4 different places: first it reads the
 * whole cacheline into 1/2/4 SIMD registers, then it writes the data to
 * the 4 destinations.
 */
static_always_inline void
clib_memcpy64_x4 (void *d0, void *d1, void *d2, void *d3, void *s)
{
#if defined (__AVX512F__)
  __m512i r0 = _mm512_loadu_si512 (s);

  _mm512_storeu_si512 (d0, r0);
  _mm512_storeu_si512 (d1, r0);
  _mm512_storeu_si512 (d2, r0);
  _mm512_storeu_si512 (d3, r0);

#elif defined (__AVX2__)
  __m256i r0 = _mm256_loadu_si256 ((__m256i *) (s + 0 * 32));
  __m256i r1 = _mm256_loadu_si256 ((__m256i *) (s + 1 * 32));

  _mm256_storeu_si256 ((__m256i *) (d0 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d0 + 1 * 32), r1);

  _mm256_storeu_si256 ((__m256i *) (d1 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d1 + 1 * 32), r1);

  _mm256_storeu_si256 ((__m256i *) (d2 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d2 + 1 * 32), r1);

  _mm256_storeu_si256 ((__m256i *) (d3 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d3 + 1 * 32), r1);

#elif defined (__SSSE3__)
  __m128i r0 = _mm_loadu_si128 ((__m128i *) (s + 0 * 16));
  __m128i r1 = _mm_loadu_si128 ((__m128i *) (s + 1 * 16));
  __m128i r2 = _mm_loadu_si128 ((__m128i *) (s + 2 * 16));
  __m128i r3 = _mm_loadu_si128 ((__m128i *) (s + 3 * 16));

  _mm_storeu_si128 ((__m128i *) (d0 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d0 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d0 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d0 + 3 * 16), r3);

  _mm_storeu_si128 ((__m128i *) (d1 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d1 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d1 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d1 + 3 * 16), r3);

  _mm_storeu_si128 ((__m128i *) (d2 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d2 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d2 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d2 + 3 * 16), r3);

  _mm_storeu_si128 ((__m128i *) (d3 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d3 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d3 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d3 + 3 * 16), r3);

#else
  clib_memcpy (d0, s, 64);
  clib_memcpy (d1, s, 64);
  clib_memcpy (d2, s, 64);
  clib_memcpy (d3, s, 64);
#endif
}
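
/*
 * Illustrative usage sketch (not part of this header): in a quad-loop
 * that processes four packets per iteration, stamp the same 64-byte
 * header template into all four buffers. The names below are
 * hypothetical.
 *
 *   u8 hdr_template[64];
 *   u8 *p0, *p1, *p2, *p3;   // rewrite pointers of 4 packets
 *   clib_memcpy64_x4 (p0, p1, p2, p3, hdr_template);
 */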

/* Store VAL into COUNT consecutive u64 slots starting at P;
   COUNT is in elements, not bytes. */
static_always_inline void
clib_memset_u64 (void *p, u64 val, uword count)
{
  u64 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u64x8 v512 = u64x8_splat (val);
  while (count >= 8)
    {
      u64x8_store_unaligned (v512, ptr);
      ptr += 8;
      count -= 8;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u64x4 v256 = u64x4_splat (val);
  while (count >= 4)
    {
      u64x4_store_unaligned (v256, ptr);
      ptr += 4;
      count -= 4;
    }
  if (count == 0)
    return;
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}
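
/*
 * Illustrative usage sketch for the clib_memset_u* family (not part of
 * this header): fill a 1024-entry u64 table with an "empty" sentinel.
 * The table name is hypothetical.
 *
 *   u64 table[1024];
 *   clib_memset_u64 (table, ~0ULL, ARRAY_LEN (table));
 */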

static_always_inline void
clib_memset_u32 (void *p, u32 val, uword count)
{
  u32 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u32x16 v512 = u32x16_splat (val);
  while (count >= 16)
    {
      u32x16_store_unaligned (v512, ptr);
      ptr += 16;
      count -= 16;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u32x8 v256 = u32x8_splat (val);
  while (count >= 8)
    {
      u32x8_store_unaligned (v256, ptr);
      ptr += 8;
      count -= 8;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  u32x4 v128 = u32x4_splat (val);
  while (count >= 4)
    {
      u32x4_store_unaligned (v128, ptr);
      ptr += 4;
      count -= 4;
    }
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}

static_always_inline void
clib_memset_u16 (void *p, u16 val, uword count)
{
  u16 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u16x32 v512 = u16x32_splat (val);
  while (count >= 32)
    {
      u16x32_store_unaligned (v512, ptr);
      ptr += 32;
      count -= 32;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u16x16 v256 = u16x16_splat (val);
  while (count >= 16)
    {
      u16x16_store_unaligned (v256, ptr);
      ptr += 16;
      count -= 16;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  u16x8 v128 = u16x8_splat (val);
  while (count >= 8)
    {
      u16x8_store_unaligned (v128, ptr);
      ptr += 8;
      count -= 8;
    }
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}

static_always_inline void
clib_memset_u8 (void *p, u8 val, uword count)
{
  u8 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u8x64 v512 = u8x64_splat (val);
  while (count >= 64)
    {
      u8x64_store_unaligned (v512, ptr);
      ptr += 64;
      count -= 64;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u8x32 v256 = u8x32_splat (val);
  while (count >= 32)
    {
      u8x32_store_unaligned (v256, ptr);
      ptr += 32;
      count -= 32;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  u8x16 v128 = u8x16_splat (val);
  while (count >= 16)
    {
      u8x16_store_unaligned (v128, ptr);
      ptr += 16;
      count -= 16;
    }
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}

/* Count how many consecutive elements are equal to the first one,
   stopping at max_count. */
static_always_inline uword
clib_count_equal_u64 (u64 * data, uword max_count)
{
  uword count;
  u64 first;

  if (max_count == 1)
    return 1;
  if (data[0] != data[1])
    return 1;

  count = 0;
  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u64x4 splat = u64x4_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u64x4_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
        {
          count += count_trailing_zeros (~bmp) / 8;
          return clib_min (count, max_count);
        }

      data += 4;
      count += 4;

      if (count >= max_count)
        return max_count;
    }
#endif
  count += 2;
  data += 2;
  while (count + 3 < max_count &&
         ((data[0] ^ first) | (data[1] ^ first) |
          (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}
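
/*
 * Illustrative usage sketch for the clib_count_equal_u* family (not part
 * of this header): measure the run of values equal to data[0], capped at
 * max_count. The array below is hypothetical.
 *
 *   u64 v[8] = { 5, 5, 5, 9, 5, 5, 5, 5 };
 *   uword n = clib_count_equal_u64 (v, ARRAY_LEN (v));   // n == 3
 */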

static_always_inline uword
clib_count_equal_u32 (u32 * data, uword max_count)
{
  uword count;
  u32 first;

  if (max_count == 1)
    return 1;
  if (data[0] != data[1])
    return 1;

  count = 0;
  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u32x8 splat = u32x8_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u32x8_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
        {
          count += count_trailing_zeros (~bmp) / 4;
          return clib_min (count, max_count);
        }

      data += 8;
      count += 8;

      if (count >= max_count)
        return max_count;
    }
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u32x4 splat = u32x4_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x16_msb_mask ((u8x16) (u32x4_load_unaligned (data) == splat));
      if (bmp != 0xffff)
        {
          count += count_trailing_zeros (~bmp) / 4;
          return clib_min (count, max_count);
        }

      data += 4;
      count += 4;

      if (count >= max_count)
        return max_count;
    }
#endif
  count += 2;
  data += 2;
  while (count + 3 < max_count &&
         ((data[0] ^ first) | (data[1] ^ first) |
          (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}

static_always_inline uword
clib_count_equal_u16 (u16 * data, uword max_count)
{
  uword count;
  u16 first;

  if (max_count == 1)
    return 1;
  if (data[0] != data[1])
    return 1;

  count = 0;
  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u16x16 splat = u16x16_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u16x16_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
        {
          count += count_trailing_zeros (~bmp) / 2;
          return clib_min (count, max_count);
        }

      data += 16;
      count += 16;

      if (count >= max_count)
        return max_count;
    }
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u16x8 splat = u16x8_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x16_msb_mask ((u8x16) (u16x8_load_unaligned (data) == splat));
      if (bmp != 0xffff)
        {
          count += count_trailing_zeros (~bmp) / 2;
          return clib_min (count, max_count);
        }

      data += 8;
      count += 8;

      if (count >= max_count)
        return max_count;
    }
#endif
  count += 2;
  data += 2;
  while (count + 3 < max_count &&
         ((data[0] ^ first) | (data[1] ^ first) |
          (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}

static_always_inline uword
clib_count_equal_u8 (u8 * data, uword max_count)
{
  uword count;
  u8 first;

  if (max_count == 1)
    return 1;
  if (data[0] != data[1])
    return 1;

  count = 0;
  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u8x32 splat = u8x32_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u8x32_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
        {
          count += count_trailing_zeros (~bmp);
          return clib_min (count, max_count);
        }

      data += 32;
      count += 32;

      if (count >= max_count)
        return max_count;
    }
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u8x16 splat = u8x16_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x16_msb_mask ((u8x16) (u8x16_load_unaligned (data) == splat));
      if (bmp != 0xffff)
        {
          count += count_trailing_zeros (~bmp);
          return clib_min (count, max_count);
        }

      data += 16;
      count += 16;

      if (count >= max_count)
        return max_count;
    }
#endif
  count += 2;
  data += 2;
  while (count + 3 < max_count &&
         ((data[0] ^ first) | (data[1] ^ first) |
          (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}

#endif /* included_clib_string_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */