/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/** \file

    Optimized string handling code, including c11-compliant
    "safe C library" variants.
*/
#ifndef included_clib_string_h
#define included_clib_string_h

#include <vppinfra/clib.h>	/* for CLIB_LINUX_KERNEL */
#include <vppinfra/vector.h>
#ifdef CLIB_LINUX_KERNEL
#include <linux/string.h>
#endif

#ifdef CLIB_UNIX
#include <string.h>
#endif

#ifdef CLIB_STANDALONE
#include <vppinfra/standalone_string.h>
#endif

#if defined(__x86_64__)
#include <x86intrin.h>
#endif
/* Exchanges source and destination. */
void clib_memswap (void *_a, void *_b, uword bytes);
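/*
 * Usage sketch (illustrative only): swap two same-sized objects in
 * place, e.g. two hypothetical session structs:
 *
 *   clib_memswap (&s0, &s1, sizeof (s0));
 */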
/*
 * the vector unit memcpy variants confuse coverity
 * so don't let it anywhere near them.
 */
#ifndef __COVERITY__
#if defined (__AVX512F__)
#include <vppinfra/memcpy_avx512.h>
#elif defined (__AVX2__)
#include <vppinfra/memcpy_avx2.h>
#elif defined (__SSSE3__)
#include <vppinfra/memcpy_sse3.h>
#else
#define clib_memcpy_fast(a,b,c) memcpy(a,b,c)
#endif
#else /* __COVERITY__ */
#define clib_memcpy_fast(a,b,c) memcpy(a,b,c)
#endif
/* c-11 string manipulation variants */

#ifndef EOK
#define EOK 0
#endif
#ifndef EINVAL
#define EINVAL 22
#endif

typedef int errno_t;
typedef uword rsize_t;

void clib_c11_violation (const char *s);
errno_t memcpy_s (void *__restrict__ dest, rsize_t dmax,
		  const void *__restrict__ src, rsize_t n);
always_inline errno_t
memcpy_s_inline (void *__restrict__ dest, rsize_t dmax,
		 const void *__restrict__ src, rsize_t n)
{
  uword low, hi;
  u8 bad;

  /*
   * Optimize constant-number-of-bytes calls without asking
   * "too many questions for someone from New Jersey"
   */
  if (__builtin_constant_p (n))
    {
      clib_memcpy_fast (dest, src, n);
      return EOK;
    }

  /*
   * call bogus if: src or dst NULL, trying to copy
   * more data than we have space in dst, or src == dst.
   * n == 0 isn't really "bad", so check first in the
   * "wall-of-shame" department...
   */
  bad = (dest == 0) + (src == 0) + (n > dmax) + (dest == src) + (n == 0);
  if (PREDICT_FALSE (bad != 0))
    {
      /* Not actually trying to copy anything is OK */
      if (n == 0)
	return EOK;
      if (dest == 0)
	clib_c11_violation ("dest NULL");
      if (src == 0)
	clib_c11_violation ("src NULL");
      if (n > dmax)
	clib_c11_violation ("n > dmax");
      if (dest == src)
	clib_c11_violation ("dest == src");
      return EINVAL;
    }

  /* Check for src/dst overlap, which is not allowed */
  low = (uword) (src < dest ? src : dest);
  hi = (uword) (src < dest ? dest : src);
  if (PREDICT_FALSE (low + (n - 1) >= hi))
    {
      clib_c11_violation ("src/dest overlap");
      return EINVAL;
    }

  clib_memcpy_fast (dest, src, n);
  return EOK;
}
/*
 * Note: $$$ This macro is a crutch. Folks need to manually
 * inspect every extant clib_memcpy(...) call and
 * attempt to provide a real destination buffer size
 * argument.
 */
#define clib_memcpy(d,s,n) memcpy_s_inline(d,n,s,n)
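/*
 * Usage sketch (illustrative only): prefer the four-argument form
 * directly, so dmax reflects the true destination buffer size instead
 * of leaning on the clib_memcpy crutch above:
 *
 *   u8 dst[64];
 *   if (memcpy_s_inline (dst, sizeof (dst), src, n) != EOK)
 *     return -1;   // n > sizeof (dst), src/dst overlap, or NULL arg
 */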
errno_t memset_s (void *s, rsize_t smax, int c, rsize_t n);

always_inline errno_t
memset_s_inline (void *s, rsize_t smax, int c, rsize_t n)
{
  u8 bad;

  bad = (s == 0) + (n > smax);
  if (PREDICT_FALSE (bad != 0))
    {
      if (s == 0)
	clib_c11_violation ("s NULL");
      if (n > smax)
	clib_c11_violation ("n > smax");
      return EINVAL;
    }

  memset (s, c, n);
  return EOK;
}

/*
 * This macro is not [so much of] a crutch.
 * It's super-typical to write:
 *
 *   ep = pool_get (<pool>);
 *   clib_memset (ep, 0, sizeof (*ep));
 *
 * The compiler should delete the not-so-useful
 * (n > smax) test. TBH the NULL pointer check isn't
 * so useful in this case, but so be it.
 */
#define clib_memset(s,c,n) memset_s_inline(s,n,c,n)
/*
 * Copy 64 bytes of data to 4 destinations.
 * This function is typically used in the quad-loop case, when a whole
 * cacheline needs to be copied to 4 different places. First it reads the
 * whole cacheline into 1/2/4 SIMD registers, then it writes the data to
 * all 4 destinations.
 */

static_always_inline void
clib_memcpy64_x4 (void *d0, void *d1, void *d2, void *d3, void *s)
{
#if defined (__AVX512F__)
  __m512i r0 = _mm512_loadu_si512 (s);

  _mm512_storeu_si512 (d0, r0);
  _mm512_storeu_si512 (d1, r0);
  _mm512_storeu_si512 (d2, r0);
  _mm512_storeu_si512 (d3, r0);

#elif defined (__AVX2__)
  __m256i r0 = _mm256_loadu_si256 ((__m256i *) (s + 0 * 32));
  __m256i r1 = _mm256_loadu_si256 ((__m256i *) (s + 1 * 32));

  _mm256_storeu_si256 ((__m256i *) (d0 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d0 + 1 * 32), r1);

  _mm256_storeu_si256 ((__m256i *) (d1 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d1 + 1 * 32), r1);

  _mm256_storeu_si256 ((__m256i *) (d2 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d2 + 1 * 32), r1);

  _mm256_storeu_si256 ((__m256i *) (d3 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d3 + 1 * 32), r1);

#elif defined (__SSSE3__)
  __m128i r0 = _mm_loadu_si128 ((__m128i *) (s + 0 * 16));
  __m128i r1 = _mm_loadu_si128 ((__m128i *) (s + 1 * 16));
  __m128i r2 = _mm_loadu_si128 ((__m128i *) (s + 2 * 16));
  __m128i r3 = _mm_loadu_si128 ((__m128i *) (s + 3 * 16));

  _mm_storeu_si128 ((__m128i *) (d0 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d0 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d0 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d0 + 3 * 16), r3);

  _mm_storeu_si128 ((__m128i *) (d1 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d1 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d1 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d1 + 3 * 16), r3);

  _mm_storeu_si128 ((__m128i *) (d2 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d2 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d2 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d2 + 3 * 16), r3);

  _mm_storeu_si128 ((__m128i *) (d3 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d3 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d3 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d3 + 3 * 16), r3);

#else
  clib_memcpy_fast (d0, s, 64);
  clib_memcpy_fast (d1, s, 64);
  clib_memcpy_fast (d2, s, 64);
  clib_memcpy_fast (d3, s, 64);
#endif
}
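/*
 * Usage sketch (illustrative only; b0..b3 and rewrite are hypothetical
 * buffers and a 64-byte source): replicate one cacheline-sized rewrite
 * string into four packet buffers inside a quad-loop:
 *
 *   clib_memcpy64_x4 (vlib_buffer_get_current (b0),
 *                     vlib_buffer_get_current (b1),
 *                     vlib_buffer_get_current (b2),
 *                     vlib_buffer_get_current (b3), rewrite);
 */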
/* Fill a region with a repeated 64-bit value (u32/u16/u8 variants
   follow), using the widest available vector stores plus a scalar
   tail loop. */
static_always_inline void
clib_memset_u64 (void *p, u64 val, uword count)
{
  u64 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u64x8 v512 = u64x8_splat (val);
  while (count >= 8)
    {
      u64x8_store_unaligned (v512, ptr);
      ptr += 8;
      count -= 8;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u64x4 v256 = u64x4_splat (val);
  while (count >= 4)
    {
      u64x4_store_unaligned (v256, ptr);
      ptr += 4;
      count -= 4;
    }
  if (count == 0)
    return;
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}
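/*
 * Usage sketch for the clib_memset_uXX family (illustrative only);
 * note that count is in elements, not bytes:
 *
 *   u64 counters[1024];
 *   clib_memset_u64 (counters, ~0ULL, ARRAY_LEN (counters));
 */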
static_always_inline void
clib_memset_u32 (void *p, u32 val, uword count)
{
  u32 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u32x16 v512 = u32x16_splat (val);
  while (count >= 16)
    {
      u32x16_store_unaligned (v512, ptr);
      ptr += 16;
      count -= 16;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u32x8 v256 = u32x8_splat (val);
  while (count >= 8)
    {
      u32x8_store_unaligned (v256, ptr);
      ptr += 8;
      count -= 8;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  u32x4 v128 = u32x4_splat (val);
  while (count >= 4)
    {
      u32x4_store_unaligned (v128, ptr);
      ptr += 4;
      count -= 4;
    }
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}
static_always_inline void
clib_memset_u16 (void *p, u16 val, uword count)
{
  u16 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u16x32 v512 = u16x32_splat (val);
  while (count >= 32)
    {
      u16x32_store_unaligned (v512, ptr);
      ptr += 32;
      count -= 32;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u16x16 v256 = u16x16_splat (val);
  while (count >= 16)
    {
      u16x16_store_unaligned (v256, ptr);
      ptr += 16;
      count -= 16;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  u16x8 v128 = u16x8_splat (val);
  while (count >= 8)
    {
      u16x8_store_unaligned (v128, ptr);
      ptr += 8;
      count -= 8;
    }
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}
static_always_inline void
clib_memset_u8 (void *p, u8 val, uword count)
{
  u8 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u8x64 v512 = u8x64_splat (val);
  while (count >= 64)
    {
      u8x64_store_unaligned (v512, ptr);
      ptr += 64;
      count -= 64;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u8x32 v256 = u8x32_splat (val);
  while (count >= 32)
    {
      u8x32_store_unaligned (v256, ptr);
      ptr += 32;
      count -= 32;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  u8x16 v128 = u8x16_splat (val);
  while (count >= 16)
    {
      u8x16_store_unaligned (v128, ptr);
      ptr += 16;
      count -= 16;
    }
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}
/* Count the run of leading elements equal to data[0], scanning at most
   max_count elements (u32/u16/u8 variants follow). */
static_always_inline uword
clib_count_equal_u64 (u64 * data, uword max_count)
{
  uword count;
  u64 first;

  if (max_count == 1)
    return 1;
  if (data[0] != data[1])
    return 1;

  count = 0;
  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u64x4 splat = u64x4_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u64x4_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
	{
	  count += count_trailing_zeros (~bmp) / 8;
	  return clib_min (count, max_count);
	}

      data += 4;
      count += 4;

      if (count >= max_count)
	return max_count;
    }
#endif
  count = 2;
  data += 2;
  while (count + 3 < max_count &&
	 ((data[0] ^ first) | (data[1] ^ first) |
	  (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}
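/*
 * Usage sketch for the clib_count_equal_uXX family (illustrative
 * only): measure the run of identical words at the start of an array,
 * e.g. to coalesce repeated table entries:
 *
 *   uword run = clib_count_equal_u64 (entries, n_entries);
 */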
static_always_inline uword
clib_count_equal_u32 (u32 * data, uword max_count)
{
  uword count;
  u32 first;

  if (max_count == 1)
    return 1;
  if (data[0] != data[1])
    return 1;

  count = 0;
  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u32x8 splat = u32x8_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u32x8_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
	{
	  count += count_trailing_zeros (~bmp) / 4;
	  return clib_min (count, max_count);
	}

      data += 8;
      count += 8;

      if (count >= max_count)
	return max_count;
    }
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u32x4 splat = u32x4_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x16_msb_mask ((u8x16) (u32x4_load_unaligned (data) == splat));
      if (bmp != 0xffff)
	{
	  count += count_trailing_zeros (~bmp) / 4;
	  return clib_min (count, max_count);
	}

      data += 4;
      count += 4;

      if (count >= max_count)
	return max_count;
    }
#endif
  count = 2;
  data += 2;
  while (count + 3 < max_count &&
	 ((data[0] ^ first) | (data[1] ^ first) |
	  (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}
static_always_inline uword
clib_count_equal_u16 (u16 * data, uword max_count)
{
  uword count;
  u16 first;

  if (max_count == 1)
    return 1;
  if (data[0] != data[1])
    return 1;

  count = 0;
  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u16x16 splat = u16x16_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u16x16_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
	{
	  count += count_trailing_zeros (~bmp) / 2;
	  return clib_min (count, max_count);
	}

      data += 16;
      count += 16;

      if (count >= max_count)
	return max_count;
    }
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u16x8 splat = u16x8_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x16_msb_mask ((u8x16) (u16x8_load_unaligned (data) == splat));
      if (bmp != 0xffff)
	{
	  count += count_trailing_zeros (~bmp) / 2;
	  return clib_min (count, max_count);
	}

      data += 8;
      count += 8;

      if (count >= max_count)
	return max_count;
    }
#endif
  count = 2;
  data += 2;
  while (count + 3 < max_count &&
	 ((data[0] ^ first) | (data[1] ^ first) |
	  (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}
static_always_inline uword
clib_count_equal_u8 (u8 * data, uword max_count)
{
  uword count;
  u8 first;

  if (max_count == 1)
    return 1;
  if (data[0] != data[1])
    return 1;

  count = 0;
  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u8x32 splat = u8x32_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u8x32_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
	{
	  count += count_trailing_zeros (~bmp);
	  return clib_min (count, max_count);
	}

      data += 32;
      count += 32;

      if (count >= max_count)
	return max_count;
    }
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u8x16 splat = u8x16_splat (first);
  while (1)
    {
      u64 bmp;
      bmp = u8x16_msb_mask ((u8x16) (u8x16_load_unaligned (data) == splat));
      if (bmp != 0xffff)
	{
	  count += count_trailing_zeros (~bmp);
	  return clib_min (count, max_count);
	}

      data += 16;
      count += 16;

      if (count >= max_count)
	return max_count;
    }
#endif
  count = 2;
  data += 2;
  while (count + 3 < max_count &&
	 ((data[0] ^ first) | (data[1] ^ first) |
	  (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}
#endif /* included_clib_string_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */