/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef included_clib_memcpy_avx512_h
#define included_clib_memcpy_avx512_h

#include <stdint.h>
#include <x86intrin.h>
#include <vppinfra/warnings.h>

WARN_OFF (stringop-overflow)

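/*
 * The overlapping fixed-size moves below (e.g. the "dst - 16 + n" tail
 * copies) trigger false-positive -Wstringop-overflow diagnostics on some
 * GCC versions; the WARN_OFF / WARN_ON pair scopes the suppression to
 * this header.
 */
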
static inline void
clib_mov16 (u8 * dst, const u8 * src)
{
  __m128i xmm0;

  xmm0 = _mm_loadu_si128 ((const __m128i *) src);
  _mm_storeu_si128 ((__m128i *) dst, xmm0);
}

static inline void
clib_mov32 (u8 * dst, const u8 * src)
{
  __m256i ymm0;

  ymm0 = _mm256_loadu_si256 ((const __m256i *) src);
  _mm256_storeu_si256 ((__m256i *) dst, ymm0);
}

static inline void
clib_mov64 (u8 * dst, const u8 * src)
{
  __m512i zmm0;

  zmm0 = _mm512_loadu_si512 ((const void *) src);
  _mm512_storeu_si512 ((void *) dst, zmm0);
}

static inline void
clib_mov128 (u8 * dst, const u8 * src)
{
  clib_mov64 (dst + 0 * 64, src + 0 * 64);
  clib_mov64 (dst + 1 * 64, src + 1 * 64);
}

static inline void
clib_mov256 (u8 * dst, const u8 * src)
{
  clib_mov128 (dst + 0 * 128, src + 0 * 128);
  clib_mov128 (dst + 1 * 128, src + 1 * 128);
}

static inline void
clib_mov128blocks (u8 * dst, const u8 * src, size_t n)
{
  __m512i zmm0, zmm1;

  while (n >= 128)
    {
      zmm0 = _mm512_loadu_si512 ((const void *) (src + 0 * 64));
      n -= 128;
      zmm1 = _mm512_loadu_si512 ((const void *) (src + 1 * 64));
      src = src + 128;
      _mm512_storeu_si512 ((void *) (dst + 0 * 64), zmm0);
      _mm512_storeu_si512 ((void *) (dst + 1 * 64), zmm1);
      dst = dst + 128;
    }
}

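/*
 * Note: the two block movers copy only whole 128- or 512-byte blocks;
 * any remainder (n & 127 or n & 511) is left for the caller to finish,
 * as clib_memcpy_fast_avx512 does below.
 */
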
static inline void
clib_mov512blocks (u8 * dst, const u8 * src, size_t n)
{
  __m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;

  while (n >= 512)
    {
      zmm0 = _mm512_loadu_si512 ((const void *) (src + 0 * 64));
      n -= 512;
      zmm1 = _mm512_loadu_si512 ((const void *) (src + 1 * 64));
      zmm2 = _mm512_loadu_si512 ((const void *) (src + 2 * 64));
      zmm3 = _mm512_loadu_si512 ((const void *) (src + 3 * 64));
      zmm4 = _mm512_loadu_si512 ((const void *) (src + 4 * 64));
      zmm5 = _mm512_loadu_si512 ((const void *) (src + 5 * 64));
      zmm6 = _mm512_loadu_si512 ((const void *) (src + 6 * 64));
      zmm7 = _mm512_loadu_si512 ((const void *) (src + 7 * 64));
      src = src + 512;
      _mm512_storeu_si512 ((void *) (dst + 0 * 64), zmm0);
      _mm512_storeu_si512 ((void *) (dst + 1 * 64), zmm1);
      _mm512_storeu_si512 ((void *) (dst + 2 * 64), zmm2);
      _mm512_storeu_si512 ((void *) (dst + 3 * 64), zmm3);
      _mm512_storeu_si512 ((void *) (dst + 4 * 64), zmm4);
      _mm512_storeu_si512 ((void *) (dst + 5 * 64), zmm5);
      _mm512_storeu_si512 ((void *) (dst + 6 * 64), zmm6);
      _mm512_storeu_si512 ((void *) (dst + 7 * 64), zmm7);
      dst = dst + 512;
    }
}

static inline void *
clib_memcpy_fast_avx512 (void *dst, const void *src, size_t n)
{
  uword dstu = (uword) dst;
  uword srcu = (uword) src;
  void *ret = dst;
  size_t dstofss;
  size_t bits;

  /**
   * Copy less than 16 bytes
   */
  if (n < 16)
    {
      if (n & 0x01)
        {
          *(u8 *) dstu = *(const u8 *) srcu;
          srcu = (uword) ((const u8 *) srcu + 1);
          dstu = (uword) ((u8 *) dstu + 1);
        }
      if (n & 0x02)
        {
          *(u16 *) dstu = *(const u16 *) srcu;
          srcu = (uword) ((const u16 *) srcu + 1);
          dstu = (uword) ((u16 *) dstu + 1);
        }
      if (n & 0x04)
        {
          *(u32 *) dstu = *(const u32 *) srcu;
          srcu = (uword) ((const u32 *) srcu + 1);
          dstu = (uword) ((u32 *) dstu + 1);
        }
      if (n & 0x08)
        *(u64 *) dstu = *(const u64 *) srcu;
      return ret;
    }

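  /*
   * Example: n = 11 (binary 1011) takes the 0x01, 0x02 and 0x08 branches
   * above, copying 1 + 2 + 8 bytes with three scalar stores and no loop.
   */
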
  /**
   * Fast way when copy size doesn't exceed 512 bytes
   */
  if (n <= 32)
    {
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 64)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
      return ret;
    }

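  /*
   * Example: for n = 40, the two clib_mov32 calls above copy bytes
   * [0, 32) and [8, 40); the 24-byte overlap costs nothing extra and
   * avoids a byte-by-byte tail loop.
   */
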
  if (n <= 512)
    {
      if (n >= 256)
        {
          n -= 256;
          clib_mov256 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 256;
          dst = (u8 *) dst + 256;
        }
      if (n >= 128)
        {
          n -= 128;
          clib_mov128 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 128;
          dst = (u8 *) dst + 128;
        }
    COPY_BLOCK_128_BACK63:
      if (n > 64)
        {
          clib_mov64 ((u8 *) dst, (const u8 *) src);
          clib_mov64 ((u8 *) dst - 64 + n, (const u8 *) src - 64 + n);
          return ret;
        }
      if (n > 0)
        clib_mov64 ((u8 *) dst - 64 + n, (const u8 *) src - 64 + n);
      return ret;
    }

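  /*
   * Example: n = 200 takes the n >= 128 branch (bytes [0, 128)), leaving
   * n = 72 at COPY_BLOCK_128_BACK63; 72 > 64, so one clib_mov64 covers
   * [128, 192) and the final overlapping clib_mov64 covers [136, 200).
   */
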
  /**
   * Make store aligned when copy size exceeds 512 bytes
   */
  dstofss = (uword) dst & 0x3F;
  if (dstofss > 0)
    {
      dstofss = 64 - dstofss;
      n -= dstofss;
      clib_mov64 ((u8 *) dst, (const u8 *) src);
      src = (const u8 *) src + dstofss;
      dst = (u8 *) dst + dstofss;
    }

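  /*
   * Example: if dst ends in 0x07, dstofss becomes 64 - 7 = 57: one
   * unaligned 64-byte move covers those 57 head bytes (the trailing
   * 7 bytes are simply rewritten by the block copies), after which
   * every store below is 64-byte aligned.
   */
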
  /**
   * Copy 512-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
  clib_mov512blocks ((u8 *) dst, (const u8 *) src, n);
  bits = n;
  n = n & 511;
  bits -= n;
  src = (const u8 *) src + bits;
  dst = (u8 *) dst + bits;

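  /*
   * Example: n = 1700 copies three 512-byte blocks (1536 bytes); n then
   * becomes 1700 & 511 = 164 and src/dst advance by bits = 1536, leaving
   * the 128-byte stage and the final label to finish the tail.
   */
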
  /**
   * Copy 128-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
  if (n >= 128)
    {
      clib_mov128blocks ((u8 *) dst, (const u8 *) src, n);
      bits = n;
      n = n & 127;
      bits -= n;
      src = (const u8 *) src + bits;
      dst = (u8 *) dst + bits;
    }

  /**
   * Copy whatever is left
   */
  goto COPY_BLOCK_128_BACK63;
}

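/*
 * Usage sketch (illustrative, not part of the upstream file): in VPP,
 * clib_memcpy_fast () typically dispatches here when built for AVX-512,
 * but the routine can be called directly on an AVX-512 capable CPU.
 * The buffer names below are hypothetical.
 *
 *   static u8 src_buf[2048], dst_buf[2048];
 *
 *   clib_memcpy_fast_avx512 (dst_buf, src_buf, 7);    // scalar tail path
 *   clib_memcpy_fast_avx512 (dst_buf, src_buf, 300);  // <= 512-byte path
 *   clib_memcpy_fast_avx512 (dst_buf, src_buf, 2048); // aligned block path
 */
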
WARN_ON (stringop-overflow)

#endif /* included_clib_memcpy_avx512_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */