/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef included_clib_memcpy_avx_h
#define included_clib_memcpy_avx_h

#include <stdint.h>
#include <x86intrin.h>
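/* Note: u8 and uword are vppinfra typedefs (u8 is uint8_t, uword is a
   pointer-sized unsigned integer). This header assumes it is included
   from vppinfra, which provides those definitions. */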
static inline void
clib_mov16(u8 *dst, const u8 *src)
{
    __m128i xmm0;

    xmm0 = _mm_loadu_si128((const __m128i *)src);
    _mm_storeu_si128((__m128i *)dst, xmm0);
}
static inline void
clib_mov32(u8 *dst, const u8 *src)
{
    __m256i ymm0;

    ymm0 = _mm256_loadu_si256((const __m256i *)src);
    _mm256_storeu_si256((__m256i *)dst, ymm0);
}
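/* Both movers above use unaligned load/store intrinsics (vmovdqu), so
   callers need not guarantee any particular alignment of src or dst. */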
static inline void
clib_mov64(u8 *dst, const u8 *src)
{
    clib_mov32((u8 *)dst + 0 * 32, (const u8 *)src + 0 * 32);
    clib_mov32((u8 *)dst + 1 * 32, (const u8 *)src + 1 * 32);
}
static inline void
clib_mov128(u8 *dst, const u8 *src)
{
    clib_mov64((u8 *)dst + 0 * 64, (const u8 *)src + 0 * 64);
    clib_mov64((u8 *)dst + 1 * 64, (const u8 *)src + 1 * 64);
}
static inline void
clib_mov256(u8 *dst, const u8 *src)
{
    clib_mov128((u8 *)dst + 0 * 128, (const u8 *)src + 0 * 128);
    clib_mov128((u8 *)dst + 1 * 128, (const u8 *)src + 1 * 128);
}
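/* Each larger mover is two calls to the next smaller one, so the whole
   ladder inlines to a straight-line sequence of 32-byte loads and stores
   with no loop overhead. */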
static inline void
clib_mov64blocks(u8 *dst, const u8 *src, size_t n)
{
    __m256i ymm0, ymm1;

    while (n >= 64) {
        ymm0 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 0 * 32));
        n -= 64;
        ymm1 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 1 * 32));
        src = (const u8 *)src + 64;
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 0 * 32), ymm0);
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 1 * 32), ymm1);
        dst = (u8 *)dst + 64;
    }
}
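/* Both loads are issued before either store; this load-ahead-of-store
   scheduling is the "instruction order control" the comments in
   clib_memcpy below refer to, and it matters most when the loads are
   unaligned. */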
static inline void
clib_mov256blocks(u8 *dst, const u8 *src, size_t n)
{
    __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;

    while (n >= 256) {
        ymm0 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 0 * 32));
        n -= 256;
        ymm1 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 1 * 32));
        ymm2 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 2 * 32));
        ymm3 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 3 * 32));
        ymm4 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 4 * 32));
        ymm5 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 5 * 32));
        ymm6 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 6 * 32));
        ymm7 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 7 * 32));
        src = (const u8 *)src + 256;
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 0 * 32), ymm0);
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 1 * 32), ymm1);
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 2 * 32), ymm2);
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 3 * 32), ymm3);
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 4 * 32), ymm4);
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 5 * 32), ymm5);
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 6 * 32), ymm6);
        _mm256_storeu_si256((__m256i *)((u8 *)dst + 7 * 32), ymm7);
        dst = (u8 *)dst + 256;
    }
}
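/* Eight ymm registers hold one full 256-byte block, so each iteration
   issues all eight loads before all eight stores, giving the out-of-order
   core maximum freedom to overlap the unaligned loads. */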
static inline void *
clib_memcpy(void *dst, const void *src, size_t n)
{
    uword dstu = (uword)dst;
    uword srcu = (uword)src;
    void *ret = dst;
    uword dstofss;
    uword bits;

    /**
     * Copy less than 16 bytes
     */
    if (n < 16) {
        if (n & 0x01) {
            *(u8 *)dstu = *(const u8 *)srcu;
            srcu = (uword)((const u8 *)srcu + 1);
            dstu = (uword)((u8 *)dstu + 1);
        }
        if (n & 0x02) {
            *(uint16_t *)dstu = *(const uint16_t *)srcu;
            srcu = (uword)((const uint16_t *)srcu + 1);
            dstu = (uword)((uint16_t *)dstu + 1);
        }
        if (n & 0x04) {
            *(uint32_t *)dstu = *(const uint32_t *)srcu;
            srcu = (uword)((const uint32_t *)srcu + 1);
            dstu = (uword)((uint32_t *)dstu + 1);
        }
        if (n & 0x08) {
            *(uint64_t *)dstu = *(const uint64_t *)srcu;
        }
        return ret;
    }
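    /* With n < 16, the low four bits of n fully encode it: the four
       bit-tests above copy 1, 2, 4 and 8 bytes respectively, totalling
       exactly n bytes without a loop. */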
    /**
     * Fast way when copy size doesn't exceed 512 bytes
     */
    if (n <= 32) {
        clib_mov16((u8 *)dst, (const u8 *)src);
        clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
        return ret;
    }
    if (n <= 64) {
        clib_mov32((u8 *)dst, (const u8 *)src);
        clib_mov32((u8 *)dst - 32 + n, (const u8 *)src - 32 + n);
        return ret;
    }
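    /* Head + tail trick: each branch copies the first k and the last k
       bytes of the region (k = 16 or 32). Since k <= n <= 2k here, the
       two stores overlap in the middle and all n bytes are covered by
       exactly two unaligned moves. */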
    if (n <= 512) {
        if (n >= 256) {
            n -= 256;
            clib_mov256((u8 *)dst, (const u8 *)src);
            src = (const u8 *)src + 256;
            dst = (u8 *)dst + 256;
        }
        if (n >= 128) {
            n -= 128;
            clib_mov128((u8 *)dst, (const u8 *)src);
            src = (const u8 *)src + 128;
            dst = (u8 *)dst + 128;
        }
        if (n >= 64) {
            n -= 64;
            clib_mov64((u8 *)dst, (const u8 *)src);
            src = (const u8 *)src + 64;
            dst = (u8 *)dst + 64;
        }
    COPY_BLOCK_64_BACK31:
        if (n > 32) {
            clib_mov32((u8 *)dst, (const u8 *)src);
            clib_mov32((u8 *)dst - 32 + n, (const u8 *)src - 32 + n);
            return ret;
        }
        if (n > 0) {
            clib_mov32((u8 *)dst - 32 + n, (const u8 *)src - 32 + n);
        }
        return ret;
    }
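    /* At COPY_BLOCK_64_BACK31 at most 64 bytes remain. The tail store is
       anchored to the end of the region (dst - 32 + n); when n < 32 it
       deliberately rewrites bytes that were already copied, which is safe
       because src and dst have been advanced in lockstep. */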
    /**
     * Make store aligned when copy size exceeds 512 bytes
     */
    dstofss = (uword)dst & 0x1F;
    if (dstofss > 0) {
        dstofss = 32 - dstofss;
        n -= dstofss;
        clib_mov32((u8 *)dst, (const u8 *)src);
        src = (const u8 *)src + dstofss;
        dst = (u8 *)dst + dstofss;
    }
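    /* The peel copies 32 bytes but advances the pointers by only
       dstofss = 32 - ((uword)dst & 0x1F) bytes, leaving dst 32-byte
       aligned; every store in the block loops below then hits an aligned
       address even though the unaligned-store intrinsic is used. */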
    /**
     * Copy 256-byte blocks.
     * Use copy block function for better instruction order control,
     * which is important when load is unaligned.
     */
    clib_mov256blocks((u8 *)dst, (const u8 *)src, n);
    bits = n;
    n = n & 255;
    bits -= n;
    src = (const u8 *)src + bits;
    dst = (u8 *)dst + bits;
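    /* bits ends up holding the byte count the block loop consumed:
       n & 255 keeps the remainder below 256, and subtracting it from the
       old n leaves the copied multiple of 256 to advance the pointers by. */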
    /**
     * Copy 64-byte blocks.
     * Use copy block function for better instruction order control,
     * which is important when load is unaligned.
     */
    if (n >= 64) {
        clib_mov64blocks((u8 *)dst, (const u8 *)src, n);
        bits = n;
        n = n & 63;
        bits -= n;
        src = (const u8 *)src + bits;
        dst = (u8 *)dst + bits;
    }
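    /* Same remainder arithmetic as above, modulo 64 instead of 256. */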
    /**
     * Copy whatever is left
     */
    goto COPY_BLOCK_64_BACK31;
}
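/*
 * Example usage (an illustrative sketch; the buffer names are hypothetical,
 * not part of this header):
 *
 *     u8 frame[1500], payload[1500];
 *     clib_memcpy(frame, payload, sizeof(payload));
 *
 * The size dispatch above means short copies (e.g. packet headers) cost a
 * handful of scalar or single-vector moves, while large copies run through
 * the aligned 256-byte block loop.
 */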
#endif /* included_clib_memcpy_avx_h */