/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMCPY_X86_64_H_
#define _RTE_MEMCPY_X86_64_H_

/**
 * @file
 * Functions for SSE/AVX/AVX2/AVX512 implementation of memcpy().
 */

#include <stdint.h>
#include <string.h>
#include <rte_vect.h>
#include <rte_common.h>
#include <rte_config.h>
/**
 * Copy bytes from one location to another. The locations must not overlap.
 *
 * @note This is implemented as a macro, so its address should not be taken
 * and care is needed as parameter expressions may be evaluated multiple times.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 * @param n
 *   Number of bytes to copy.
 * @return
 *   Pointer to the destination data.
 */
static __rte_always_inline void *
rte_memcpy(void *dst, const void *src, size_t n);
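/*
 * Usage sketch (illustrative only; the buffer names below are hypothetical
 * and not part of this header). rte_memcpy() follows the memcpy() contract:
 * the regions must not overlap, and the destination pointer is returned.
 *
 *	uint8_t pkt_copy[2048];
 *	uint8_t pkt_data[2048];
 *	size_t pkt_len = 64;
 *
 *	void *ret = rte_memcpy(pkt_copy, pkt_data, pkt_len);
 *	// ret == pkt_copy; use memmove() if the regions may overlap.
 */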
#ifdef RTE_MACHINE_CPUFLAG_AVX512F

#define ALIGNMENT_MASK 0x3F

/**
 * AVX512 implementation below
 */
/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;
	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0;
	ymm0 = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dst, ymm0);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	__m512i zmm0;
	zmm0 = _mm512_loadu_si512((const void *)src);
	_mm512_storeu_si512((void *)dst, zmm0);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov64(dst + 0 * 64, src + 0 * 64);
	rte_mov64(dst + 1 * 64, src + 1 * 64);
}

/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov64(dst + 0 * 64, src + 0 * 64);
	rte_mov64(dst + 1 * 64, src + 1 * 64);
	rte_mov64(dst + 2 * 64, src + 2 * 64);
	rte_mov64(dst + 3 * 64, src + 3 * 64);
}
/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m512i zmm0, zmm1;

	while (n >= 128) {
		zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
		n -= 128;
		zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
		src = src + 128;
		_mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
		_mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
		dst = dst + 128;
	}
}
/**
 * Copy 512-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;

	while (n >= 512) {
		zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
		n -= 512;
		zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
		zmm2 = _mm512_loadu_si512((const void *)(src + 2 * 64));
		zmm3 = _mm512_loadu_si512((const void *)(src + 3 * 64));
		zmm4 = _mm512_loadu_si512((const void *)(src + 4 * 64));
		zmm5 = _mm512_loadu_si512((const void *)(src + 5 * 64));
		zmm6 = _mm512_loadu_si512((const void *)(src + 6 * 64));
		zmm7 = _mm512_loadu_si512((const void *)(src + 7 * 64));
		src = src + 512;
		_mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
		_mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
		_mm512_storeu_si512((void *)(dst + 2 * 64), zmm2);
		_mm512_storeu_si512((void *)(dst + 3 * 64), zmm3);
		_mm512_storeu_si512((void *)(dst + 4 * 64), zmm4);
		_mm512_storeu_si512((void *)(dst + 5 * 64), zmm5);
		_mm512_storeu_si512((void *)(dst + 6 * 64), zmm6);
		_mm512_storeu_si512((void *)(dst + 7 * 64), zmm7);
		dst = dst + 512;
	}
}
static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;

	/**
	 * Copy less than 16 bytes
	 */
	*(uint8_t *)dstu = *(const uint8_t *)srcu;
	srcu = (uintptr_t)((const uint8_t *)srcu + 1);
	dstu = (uintptr_t)((uint8_t *)dstu + 1);
	*(uint16_t *)dstu = *(const uint16_t *)srcu;
	srcu = (uintptr_t)((const uint16_t *)srcu + 1);
	dstu = (uintptr_t)((uint16_t *)dstu + 1);
	*(uint32_t *)dstu = *(const uint32_t *)srcu;
	srcu = (uintptr_t)((const uint32_t *)srcu + 1);
	dstu = (uintptr_t)((uint32_t *)dstu + 1);
	*(uint64_t *)dstu = *(const uint64_t *)srcu;
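	/*
	 * The scalar stores above decompose a small copy (n < 16) by the set
	 * bits of n, so at most four stores are issued. Worked example
	 * (illustrative): n = 13 = 0b1101 -> 1 byte (bit 0), no 2-byte store
	 * (bit 1 clear), then 4 bytes (bit 2) and 8 bytes (bit 3):
	 * 1 + 4 + 8 = 13 bytes copied.
	 */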
	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	rte_mov16((uint8_t *)dst, (const uint8_t *)src);
	rte_mov16((uint8_t *)dst - 16 + n,
			(const uint8_t *)src - 16 + n);
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	rte_mov32((uint8_t *)dst - 32 + n,
			(const uint8_t *)src - 32 + n);
	rte_mov256((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + 256;
	dst = (uint8_t *)dst + 256;
	rte_mov128((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + 128;
	dst = (uint8_t *)dst + 128;
COPY_BLOCK_128_BACK63:
	rte_mov64((uint8_t *)dst, (const uint8_t *)src);
	rte_mov64((uint8_t *)dst - 64 + n,
			(const uint8_t *)src - 64 + n);
	rte_mov64((uint8_t *)dst - 64 + n,
			(const uint8_t *)src - 64 + n);

	/**
	 * Make store aligned when copy size exceeds 512 bytes
	 */
	dstofss = ((uintptr_t)dst & 0x3F);
	dstofss = 64 - dstofss;
	rte_mov64((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + dstofss;
	dst = (uint8_t *)dst + dstofss;

	/**
	 * Copy 512-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when the load is unaligned.
	 */
	rte_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;

	/**
	 * Copy 128-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when the load is unaligned.
	 */
	rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;

	goto COPY_BLOCK_128_BACK63;
}
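/*
 * Illustrative trace of the large-copy path above (the sizes are made up,
 * and parts of the surrounding control flow are elided in this listing):
 * with dst 16 bytes past a 64-byte boundary and n = 2000, the peel copies
 * dstofss = 48 bytes so that stores become 64-byte aligned, leaving 1952
 * bytes; rte_mov512blocks() then moves 3 * 512 = 1536 bytes, leaving 416;
 * rte_mov128blocks() moves 3 * 128 = 384 bytes, leaving 32; the final
 * rte_mov64(dst - 64 + n, ...) at COPY_BLOCK_128_BACK63 covers the tail
 * with one overlapping 64-byte copy.
 */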
#elif defined RTE_MACHINE_CPUFLAG_AVX2

#define ALIGNMENT_MASK 0x1F

/**
 * AVX2 implementation below
 */
/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;
	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0;
	ymm0 = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dst, ymm0);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
}
/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m256i ymm0, ymm1, ymm2, ymm3;

	while (n >= 128) {
		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
		n -= 128;
		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
		ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
		ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
		src = (const uint8_t *)src + 128;
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
		dst = (uint8_t *)dst + 128;
	}
}
static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;

	/**
	 * Copy less than 16 bytes
	 */
	*(uint8_t *)dstu = *(const uint8_t *)srcu;
	srcu = (uintptr_t)((const uint8_t *)srcu + 1);
	dstu = (uintptr_t)((uint8_t *)dstu + 1);
	*(uint16_t *)dstu = *(const uint16_t *)srcu;
	srcu = (uintptr_t)((const uint16_t *)srcu + 1);
	dstu = (uintptr_t)((uint16_t *)dstu + 1);
	*(uint32_t *)dstu = *(const uint32_t *)srcu;
	srcu = (uintptr_t)((const uint32_t *)srcu + 1);
	dstu = (uintptr_t)((uint32_t *)dstu + 1);
	*(uint64_t *)dstu = *(const uint64_t *)srcu;

	/**
	 * Fast way when copy size doesn't exceed 256 bytes
	 */
	rte_mov16((uint8_t *)dst, (const uint8_t *)src);
	rte_mov16((uint8_t *)dst - 16 + n,
			(const uint8_t *)src - 16 + n);
	rte_mov16((uint8_t *)dst, (const uint8_t *)src);
	rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
	rte_mov16((uint8_t *)dst - 16 + n,
			(const uint8_t *)src - 16 + n);
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	rte_mov32((uint8_t *)dst - 32 + n,
			(const uint8_t *)src - 32 + n);
	rte_mov128((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + 128;
	dst = (uint8_t *)dst + 128;
COPY_BLOCK_128_BACK31:
	rte_mov64((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + 64;
	dst = (uint8_t *)dst + 64;
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	rte_mov32((uint8_t *)dst - 32 + n,
			(const uint8_t *)src - 32 + n);
	rte_mov32((uint8_t *)dst - 32 + n,
			(const uint8_t *)src - 32 + n);

	/**
	 * Make store aligned when copy size exceeds 256 bytes
	 */
	dstofss = (uintptr_t)dst & 0x1F;
	dstofss = 32 - dstofss;
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + dstofss;
	dst = (uint8_t *)dst + dstofss;

	/**
	 * Copy 128-byte blocks
	 */
	rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;

	goto COPY_BLOCK_128_BACK31;
}
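/*
 * Illustrative example of the store-alignment peel above (values made up):
 * if dst is 20 bytes past a 32-byte boundary, dstofss = 32 - 20 = 12; one
 * 32-byte copy is issued and both pointers advance by 12 bytes, so every
 * following store in rte_mov128blocks() is 32-byte aligned. In the full
 * upstream code n is also reduced by dstofss before the block loop.
 */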
#else /* RTE_MACHINE_CPUFLAG */

#define ALIGNMENT_MASK 0x0F

/**
 * SSE & AVX implementation below
 */
/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;
	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}
/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
}

/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
}
/**
 * Macro for copying an unaligned block from one location to another with a
 * constant load offset, with at most 47 bytes left over; the locations
 * should not overlap.
 * Requirements:
 * - Load offset is <offset>, which must be an immediate value within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 */
#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) \
__extension__ ({ \
	size_t tmp; \
	while (len >= 128 + 16 - offset) { \
		xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
		len -= 128; \
		xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
		xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
		xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16)); \
		xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16)); \
		xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16)); \
		xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16)); \
		xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16)); \
		xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16)); \
		src = (const uint8_t *)src + 128; \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
		dst = (uint8_t *)dst + 128; \
	} \
	tmp = len; \
	len = ((len - 16 + offset) & 127) + 16 - offset; \
	tmp -= len; \
	src = (const uint8_t *)src + tmp; \
	dst = (uint8_t *)dst + tmp; \
	if (len >= 32 + 16 - offset) { \
		while (len >= 32 + 16 - offset) { \
			xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
			len -= 32; \
			xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
			xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
			src = (const uint8_t *)src + 32; \
			_mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
			_mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
			dst = (uint8_t *)dst + 32; \
		} \
		tmp = len; \
		len = ((len - 16 + offset) & 31) + 16 - offset; \
		tmp -= len; \
		src = (const uint8_t *)src + tmp; \
		dst = (uint8_t *)dst + tmp; \
	} \
})
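/*
 * Illustrative example of the PALIGNR stitching above (not additional code):
 * with offset = 5, xmm0 holds src[-5..10] and xmm1 holds src[11..26], so
 * _mm_alignr_epi8(xmm1, xmm0, 5) concatenates the two registers and shifts
 * the pair right by 5 bytes, yielding exactly src[0..15]. Every load is
 * issued at a fixed misalignment while every store is 16-byte aligned,
 * which is why the requirement above asks for <offset> readable bytes
 * before src.
 */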
/**
 * Macro for copying an unaligned block from one location to another,
 * with at most 47 bytes left over; the locations should not overlap.
 * Use a switch here because the aligning instruction requires an immediate
 * value for the shift count.
 * Requirements:
 * - Load offset is <offset>, which must be within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be pre-defined
 */
#define MOVEUNALIGNED_LEFT47(dst, src, len, offset) \
__extension__ ({ \
	switch (offset) { \
	case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x01); break; \
	case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x02); break; \
	case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x03); break; \
	case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x04); break; \
	case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x05); break; \
	case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x06); break; \
	case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x07); break; \
	case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x08); break; \
	case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x09); break; \
	case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0A); break; \
	case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0B); break; \
	case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0C); break; \
	case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0D); break; \
	case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0E); break; \
	case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0F); break; \
	default:; \
	} \
})
static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;

	/**
	 * Copy less than 16 bytes
	 */
	*(uint8_t *)dstu = *(const uint8_t *)srcu;
	srcu = (uintptr_t)((const uint8_t *)srcu + 1);
	dstu = (uintptr_t)((uint8_t *)dstu + 1);
	*(uint16_t *)dstu = *(const uint16_t *)srcu;
	srcu = (uintptr_t)((const uint16_t *)srcu + 1);
	dstu = (uintptr_t)((uint16_t *)dstu + 1);
	*(uint32_t *)dstu = *(const uint32_t *)srcu;
	srcu = (uintptr_t)((const uint32_t *)srcu + 1);
	dstu = (uintptr_t)((uint32_t *)dstu + 1);
	*(uint64_t *)dstu = *(const uint64_t *)srcu;

	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	rte_mov16((uint8_t *)dst, (const uint8_t *)src);
	rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
	rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
	goto COPY_BLOCK_128_BACK15;
	rte_mov128((uint8_t *)dst, (const uint8_t *)src);
	rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
	src = (const uint8_t *)src + 256;
	dst = (uint8_t *)dst + 256;
COPY_BLOCK_255_BACK15:
	rte_mov128((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + 128;
	dst = (uint8_t *)dst + 128;
COPY_BLOCK_128_BACK15:
	rte_mov64((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + 64;
	dst = (uint8_t *)dst + 64;
COPY_BLOCK_64_BACK15:
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + 32;
	dst = (uint8_t *)dst + 32;
	rte_mov16((uint8_t *)dst, (const uint8_t *)src);
	rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
	rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
	/**
	 * Make store aligned when copy size exceeds 512 bytes,
	 * and make sure the first 15 bytes are copied, because
	 * unaligned copy functions require up to 15 bytes
	 * backwards access.
	 */
	dstofss = (uintptr_t)dst & 0x0F;
	dstofss = 16 - dstofss + 16;
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + dstofss;
	dst = (uint8_t *)dst + dstofss;
	srcofs = ((uintptr_t)src & 0x0F);

	/**
	 * Copy 256-byte blocks
	 */
	for (; n >= 256; n -= 256) {
		rte_mov256((uint8_t *)dst, (const uint8_t *)src);
		dst = (uint8_t *)dst + 256;
		src = (const uint8_t *)src + 256;
	}
	goto COPY_BLOCK_255_BACK15;

	/**
	 * For copy with unaligned load
	 */
	MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);
	goto COPY_BLOCK_64_BACK15;
}
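/*
 * Illustrative example of the peel above (values made up): if dst is 11
 * bytes past a 16-byte boundary, dstofss = 16 - 11 + 16 = 21; rte_mov32()
 * copies 32 bytes and both pointers advance by 21, so dst becomes 16-byte
 * aligned and at least 15 bytes before the new src have already been
 * copied, which is what MOVEUNALIGNED_LEFT47() needs for its backwards
 * loads. srcofs then selects the PALIGNR immediate: in the full upstream
 * code srcofs == 0 takes the aligned 256-byte loop, any other value
 * dispatches to the unaligned path.
 */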
#endif /* RTE_MACHINE_CPUFLAG */
static __rte_always_inline void *
rte_memcpy_aligned(void *dst, const void *src, size_t n)
{
	void *ret = dst;

	/* Copy size <= 16 bytes */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dst = *(const uint8_t *)src;
			src = (const uint8_t *)src + 1;
			dst = (uint8_t *)dst + 1;
		}
		if (n & 0x02) {
			*(uint16_t *)dst = *(const uint16_t *)src;
			src = (const uint16_t *)src + 1;
			dst = (uint16_t *)dst + 1;
		}
		if (n & 0x04) {
			*(uint32_t *)dst = *(const uint32_t *)src;
			src = (const uint32_t *)src + 1;
			dst = (uint32_t *)dst + 1;
		}
		if (n & 0x08)
			*(uint64_t *)dst = *(const uint64_t *)src;
		return ret;
	}

	/* Copy 16 <= size <= 32 bytes */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);
		return ret;
	}

	/* Copy 32 < size <= 64 bytes */
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
				(const uint8_t *)src - 32 + n);
		return ret;
	}

	/* Copy 64-byte blocks */
	for (; n >= 64; n -= 64) {
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;
	}

	/* Copy whatever is left */
	rte_mov64((uint8_t *)dst - 64 + n,
			(const uint8_t *)src - 64 + n);

	return ret;
}
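/*
 * Note on the trailing copy above: rte_mov64(dst - 64 + n, src - 64 + n)
 * copies the last 64 bytes of the region, partly overlapping data the loop
 * already wrote. Rewriting the same bytes with identical data is harmless
 * for memcpy() semantics and avoids a scalar tail loop. Example: with
 * n = 100, the loop copies one 64-byte block and leaves n = 36; the
 * trailing copy then covers bytes 36..99 of the original region.
 */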
static __rte_always_inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
	if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
		return rte_memcpy_aligned(dst, src, n);
	else
		return rte_memcpy_generic(dst, src, n);
}
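/*
 * Dispatch example (illustrative): ALIGNMENT_MASK is 0x3F, 0x1F or 0x0F
 * depending on the selected ISA, so OR-ing the two addresses and masking
 * tests both pointers for vector-width alignment at once. E.g. with the
 * AVX2 mask 0x1F, dst ending in 0x40 and src ending in 0x80 give
 * (0x40 | 0x80) & 0x1F == 0, so the aligned path is taken.
 */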
#endif /* _RTE_MEMCPY_X86_64_H_ */