#include <string.h>
#include <x86intrin.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
/* On Zen it is better to use ssememcpy: it is about as fast for most
   cases, and the _mm256_maskload_ps/_mm256_maskstore_ps path that
   avxmemcpy uses for block sizes 5..63 is slow on Zen */
/* The memcpy() calls below implement unaligned accesses; gcc compiles
   them to plain mov instructions (even if you call this function
   memcpy, too).  The code also relies on gcc extensions such as
   arithmetic on void pointers. */
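
/* Not in the original file: a rough sketch of the kind of ssememcpy
   mentioned above, shown here only for illustration.  It avoids
   masked loads/stores entirely: sizes below 16 use overlapping scalar
   copies, everything else uses overlapping unaligned 16-byte SSE
   accesses.  The name ssememcpy_sketch and the exact size thresholds
   are assumptions, not taken from the original code. */
void *ssememcpy_sketch(void *dest, const void *src, size_t n)
{
  if (n < 16) {
    if (n >= 8) {         /* two overlapping 8-byte copies */
      uint64_t a, b;
      memcpy(&a, src, 8);
      memcpy(&b, src+(n-8), 8);
      memcpy(dest, &a, 8);
      memcpy(dest+(n-8), &b, 8);
    } else if (n >= 4) {  /* two overlapping 4-byte copies */
      uint32_t a, b;
      memcpy(&a, src, 4);
      memcpy(&b, src+(n-4), 4);
      memcpy(dest, &a, 4);
      memcpy(dest+(n-4), &b, 4);
    } else if (n >= 1) {  /* 1..3 bytes: first, last and middle byte */
      ((char *)dest)[0]   = ((const char *)src)[0];
      ((char *)dest)[n-1] = ((const char *)src)[n-1];
      ((char *)dest)[n/2] = ((const char *)src)[n/2];
    }
  } else {                /* n >= 16: overlapping 16-byte blocks */
    __m128i last = _mm_loadu_si128((const __m128i *)(src+n-16));
    size_t i = 0;
    for (; i+16 < n; i += 16)
      _mm_storeu_si128((__m128i *)(dest+i),
                       _mm_loadu_si128((const __m128i *)(src+i)));
    _mm_storeu_si128((__m128i *)(dest+n-16), last);
  }
  return dest;
}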
void *avxmemcpy(void *dest, const void *src, size_t n)
{
  /* 8 all-ones dwords followed by 8 zero dwords; a 32-byte window
     starting inside this array yields a maskload/maskstore mask whose
     leading lanes are active */
  static const unsigned mask1[] = {~0,~0,~0,~0,~0,~0,~0,~0,
                                    0, 0, 0, 0, 0, 0, 0, 0};
  if (n<5) {
    if (n<2) {
      if (n==1) {
        *(char *)dest = *(char *)src;
      }
    } else { /* n in 2..4: copy the first and the last two bytes;
                for n<4 the two copies overlap, which is harmless */
      short temp1, temp2;
      memcpy(&temp1,src,2);
      memcpy(&temp2,src+(n-2),2);
      memcpy(dest,&temp1,2);
      memcpy(dest+(n-2),&temp2,2);
    }
  } else {
    if (n<64) { /* n in 5..63 */
      /* each masked access covers masklen+4 bytes, so two overlapping
         masked copies (one from the start, one from the end) cover the
         whole block; because the actual mask is 4 bytes longer than
         masklen, n=64 would need a window starting before mask1,
         which is why this case does not cover n=64 */
      size_t masklen = (n>>1)&~3;
      __m256i mask = _mm256_castps_si256(_mm256_loadu_ps((float const*)(((char *)mask1)+28-masklen)));
      __m256 p1 = _mm256_maskload_ps((float const *)src, mask);
      __m256 p2 = _mm256_maskload_ps((float const *)(src+(n-masklen-4)), mask);
      _mm256_maskstore_ps((float *)dest, mask, p1);
      _mm256_maskstore_ps((float *)(dest+(n-masklen-4)), mask, p2);
    } else { /* n>=64: the unaligned head and tail are handled by
                overlapping 32-byte copies (x1 and x3); the main loop
                stores to 32-byte aligned addresses */
      __m256i x1 = _mm256_loadu_si256((__m256i *)src);
      ptrdiff_t off = src-dest;                       /* src as offset from dest */
      void *dlast = dest+n-32;                        /* start of the last 32 bytes */
      void *d = (void *)(((intptr_t)(dest+32))&~31);  /* first aligned store address */
      __m256i x3 = _mm256_loadu_si256((__m256i *)(dlast+off));
#ifdef NO_UNROLLING
      for (; d<dlast; d+=32) {
        __m256i x = _mm256_loadu_si256((__m256i *)(d+off));
        _mm256_storeu_si256((__m256i *)d, x);
      }
#else
      /* 2x unrolled: the loop copies 64 bytes per iteration; the x2
         and x3 stores below cover the final up-to-64 bytes, possibly
         overlapping what the loop has already written */
      __m256i x2 = _mm256_loadu_si256((__m256i *)(dlast+off-32));
      for (; d<dlast-32; d+=64) {
        __m256i y1 = _mm256_loadu_si256((__m256i *)(d+off));
        __m256i y2 = _mm256_loadu_si256((__m256i *)(d+off+32));
        _mm256_storeu_si256((__m256i *)d, y1);
        _mm256_storeu_si256((__m256i *)(d+32), y2);
      }
      _mm256_storeu_si256((__m256i *)(dlast-32), x2);
#endif
      _mm256_storeu_si256((__m256i *)dest, x1);       /* unaligned head */
      _mm256_storeu_si256((__m256i *)dlast, x3);      /* unaligned tail */
    }
  }
  return dest;
}
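
/* Not in the original file: a minimal test/usage sketch.  It compares
   avxmemcpy against the libc memcpy for every size up to 4096 and a
   few destination offsets.  The guard macro AVXMEMCPY_TEST is made up
   for this sketch; build e.g. with
       gcc -O2 -mavx -DAVXMEMCPY_TEST avxmemcpy.c */
#ifdef AVXMEMCPY_TEST
int main(void)
{
  static unsigned char s[4096+64], d1[4096+64], d2[4096+64];
  for (size_t i = 0; i < sizeof(s); i++)
    s[i] = (unsigned char)(i*251+17);
  for (size_t n = 0; n <= 4096; n++) {
    for (size_t o = 0; o < 4; o++) {      /* vary destination alignment */
      memset(d1, 0, sizeof(d1));
      memset(d2, 0, sizeof(d2));
      memcpy(d1+o, s, n);
      avxmemcpy(d2+o, s, n);
      if (memcmp(d1, d2, sizeof(d1)) != 0) {
        printf("mismatch at n=%zu off=%zu\n", n, o);
        return 1;
      }
    }
  }
  printf("all sizes OK\n");
  return 0;
}
#endif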