riscv: optimized memcpy
Write a C version of memcpy() which uses the biggest data size allowed,
without generating unaligned accesses.

The procedure is made of three steps:
First, copy data one byte at a time until the destination buffer is
aligned to a long boundary.
Then copy the data one long at a time, shifting the current and the next
long word to compose an aligned long at every cycle.
Finally, copy the remainder one byte at a time.
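
To make the second step concrete, here is a standalone little-endian
userspace sketch (illustration only, not part of the patch) showing how
one aligned destination word is composed from two aligned source reads
when the source is misaligned by `distance` bytes:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char buf[16] = "ABCDEFGHIJKLMNOP";
	const int bytes_long = sizeof(unsigned long);
	const int distance = 2;	/* source misalignment, in bytes */
	unsigned long last, next, out;

	/* two aligned reads around the misaligned source position */
	memcpy(&last, buf, bytes_long);			/* "ABCDEFGH" on LP64 */
	memcpy(&next, buf + bytes_long, bytes_long);	/* "IJKLMNOP" on LP64 */

	/* compose the word that starts at buf + distance */
	out = last >> (distance * 8) |
	      next << ((bytes_long - distance) * 8);

	/* prints the bytes starting at buf + distance:
	 * "CDEFGHIJ" on a 64-bit little-endian machine */
	printf("%.*s\n", bytes_long, (char *)&out);
	return 0;
}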

On a BeagleV, the TCP RX throughput increased by 45%:

before:

$ iperf3 -c beaglev
Connecting to host beaglev, port 5201
[  5] local 192.168.85.6 port 44840 connected to 192.168.85.48 port 5201
[ ID] Interval           Transfer     Bitrate         Retr  Cwnd
[  5]   0.00-1.00   sec  76.4 MBytes   641 Mbits/sec   27    624 KBytes
[  5]   1.00-2.00   sec  72.5 MBytes   608 Mbits/sec    0    708 KBytes
[  5]   2.00-3.00   sec  73.8 MBytes   619 Mbits/sec   10    451 KBytes
[  5]   3.00-4.00   sec  72.5 MBytes   608 Mbits/sec    0    564 KBytes
[  5]   4.00-5.00   sec  73.8 MBytes   619 Mbits/sec    0    658 KBytes
[  5]   5.00-6.00   sec  73.8 MBytes   619 Mbits/sec   14    522 KBytes
[  5]   6.00-7.00   sec  73.8 MBytes   619 Mbits/sec    0    621 KBytes
[  5]   7.00-8.00   sec  72.5 MBytes   608 Mbits/sec    0    706 KBytes
[  5]   8.00-9.00   sec  73.8 MBytes   619 Mbits/sec   20    580 KBytes
[  5]   9.00-10.00  sec  73.8 MBytes   619 Mbits/sec    0    672 KBytes
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval           Transfer     Bitrate         Retr
[  5]   0.00-10.00  sec   736 MBytes   618 Mbits/sec   71             sender
[  5]   0.00-10.01  sec   733 MBytes   615 Mbits/sec                  receiver

after:

$ iperf3 -c beaglev
Connecting to host beaglev, port 5201
[  5] local 192.168.85.6 port 44864 connected to 192.168.85.48 port 5201
[ ID] Interval           Transfer     Bitrate         Retr  Cwnd
[  5]   0.00-1.00   sec   109 MBytes   912 Mbits/sec   48    559 KBytes
[  5]   1.00-2.00   sec   108 MBytes   902 Mbits/sec    0    690 KBytes
[  5]   2.00-3.00   sec   106 MBytes   891 Mbits/sec   36    396 KBytes
[  5]   3.00-4.00   sec   108 MBytes   902 Mbits/sec    0    567 KBytes
[  5]   4.00-5.00   sec   106 MBytes   891 Mbits/sec    0    699 KBytes
[  5]   5.00-6.00   sec   106 MBytes   891 Mbits/sec   32    414 KBytes
[  5]   6.00-7.00   sec   106 MBytes   891 Mbits/sec    0    583 KBytes
[  5]   7.00-8.00   sec   106 MBytes   891 Mbits/sec    0    708 KBytes
[  5]   8.00-9.00   sec   106 MBytes   891 Mbits/sec   28    433 KBytes
[  5]   9.00-10.00  sec   108 MBytes   902 Mbits/sec    0    591 KBytes
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval           Transfer     Bitrate         Retr
[  5]   0.00-10.00  sec  1.04 GBytes   897 Mbits/sec  144             sender
[  5]   0.00-10.01  sec  1.04 GBytes   894 Mbits/sec                  receiver

The reduced CPU time spent in memcpy() is also visible with perf top.
This is the `perf top -Ue task-clock` output while running the test:

before:

Overhead  Shared Object  Symbol
  42.22%  [kernel]  [k] memcpy
  35.00%  [kernel]  [k] __asm_copy_to_user
   3.50%  [kernel]  [k] sifive_l2_flush64_range
   2.30%  [kernel]  [k] stmmac_napi_poll_rx
   1.11%  [kernel]  [k] memset

after:

Overhead  Shared Object  Symbol
  45.69%  [kernel]  [k] __asm_copy_to_user
  29.06%  [kernel]  [k] memcpy
   4.09%  [kernel]  [k] sifive_l2_flush64_range
   2.77%  [kernel]  [k] stmmac_napi_poll_rx
   1.24%  [kernel]  [k] memset

Signed-off-by: Matteo Croce <mcroce@microsoft.com>
teknoraver authored and intel-lab-lkp committed Jun 17, 2021
1 parent dd86005 commit c35a347
Showing 5 changed files with 98 additions and 113 deletions.
8 changes: 6 additions & 2 deletions arch/riscv/include/asm/string.h
@@ -12,9 +12,13 @@
 #define __HAVE_ARCH_MEMSET
 extern asmlinkage void *memset(void *, int, size_t);
 extern asmlinkage void *__memset(void *, int, size_t);
+
+#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
 #define __HAVE_ARCH_MEMCPY
-extern asmlinkage void *memcpy(void *, const void *, size_t);
-extern asmlinkage void *__memcpy(void *, const void *, size_t);
+extern void *memcpy(void *dest, const void *src, size_t count);
+extern void *__memcpy(void *dest, const void *src, size_t count);
+#endif
+
 #define __HAVE_ARCH_MEMMOVE
 extern asmlinkage void *memmove(void *, const void *, size_t);
 extern asmlinkage void *__memmove(void *, const void *, size_t);
2 changes: 0 additions & 2 deletions arch/riscv/kernel/riscv_ksyms.c
@@ -10,8 +10,6 @@
  * Assembly functions that may be used (directly or indirectly) by modules
  */
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(__memmove);
2 changes: 1 addition & 1 deletion arch/riscv/lib/Makefile
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0-only
 lib-y += delay.o
-lib-y += memcpy.o
 lib-y += memset.o
 lib-y += memmove.o
 lib-$(CONFIG_MMU) += uaccess.o
 lib-$(CONFIG_64BIT) += tishift.o
+lib-$(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE) += string.o
 
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
108 changes: 0 additions & 108 deletions arch/riscv/lib/memcpy.S

This file was deleted.

91 changes: 91 additions & 0 deletions arch/riscv/lib/string.c
@@ -0,0 +1,91 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * String functions optimized for hardware which doesn't
 * handle unaligned memory accesses efficiently.
 *
 * Copyright (C) 2021 Matteo Croce
 */

#include <linux/types.h>
#include <linux/module.h>

/* Minimum size for a word copy to be convenient */
#define MIN_THRESHOLD (BITS_PER_LONG / 8 * 2)

/* convenience union to avoid cast between different pointer types */
union types {
	u8 *u8;
	unsigned long *ulong;
	uintptr_t uptr;
};

union const_types {
	const u8 *u8;
	const unsigned long *ulong;
};

void *__memcpy(void *dest, const void *src, size_t count)
{
	const int bytes_long = BITS_PER_LONG / 8;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	const int mask = bytes_long - 1;
	const int distance = (src - dest) & mask;
#endif
	union const_types s = { .u8 = src };
	union types d = { .u8 = dest };

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (count < MIN_THRESHOLD)
		goto copy_remainder;

	/* copy a byte at a time until destination is aligned */
	for (; count && d.uptr & mask; count--)
		*d.u8++ = *s.u8++;

	if (distance) {
		unsigned long last, next;

		/* move s backward to the previous alignment boundary */
		s.u8 -= distance;

		/*
		 * 32/64 bit wide copy from s to d.
		 * d is aligned now but s is not, so read s alignment wise,
		 * and do proper shift to get the right value.
		 * Works only on Little Endian machines.
		 */
		for (next = s.ulong[0]; count >= bytes_long + mask; count -= bytes_long) {
			last = next;
			next = s.ulong[1];

			d.ulong[0] = last >> (distance * 8) |
				     next << ((bytes_long - distance) * 8);

			d.ulong++;
			s.ulong++;
		}

		/* restore s with the original offset */
		s.u8 += distance;
	} else
#endif
	{
		/*
		 * if the source and dest lower bits are the same, do a simple
		 * 32/64 bit wide copy.
		 */
		for (; count >= bytes_long; count -= bytes_long)
			*d.ulong++ = *s.ulong++;
	}

	/* suppress the unused-label warning when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y */
	goto copy_remainder;

copy_remainder:
	while (count--)
		*d.u8++ = *s.u8++;

	return dest;
}
EXPORT_SYMBOL(__memcpy);

void *memcpy(void *dest, const void *src, size_t count) __weak __alias(__memcpy);
EXPORT_SYMBOL(memcpy);
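
As a quick sanity check for routines like this, one can exercise every
small source/destination alignment and length against the known-good
libc memcpy(). A hypothetical userspace harness (not part of the patch;
it assumes __memcpy() above was built for userspace with u8 and
uintptr_t mapped to their <stdint.h> equivalents and BITS_PER_LONG
defined):

#include <assert.h>
#include <string.h>
#include <stddef.h>

extern void *__memcpy(void *dest, const void *src, size_t count);

int main(void)
{
	unsigned char src[128], dst[128], ref[128];
	size_t s_off, d_off, len;

	for (len = 0; len < sizeof(src); len++)
		src[len] = (unsigned char)len;

	/* every combination of small offsets and lengths */
	for (s_off = 0; s_off < 16; s_off++)
		for (d_off = 0; d_off < 16; d_off++)
			for (len = 0; len + s_off < 64 && len + d_off < 64; len++) {
				memset(dst, 0, sizeof(dst));
				memset(ref, 0, sizeof(ref));
				__memcpy(dst + d_off, src + s_off, len);
				memcpy(ref + d_off, src + s_off, len);
				assert(!memcmp(dst, ref, sizeof(dst)));
			}
	return 0;
}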
