Skip to content

Commit

Permalink
libgo: ensure memmove, memset 8 byte atomicity on ppc64x
Browse files Browse the repository at this point in the history
Go requires that pointer moves are done 8 bytes at a time,
but gccgo uses libc's memmove and memset which does not require
that, and there are some cases where an 8 byte move might be
done as 4+4.

To enforce 8 byte moves for memmove and memset, this adds a
C implementation in libgo/runtime for memmove and memset to be
used on ppc64le and ppc64. Asm implementations were considered
but discarded to avoid different implementations for different
target ISAs.

Fixes golang/go#41428

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/294931
  • Loading branch information
laboger authored and ianlancetaylor committed Feb 26, 2021
1 parent a505fad commit 2fbed0d
Show file tree
Hide file tree
Showing 7 changed files with 144 additions and 7 deletions.
2 changes: 1 addition & 1 deletion gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
78a840e4940159a66072237f6b002ab79f441b79
56cf388da8d04bbd3824c4df34d77a8afa69749b

The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
1 change: 1 addition & 0 deletions libgo/Makefile.am
Expand Up @@ -454,6 +454,7 @@ runtime_files = \
runtime/go-fieldtrack.c \
runtime/go-matherr.c \
runtime/go-memclr.c \
runtime/go-memmove.c \
runtime/go-memequal.c \
runtime/go-nanotime.c \
runtime/go-now.c \
Expand Down
9 changes: 7 additions & 2 deletions libgo/Makefile.in
Expand Up @@ -244,8 +244,9 @@ am__objects_4 = runtime/aeshash.lo runtime/go-assert.lo \
runtime/go-caller.lo runtime/go-callers.lo runtime/go-cgo.lo \
runtime/go-construct-map.lo runtime/go-ffi.lo \
runtime/go-fieldtrack.lo runtime/go-matherr.lo \
runtime/go-memclr.lo runtime/go-memequal.lo \
runtime/go-nanotime.lo runtime/go-now.lo runtime/go-nosys.lo \
runtime/go-memclr.lo runtime/go-memmove.lo \
runtime/go-memequal.lo runtime/go-nanotime.lo \
runtime/go-now.lo runtime/go-nosys.lo \
runtime/go-reflect-call.lo runtime/go-setenv.lo \
runtime/go-signal.lo runtime/go-unsafe-pointer.lo \
runtime/go-unsetenv.lo runtime/go-unwind.lo \
Expand Down Expand Up @@ -906,6 +907,7 @@ runtime_files = \
runtime/go-fieldtrack.c \
runtime/go-matherr.c \
runtime/go-memclr.c \
runtime/go-memmove.c \
runtime/go-memequal.c \
runtime/go-nanotime.c \
runtime/go-now.c \
Expand Down Expand Up @@ -1367,6 +1369,8 @@ runtime/go-matherr.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-memclr.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-memmove.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-memequal.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-nanotime.lo: runtime/$(am__dirstamp) \
Expand Down Expand Up @@ -1435,6 +1439,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-matherr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-memclr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-memequal.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-memmove.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-nanotime.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-nosys.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-now.Plo@am__quote@
Expand Down
2 changes: 0 additions & 2 deletions libgo/go/runtime/stubs.go
Expand Up @@ -106,9 +106,7 @@ func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
memclrNoHeapPointers(ptr, n)
}

// memmove copies n bytes from "from" to "to".
//go:noescape
//extern __builtin_memmove
func memmove(to, from unsafe.Pointer, n uintptr)

//go:linkname reflect_memmove reflect.memmove
Expand Down
45 changes: 44 additions & 1 deletion libgo/runtime/go-memclr.c
Expand Up @@ -13,5 +13,48 @@ void memclrNoHeapPointers(void *, uintptr)
void
memclrNoHeapPointers (void *p1, uintptr len)
{
__builtin_memset (p1, 0, len);

#if !defined(__PPC64__)
__builtin_memset(p1, 0, len);
#else
int64 rem,drem,i;
uint64 offset;
volatile uint64 *vp;

if (len == 0) {
return;
}
rem = len;

offset = (uint64)p1 % 8;
// This memset is OK since it can't contain
// an 8 byte aligned pointer.
if ((rem < 8) || (offset > 0 && offset+rem <= 16)) {
__builtin_memset(p1, 0, rem);
return;
}
// Move initial bytes to get to 8 byte boundary
if (offset > 0) {
__builtin_memset(p1, 0, 8-offset);
p1 = (void*)((char*)p1+8-offset);
rem -= 8-offset;
}

// If at least 8 bytes left, clear
drem = rem>>3;

vp = (volatile uint64*)(p1);
// Without the use of volatile here, the compiler
// might convert the loop into a memset.
for (i=0; i<drem; i++) {
*vp = 0;
vp++;
rem -= 8;
}
p1 = (void*)((char*)p1 + 8*drem);
// Clear any remaining
if (rem > 0) {
__builtin_memset (p1, 0, rem);
}
#endif
}
89 changes: 89 additions & 0 deletions libgo/runtime/go-memmove.c
@@ -0,0 +1,89 @@
/* go-memmove.c -- memmove
Copyright 2021 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file. */

#include "runtime.h"

void gomemmove(void *, void *, uintptr)
__asm__ (GOSYM_PREFIX "runtime.memmove")
__attribute__ ((no_split_stack));

// This implementation is necessary since
// the __builtin_memmove might use __libc_memmove
// which doesn't require atomicity of 8 byte
// moves.

void
gomemmove (void *dst, void *src, uintptr len)
{
#if !defined(__PPC64__)
__builtin_memmove(dst, src, len);
#else
uint64 offset, tail;
int64 rem;
uint64 dwords;
uint64 i;
char *bdst,*bsrc;

rem = len;

if (len == 0) {
return;
}

// If src and dst don't have the same 8 byte alignment then
// there is no issue with copying pointer atomicity. Use the
// builtin.
if (((uint64)dst % 8) != ((uint64)src % 8) || len < 8) {
__builtin_memmove(dst, src, len);
return;
}

// Length >= 8 && same ptr alignment
offset = (uint64)dst % 8;

// If not 8 byte alignment, move the intial bytes.
if (offset > 0) {
__builtin_memmove(dst, src, 8-offset);
dst += (8-offset);
src += (8-offset);
rem -= (8-offset);
}

// Move the tail bytes to make the backward move
// easier.
tail = rem % 8;
if (tail > 0) {
__builtin_memmove(dst+rem-tail, src+rem-tail, tail);
rem -= tail;
}

if (rem == 0) {
return;
}

// Must now be 8 byte alignment and rem is multiple of 8.
dwords = len>>3;

// Determine if a backwards move is needed
// Forward or backward, move all doublewords

if ((uint64)(dst - src) < (uint64)rem) {
bdst = dst+rem-8;
bsrc = src+rem-8;
for (i = 0; i<dwords; i++) {
*(uint64*)bdst = *(uint64*)bsrc;
bdst -= 8;
bsrc -= 8;
}
} else {
for (i = 0; i<dwords; i++) {
*(uint64*)dst = *(uint64*)src;
dst += 8;
src += 8;
}
}
#endif
}
3 changes: 2 additions & 1 deletion libgo/runtime/runtime.h
Expand Up @@ -221,7 +221,8 @@ bool runtime_canpanic(G*);
void runtime_printf(const char*, ...);
int32 runtime_snprintf(byte*, int32, const char*, ...);
#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
void runtime_memmove(void*, void*, uint64)
__asm__ (GOSYM_PREFIX "runtime.memmove");
String runtime_gostringnocopy(const byte*)
__asm__ (GOSYM_PREFIX "runtime.gostringnocopy");
void runtime_ginit(void)
Expand Down

0 comments on commit 2fbed0d

Please sign in to comment.