runtime: scan stacks conservatively at async safe points
This adds support for scanning the stack when a goroutine is stopped
at an async safe point. This is not yet lit up because asyncPreempt is
not yet injected, but prepares us for that.

This works by conservatively scanning the registers dumped in the
frame of asyncPreempt and its parent frame, which was stopped at an
asynchronous safe point.
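
In code terms, the decision reduces to roughly the following (a
condensed sketch of the scanframeworker change in the mgcmark.go diff
below, with the nil/size guards elided; it is not the verbatim code):

	// Sketch: scan this frame conservatively if it is asyncPreempt's
	// frame or the frame that asyncPreempt interrupted.
	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
	if state.conservative || isAsyncPreempt {
		// Treat every word of locals and arguments as a potential pointer.
		scanConservative(frame.sp, frame.varp-frame.sp, nil, gcw, state)
		scanConservative(frame.argp, frame.arglen, nil, gcw, state)
		// asyncPreempt's frame holds the interrupted frame's register
		// dump, so the parent frame must also be scanned conservatively.
		state.conservative = isAsyncPreempt
		return
	}
	// Otherwise, fall through to the usual precise stack-map scan.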

Conservative scanning works by only marking words that are pointers to
valid, allocated heap objects. One complication is pointers to stack
objects. In this case, we can't determine if the stack object is still
"allocated" or if it was freed by an earlier GC. Hence, we need to
propagate the conservativeness of scanning stack objects: if all
pointers found to a stack object were found via conservative scanning,
then the stack object itself needs to be scanned conservatively, since
its pointers may point to dead objects.
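
The per-word test this describes is distilled below (again a sketch;
the full version is the scanConservative function added to mgcmark.go
in this commit):

	// Sketch: classify each pointer-sized word val read from the frame.
	if state != nil && state.stack.lo <= val && val < state.stack.hi {
		// Possibly a pointer to a stack object. Record that it was
		// found conservatively so the object is scanned defensively.
		state.putPtr(val, true)
	} else if span := spanOfHeap(val); span != nil {
		// Mark val only if it points to an allocated heap object.
		if idx := span.objIndex(val); !span.isFree(idx) {
			greyobject(span.base()+idx*span.elemsize, b, i, span, gcw, idx)
		}
	}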

For #10958, #24543.

Change-Id: I7ff84b058c37cde3de8a982da07002eaba126fd6
Reviewed-on: https://go-review.googlesource.com/c/go/+/201761
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
aclements committed Nov 2, 2019
1 parent a3ffb0d commit d16ec13
Showing 5 changed files with 217 additions and 32 deletions.
3 changes: 3 additions & 0 deletions src/cmd/internal/objabi/funcid.go
@@ -38,6 +38,7 @@ const (
 	FuncID_gopanic
 	FuncID_panicwrap
 	FuncID_handleAsyncEvent
+	FuncID_asyncPreempt
 	FuncID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.)
 )

@@ -85,6 +86,8 @@ func GetFuncID(name, file string) FuncID {
 		return FuncID_panicwrap
 	case "runtime.handleAsyncEvent":
 		return FuncID_handleAsyncEvent
+	case "runtime.asyncPreempt":
+		return FuncID_asyncPreempt
 	case "runtime.deferreturn":
 		// Don't show in the call stack (used when invoking defer functions)
 		return FuncID_wrapper
4 changes: 4 additions & 0 deletions src/runtime/mgc.go
@@ -139,6 +139,10 @@ const (
 	_ConcurrentSweep = true
 	_FinBlockSize    = 4 * 1024
 
+	// debugScanConservative enables debug logging for stack
+	// frames that are scanned conservatively.
+	debugScanConservative = false
+
 	// sweepMinHeapDistance is a lower bound on the heap distance
 	// (in bytes) reserved for concurrent sweeping between GC
 	// cycles.
165 changes: 159 additions & 6 deletions src/runtime/mgcmark.go
@@ -757,13 +757,17 @@ func scanstack(gp *g, gcw *gcWork) {
 	}
 	if gp._panic != nil {
 		// Panics are always stack allocated.
-		state.putPtr(uintptr(unsafe.Pointer(gp._panic)))
+		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
 	}
 
 	// Find and scan all reachable stack objects.
+	//
+	// The state's pointer queue prioritizes precise pointers over
+	// conservative pointers so that we'll prefer scanning stack
+	// objects precisely.
 	state.buildIndex()
 	for {
-		p := state.getPtr()
+		p, conservative := state.getPtr()
 		if p == 0 {
 			break
 		}
@@ -778,7 +782,13 @@
 		}
 		obj.setType(nil) // Don't scan it again.
 		if stackTraceDebug {
-			println(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of type", t.string())
+			printlock()
+			print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of type", t.string())
+			if conservative {
+				print(" (conservative)")
+			}
+			println()
+			printunlock()
 		}
 		gcdata := t.gcdata
 		var s *mspan
@@ -796,7 +806,12 @@
 			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
 		}
 
-		scanblock(state.stack.lo+uintptr(obj.off), t.ptrdata, gcdata, gcw, &state)
+		b := state.stack.lo + uintptr(obj.off)
+		if conservative {
+			scanConservative(b, t.ptrdata, gcdata, gcw, &state)
+		} else {
+			scanblock(b, t.ptrdata, gcdata, gcw, &state)
+		}
 
 		if s != nil {
 			dematerializeGCProg(s)
@@ -820,7 +835,7 @@
 		x.nobj = 0
 		putempty((*workbuf)(unsafe.Pointer(x)))
 	}
-	if state.buf != nil || state.freeBuf != nil {
+	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
 		throw("remaining pointer buffers")
 	}
 }
@@ -832,6 +847,49 @@ func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
 		print("scanframe ", funcname(frame.fn), "\n")
 	}
 
+	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
+	if state.conservative || isAsyncPreempt {
+		if debugScanConservative {
+			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
+		}
+
+		// Conservatively scan the frame. Unlike the precise
+		// case, this includes the outgoing argument space
+		// since we may have stopped while this function was
+		// setting up a call.
+		//
+		// TODO: We could narrow this down if the compiler
+		// produced a single map per function of stack slots
+		// and registers that ever contain a pointer.
+		if frame.varp != 0 {
+			size := frame.varp - frame.sp
+			if size > 0 {
+				scanConservative(frame.sp, size, nil, gcw, state)
+			}
+		}
+
+		// Scan arguments to this frame.
+		if frame.arglen != 0 {
+			// TODO: We could pass the entry argument map
+			// to narrow this down further.
+			scanConservative(frame.argp, frame.arglen, nil, gcw, state)
+		}
+
+		if isAsyncPreempt {
+			// This function's frame contained the
+			// registers for the asynchronously stopped
+			// parent frame. Scan the parent
+			// conservatively.
+			state.conservative = true
+		} else {
+			// We only wanted to scan those two frames
+			// conservatively. Clear the flag for future
+			// frames.
+			state.conservative = false
+		}
+		return
+	}
+
 	locals, args, objs := getStackMap(frame, &state.cache, false)
 
 	// Scan local variables if stack frame has been allocated.
@@ -1104,7 +1162,7 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
 					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
 						greyobject(obj, b, i, span, gcw, objIndex)
 					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
-						stk.putPtr(p)
+						stk.putPtr(p, false)
 					}
 				}
 			}
@@ -1214,6 +1272,101 @@ func scanobject(b uintptr, gcw *gcWork) {
 	gcw.scanWork += int64(i)
 }
 
+// scanConservative scans block [b, b+n) conservatively, treating any
+// pointer-like value in the block as a pointer.
+//
+// If ptrmask != nil, only words that are marked in ptrmask are
+// considered as potential pointers.
+//
+// If state != nil, it's assumed that [b, b+n) is a block in the stack
+// and may contain pointers to stack objects.
+func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
+	if debugScanConservative {
+		printlock()
+		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
+		hexdumpWords(b, b+n, func(p uintptr) byte {
+			if ptrmask != nil {
+				word := (p - b) / sys.PtrSize
+				bits := *addb(ptrmask, word/8)
+				if (bits>>(word%8))&1 == 0 {
+					return '$'
+				}
+			}
+
+			val := *(*uintptr)(unsafe.Pointer(p))
+			if state != nil && state.stack.lo <= val && val < state.stack.hi {
+				return '@'
+			}
+
+			span := spanOfHeap(val)
+			if span == nil {
+				return ' '
+			}
+			idx := span.objIndex(val)
+			if span.isFree(idx) {
+				return ' '
+			}
+			return '*'
+		})
+		printunlock()
+	}
+
+	for i := uintptr(0); i < n; i += sys.PtrSize {
+		if ptrmask != nil {
+			word := i / sys.PtrSize
+			bits := *addb(ptrmask, word/8)
+			if bits == 0 {
+				// Skip 8 words (the loop increment will do the 8th)
+				//
+				// This must be the first time we've
+				// seen this word of ptrmask, so i
+				// must be 8-word-aligned, but check
+				// our reasoning just in case.
+				if i%(sys.PtrSize*8) != 0 {
+					throw("misaligned mask")
+				}
+				i += sys.PtrSize*8 - sys.PtrSize
+				continue
+			}
+			if (bits>>(word%8))&1 == 0 {
+				continue
+			}
+		}
+
+		val := *(*uintptr)(unsafe.Pointer(b + i))
+
+		// Check if val points into the stack.
+		if state != nil && state.stack.lo <= val && val < state.stack.hi {
+			// val may point to a stack object. This
+			// object may be dead from last cycle and
+			// hence may contain pointers to unallocated
+			// objects, but unlike heap objects we can't
+			// tell if it's already dead. Hence, if all
+			// pointers to this object are from
+			// conservative scanning, we have to scan it
+			// defensively, too.
+			state.putPtr(val, true)
+			continue
+		}
+
+		// Check if val points to a heap span.
+		span := spanOfHeap(val)
+		if span == nil {
+			continue
+		}
+
+		// Check if val points to an allocated object.
+		idx := span.objIndex(val)
+		if span.isFree(idx) {
+			continue
+		}
+
+		// val points to an allocated object. Mark it.
+		obj := span.base() + idx*span.elemsize
+		greyobject(obj, b, i, span, gcw, idx)
+	}
+}
+
 // Shade the object if it isn't already.
 // The object is not nil and known to be in the heap.
 // Preemption must be disabled.
76 changes: 50 additions & 26 deletions src/runtime/mgcstack.go
@@ -175,12 +175,23 @@ type stackScanState struct {
 	// stack limits
 	stack stack
 
+	// conservative indicates that the next frame must be scanned conservatively.
+	// This applies only to the innermost frame at an async safe-point.
+	conservative bool
+
 	// buf contains the set of possible pointers to stack objects.
 	// Organized as a LIFO linked list of buffers.
 	// All buffers except possibly the head buffer are full.
 	buf     *stackWorkBuf
 	freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis
 
+	// cbuf contains conservative pointers to stack objects. If
+	// all pointers to a stack object are obtained via
+	// conservative scanning, then the stack object may be dead
+	// and may contain dead pointers, so it must be scanned
+	// defensively.
+	cbuf *stackWorkBuf
+
 	// list of stack objects
 	// Objects are in increasing address order.
 	head *stackObjectBuf
@@ -194,17 +205,21 @@
 
 // Add p as a potential pointer to a stack object.
 // p must be a stack address.
-func (s *stackScanState) putPtr(p uintptr) {
+func (s *stackScanState) putPtr(p uintptr, conservative bool) {
 	if p < s.stack.lo || p >= s.stack.hi {
 		throw("address not a stack address")
 	}
-	buf := s.buf
+	head := &s.buf
+	if conservative {
+		head = &s.cbuf
+	}
+	buf := *head
 	if buf == nil {
 		// Initial setup.
 		buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
 		buf.nobj = 0
 		buf.next = nil
-		s.buf = buf
+		*head = buf
 	} else if buf.nobj == len(buf.obj) {
 		if s.freeBuf != nil {
 			buf = s.freeBuf
@@ -213,39 +228,48 @@ func (s *stackScanState) putPtr(p uintptr) {
 			buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
 		}
 		buf.nobj = 0
-		buf.next = s.buf
-		s.buf = buf
+		buf.next = *head
+		*head = buf
 	}
 	buf.obj[buf.nobj] = p
 	buf.nobj++
 }
 
 // Remove and return a potential pointer to a stack object.
 // Returns 0 if there are no more pointers available.
-func (s *stackScanState) getPtr() uintptr {
-	buf := s.buf
-	if buf == nil {
-		// Never had any data.
-		return 0
-	}
-	if buf.nobj == 0 {
-		if s.freeBuf != nil {
-			// Free old freeBuf.
-			putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
-		}
-		// Move buf to the freeBuf.
-		s.freeBuf = buf
-		buf = buf.next
-		s.buf = buf
-		if buf == nil {
-			// No more data.
-			putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
-			s.freeBuf = nil
-			return 0
-		}
-	}
-	buf.nobj--
-	return buf.obj[buf.nobj]
+//
+// This prefers non-conservative pointers so we scan stack objects
+// precisely if there are any non-conservative pointers to them.
+func (s *stackScanState) getPtr() (p uintptr, conservative bool) {
+	for _, head := range []**stackWorkBuf{&s.buf, &s.cbuf} {
+		buf := *head
+		if buf == nil {
+			// Never had any data.
+			continue
+		}
+		if buf.nobj == 0 {
+			if s.freeBuf != nil {
+				// Free old freeBuf.
+				putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
+			}
+			// Move buf to the freeBuf.
+			s.freeBuf = buf
+			buf = buf.next
+			*head = buf
+			if buf == nil {
+				// No more data in this list.
+				continue
+			}
+		}
+		buf.nobj--
+		return buf.obj[buf.nobj], head == &s.cbuf
+	}
+	// No more data in either list.
+	if s.freeBuf != nil {
+		putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
+		s.freeBuf = nil
+	}
+	return 0, false
 }
 
 // addObject adds a stack object at addr of type typ to the set of stack objects.
1 change: 1 addition & 0 deletions src/runtime/symtab.go
@@ -255,6 +255,7 @@ const (
 	funcID_gopanic
 	funcID_panicwrap
 	funcID_handleAsyncEvent
+	funcID_asyncPreempt
 	funcID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.)
 )

