From 0993b2fd06a43a2e51b68dd7d8b0643e50c54b9d Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Thu, 9 Feb 2017 14:11:13 -0500
Subject: [PATCH] runtime: remove g.stackAlloc

Since we're no longer stealing space for the stack barrier array from
the stack allocation, the stack allocation is simply
g.stack.hi-g.stack.lo.

Updates #17503.

Change-Id: Id9b450ae12c3df9ec59cfc4365481a0a16b7c601
Reviewed-on: https://go-review.googlesource.com/36621
Run-TryBot: Austin Clements
TryBot-Result: Gobot Gobot
Reviewed-by: Rick Hudson
---
 src/runtime/proc.go        | 10 ++++------
 src/runtime/runtime2.go    |  1 -
 src/runtime/signal_unix.go |  4 ----
 src/runtime/stack.go       | 19 +++++++++----------
 4 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index c960d81408c0a..5fc7d2539097d 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -2812,7 +2812,6 @@ func malg(stacksize int32) *g {
 		})
 		newg.stackguard0 = newg.stack.lo + _StackGuard
 		newg.stackguard1 = ^uintptr(0)
-		newg.stackAlloc = uintptr(stacksize)
 	}
 	return newg
 }
@@ -2949,11 +2948,11 @@ func gfput(_p_ *p, gp *g) {
 		throw("gfput: bad status (not Gdead)")
 	}
 
-	stksize := gp.stackAlloc
+	stksize := gp.stack.hi - gp.stack.lo
 
 	if stksize != _FixedStack {
 		// non-standard stack size - free it.
-		stackfree(gp.stack, gp.stackAlloc)
+		stackfree(gp.stack)
 		gp.stack.lo = 0
 		gp.stack.hi = 0
 		gp.stackguard0 = 0
@@ -3016,13 +3015,12 @@ retry:
 				gp.stack = stackalloc(_FixedStack)
 			})
 			gp.stackguard0 = gp.stack.lo + _StackGuard
-			gp.stackAlloc = _FixedStack
 		} else {
 			if raceenabled {
-				racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
+				racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
 			}
 			if msanenabled {
-				msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
+				msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
 			}
 		}
 	}
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 4812268a19848..f0b919453a360 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -341,7 +341,6 @@ type g struct {
 	_panic     *_panic // innermost panic - offset known to liblink
 	_defer     *_defer // innermost defer
 	m          *m      // current m; offset known to arm liblink
-	stackAlloc uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
 	sched      gobuf
 	syscallsp  uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
 	syscallpc  uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index 9a332693678d3..e0ea724f97a24 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -712,7 +712,6 @@ type gsignalStack struct {
 	stack       stack
 	stackguard0 uintptr
 	stackguard1 uintptr
-	stackAlloc  uintptr
 	stktopsp    uintptr
 }
 
@@ -729,7 +728,6 @@ func setGsignalStack(st *stackt, old *gsignalStack) {
 		old.stack = g.m.gsignal.stack
 		old.stackguard0 = g.m.gsignal.stackguard0
 		old.stackguard1 = g.m.gsignal.stackguard1
-		old.stackAlloc = g.m.gsignal.stackAlloc
 		old.stktopsp = g.m.gsignal.stktopsp
 	}
 	stsp := uintptr(unsafe.Pointer(st.ss_sp))
@@ -737,7 +735,6 @@ func setGsignalStack(st *stackt, old *gsignalStack) {
 	g.m.gsignal.stack.hi = stsp + st.ss_size
 	g.m.gsignal.stackguard0 = stsp + _StackGuard
 	g.m.gsignal.stackguard1 = stsp + _StackGuard
-	g.m.gsignal.stackAlloc = st.ss_size
 }
 
 // restoreGsignalStack restores the gsignal stack to the value it had
@@ -749,7 +746,6 @@ func restoreGsignalStack(st *gsignalStack) {
 	gp.stack = st.stack
 	gp.stackguard0 = st.stackguard0
 	gp.stackguard1 = st.stackguard1
-	gp.stackAlloc = st.stackAlloc
 	gp.stktopsp = st.stktopsp
 }
 
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 1bd7e87b2ed97..d6a4e4ea801f9 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -415,9 +415,10 @@ func stackalloc(n uint32) stack {
 // resources and must not split the stack.
 //
 //go:systemstack
-func stackfree(stk stack, n uintptr) {
+func stackfree(stk stack) {
 	gp := getg()
 	v := unsafe.Pointer(stk.lo)
+	n := stk.hi - stk.lo
 	if n&(n-1) != 0 {
 		throw("stack not a power of 2")
 	}
@@ -852,7 +853,7 @@ func copystack(gp *g, newsize uintptr, sync bool) {
 		fillstack(new, 0xfd)
 	}
 	if stackDebug >= 1 {
-		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
+		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
 	}
 
 	// Compute adjustment.
@@ -895,8 +896,6 @@ func copystack(gp *g, newsize uintptr, sync bool) {
 	gp.stack = new
 	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
 	gp.sched.sp = new.hi - used
-	oldsize := gp.stackAlloc
-	gp.stackAlloc = newsize
 	gp.stktopsp += adjinfo.delta
 
 	// Adjust pointers in the new stack.
@@ -906,7 +905,7 @@ func copystack(gp *g, newsize uintptr, sync bool) {
 	if stackPoisonCopy != 0 {
 		fillstack(old, 0xfc)
 	}
-	stackfree(old, oldsize)
+	stackfree(old)
 }
 
 // round x up to a power of 2.
@@ -1051,9 +1050,9 @@ func newstack(ctxt unsafe.Pointer) {
 	}
 
 	// Allocate a bigger segment and move the stack.
-	oldsize := int(gp.stackAlloc)
+	oldsize := gp.stack.hi - gp.stack.lo
 	newsize := oldsize * 2
-	if uintptr(newsize) > maxstacksize {
+	if newsize > maxstacksize {
 		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
 		throw("stack overflow")
 	}
@@ -1064,7 +1063,7 @@ func newstack(ctxt unsafe.Pointer) {
 
 	// The concurrent GC will not scan the stack while we are doing the copy since
 	// the gp is in a Gcopystack status.
-	copystack(gp, uintptr(newsize), true)
+	copystack(gp, newsize, true)
 	if stackDebug >= 1 {
 		print("stack grow done\n")
 	}
@@ -1098,7 +1097,7 @@ func shrinkstack(gp *g) {
 		if gp.stack.lo != 0 {
 			// Free whole stack - it will get reallocated
 			// if G is used again.
-			stackfree(gp.stack, gp.stackAlloc)
+			stackfree(gp.stack)
 			gp.stack.lo = 0
 			gp.stack.hi = 0
 		}
@@ -1120,7 +1119,7 @@ func shrinkstack(gp *g) {
 		return
 	}
 
-	oldsize := gp.stackAlloc
+	oldsize := gp.stack.hi - gp.stack.lo
 	newsize := oldsize / 2
 	// Don't shrink the allocation below the minimum-sized stack
 	// allocation.
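
For reference, a minimal sketch (outside the runtime) of the invariant the change relies on: once nothing else is carved out of a goroutine's stack allocation, its size is fully determined by the bounds, so stackfree and friends can recompute it as stk.hi - stk.lo instead of reading a cached g.stackAlloc. The stack type, the stackSize helper, and the literal bounds below are hypothetical stand-ins for illustration, not runtime code.

```go
package main

import "fmt"

// stack mirrors the runtime's stack bounds: the memory [lo, hi).
type stack struct {
	lo, hi uintptr
}

// stackSize derives the allocation size from the bounds alone,
// which is what the patch assumes now that nothing else is carved
// out of the allocation.
func stackSize(stk stack) uintptr {
	return stk.hi - stk.lo
}

func main() {
	stk := stack{lo: 0x1000, hi: 0x3000} // hypothetical 8 KiB stack
	n := stackSize(stk)
	// stackfree keeps its power-of-two check; it works just as well
	// on the derived size as it did on the stored one.
	if n&(n-1) != 0 {
		fmt.Println("stack not a power of 2")
		return
	}
	fmt.Printf("stack size: %d bytes\n", n)
}
```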