
runtime: make it possible to exit Go-created threads

Currently, threads created by the runtime exist until the whole
program exits. For #14592 and #20395, we want to be able to exit and
clean up threads created by the runtime. This commit implements that
mechanism.

The main difficulty is how to clean up the g0 stack. In cgo mode and
on Solaris and Windows where the OS manages thread stacks, we simply
arrange to return from mstart and let the system clean up the thread.
If the runtime allocated the g0 stack, then we use a new exitThread
syscall wrapper that arranges to clear a flag in the M once the stack
can safely be reaped and call the thread termination syscall.
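
As a rough sketch of that handshake (illustrative Go, not the
runtime's own code — the real exitThread is a per-platform assembly
stub, and the flag store must be the last thing it does before the
exit syscall, so the stack is never touched after being marked free):

	package main

	import (
		"fmt"
		"sync/atomic"
		"time"
	)

	// exitThreadSketch models the contract: write *wait = 0 at the
	// last moment the stack is still valid, then terminate. Nothing
	// may touch the stack after the store.
	func exitThreadSketch(wait *uint32) {
		atomic.StoreUint32(wait, 0)
		// (the real stub would invoke the thread-exit syscall here)
	}

	func main() {
		var freeWait uint32 = 1 // 1 = g0 stack still in use
		go exitThreadSketch(&freeWait)
		// A reaper may free the stack only after observing 0.
		for atomic.LoadUint32(&freeWait) != 0 {
			time.Sleep(time.Millisecond)
		}
		fmt.Println("stack safe to reclaim")
	}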

exitThread is based on the existing exit1 wrapper, which was always
meant to terminate the calling thread. However, exit1 has never been
used since it was introduced 9 years ago, so it was broken on several
platforms. exitThread also has the additional complication of having
to flag that the stack is unused, which requires some tricks on
platforms that use the stack for syscalls.

This still leaves the problem of how to reap the unused g0 stacks. For
this, we move the M from allm to a new freem list as part of the M
exiting. Later, allocm scans the freem list, finds Ms that are marked
as done with their stack, removes these from the list and frees their
g0 stacks. This also allows these Ms to be garbage collected.
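
In miniature, the sweep allocm performs over freem looks like this (a
standalone sketch with simplified, illustrative types; the real code
operates on runtime.m and calls stackfree):

	package main

	import "fmt"

	// mSketch is an illustrative stand-in for the runtime's m.
	type mSketch struct {
		id       int
		freeWait uint32 // 0 means the g0 stack is no longer in use
		freelink *mSketch
	}

	// sweepFree frees the stacks of Ms that are done with them and
	// rebuilds the list from the Ms that must wait, mirroring the
	// loop this commit adds to allocm.
	func sweepFree(freem *mSketch) *mSketch {
		var newList *mSketch
		for freem != nil {
			if freem.freeWait != 0 {
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			fmt.Printf("freeing g0 stack of m%d\n", freem.id)
			// Dropped from both lists; the M itself is now
			// garbage-collectable.
			freem = freem.freelink
		}
		return newList
	}

	func main() {
		list := &mSketch{id: 1, freeWait: 1,
			freelink: &mSketch{id: 2, freeWait: 0}}
		if rest := sweepFree(list); rest != nil {
			fmt.Println("still waiting:", rest.id)
		}
	}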

This CL does not yet use any of this functionality. Follow-up CLs
will. Likewise, there are no new tests in this CL because we'll need
follow-up functionality to test it.

Change-Id: Ic851ee74227b6d39c6fc1219fc71b45d3004bc63
Reviewed-on: https://go-review.googlesource.com/46037
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
aclements committed Jun 16, 2017
1 parent a9c3d09 commit eff2b2620db005cb58c266c0f25309d6f466cb25
@@ -98,6 +98,10 @@ _cgo_try_pthread_create(pthread_t* thread, const pthread_attr_t* attr, void* (*pfn)(void*), void* arg)
 	for (tries = 0; tries < 20; tries++) {
 		err = pthread_create(thread, attr, pfn, arg);
+		if (err == 0) {
+			pthread_detach(*thread);
+			return 0;
+		}
 		if (err != EAGAIN) {
 			return err;
 		}
@@ -181,6 +181,12 @@ func newosproc(mp *m, _ unsafe.Pointer) {
 	}
 }
 
+func exitThread(wait *uint32) {
+	// We should never reach exitThread on Solaris because we let
+	// libc clean up threads.
+	throw("exitThread")
+}
+
 var urandom_dev = []byte("/dev/urandom\x00")
 
 //go:nosplit
@@ -168,6 +168,9 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 	}
 }
 
+//go:noescape
+func exitThread(wait *uint32)
+
 //go:nosplit
 func semacreate(mp *m) {
 	if mp.waitsema != 0 {
@@ -21,6 +21,9 @@ const (
 	_UC_SIGMASK = 0x01
 	_UC_CPU     = 0x04
 
+	// From <sys/lwp.h>
+	_LWP_DETACHED = 0x00000040
+
 	_EAGAIN = 35
 )
@@ -182,7 +185,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 	lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, funcPC(netbsdMstart))
 
-	ret := lwp_create(unsafe.Pointer(&uc), 0, unsafe.Pointer(&mp.procid))
+	ret := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
 	sigprocmask(_SIG_SETMASK, &oset, nil)
 	if ret < 0 {
 		print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", -ret, ")\n")
@@ -421,6 +421,12 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 	}
 }
 
+func exitThread(wait *uint32) {
+	// We should never reach exitThread on Plan 9 because we let
+	// the OS clean up threads.
+	throw("exitThread")
+}
+
 //go:nosplit
 func semacreate(mp *m) {
 }
@@ -640,6 +640,9 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 		print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", getlasterror(), ")\n")
 		throw("runtime.newosproc")
 	}
+
+	// Close thandle to avoid leaking the thread object if it exits.
+	stdcall1(_CloseHandle, thandle)
 }
 
 // Used by the C library build mode. On Linux this function would allocate a
@@ -651,6 +654,12 @@ func newosproc0(mp *m, stk unsafe.Pointer) {
 	newosproc(mp, stk)
 }
 
+func exitThread(wait *uint32) {
+	// We should never reach exitThread on Windows because we let
+	// the OS clean up threads.
+	throw("exitThread")
+}
+
 // Called to initialize a new m (including the bootstrap m).
 // Called on the parent thread (main thread in case of bootstrap), can allocate memory.
 func mpreinit(mp *m) {
@@ -1152,7 +1152,8 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
 func mstart() {
 	_g_ := getg()
 
-	if _g_.stack.lo == 0 {
+	osStack := _g_.stack.lo == 0
+	if osStack {
 		// Initialize stack bounds from system stack.
 		// Cgo may have left stack size in stack.hi.
 		size := _g_.stack.hi
@@ -1166,21 +1167,30 @@ func mstart() {
 	// both Go and C functions with stack growth prologues.
 	_g_.stackguard0 = _g_.stack.lo + _StackGuard
 	_g_.stackguard1 = _g_.stackguard0
-	mstart1()
+	mstart1(0)
+
+	// Exit this thread.
+	if GOOS == "windows" || GOOS == "solaris" {
+		// Windows and Solaris always system-allocate the
+		// stack, but put it in _g_.stack before mstart, so
+		// the logic above hasn't set osStack yet.
+		osStack = true
+	}
+	mexit(osStack)
 }
 
-func mstart1() {
+func mstart1(dummy int32) {
 	_g_ := getg()
 
 	if _g_ != _g_.m.g0 {
 		throw("bad runtime·mstart")
 	}
 
-	// Record top of stack for use by mcall.
-	// Once we call schedule we're never coming back,
-	// so other calls can reuse this stack space.
-	gosave(&_g_.m.g0.sched)
-	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
+	// Record the caller for use as the top of stack in mcall and
+	// for terminating the thread.
+	// We're never coming back to mstart1 after we call schedule,
+	// so other calls can reuse the current frame.
+	save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
 	asminit()
 	minit()
@@ -1219,6 +1229,99 @@ func mstartm0() {
 	initsig(false)
 }
 
+// mexit tears down and exits the current thread.
+//
+// Don't call this directly to exit the thread, since it must run at
+// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
+// unwind the stack to the point that exits the thread.
+//
+// It is entered with m.p != nil, so write barriers are allowed. It
+// will release the P before exiting.
+//
+//go:yeswritebarrierrec
+func mexit(osStack bool) {
+	g := getg()
+	m := g.m
+
+	if m == &m0 {
+		// This is the main thread. Just wedge it.
+		//
+		// On Linux, exiting the main thread puts the process
+		// into a non-waitable zombie state. On Plan 9,
+		// exiting the main thread unblocks wait even though
+		// other threads are still running. On Solaris we can
+		// neither exitThread nor return from mstart. Other
+		// bad things probably happen on other platforms.
+		//
+		// We could try to clean up this M more before wedging
+		// it, but that complicates signal handling.
+		handoffp(releasep())
+		lock(&sched.lock)
+		sched.nmfreed++
+		checkdead()
+		unlock(&sched.lock)
+		notesleep(&m.park)
+		throw("locked m0 woke up")
+	}
+
+	sigblock()
+	unminit()
+
+	// Free the gsignal stack.
+	if m.gsignal != nil {
+		stackfree(m.gsignal.stack)
+	}
+
+	// Remove m from allm.
+	lock(&sched.lock)
+	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
+		if *pprev == m {
+			*pprev = m.alllink
+			goto found
+		}
+	}
+	throw("m not found in allm")
+found:
+	if !osStack {
+		// Delay reaping m until it's done with the stack.
+		//
+		// If this is using an OS stack, the OS will free it
+		// so there's no need for reaping.
+		atomic.Store(&m.freeWait, 1)
+		// Put m on the free list, though it will not be reaped until
+		// freeWait is 0. Note that the free list must not be linked
+		// through alllink because some functions walk allm without
+		// locking, so may be using alllink.
+		m.freelink = sched.freem
+		sched.freem = m
+	}
+	unlock(&sched.lock)
+
+	// Release the P.
+	handoffp(releasep())
+	// After this point we must not have write barriers.
+
+	// Invoke the deadlock detector. This must happen after
+	// handoffp because it may have started a new M to take our
+	// P's work.
+	lock(&sched.lock)
+	sched.nmfreed++
+	checkdead()
+	unlock(&sched.lock)
+
+	if osStack {
+		// Return from mstart and let the system thread
+		// library free the g0 stack and terminate the thread.
+		return
+	}
+
+	// mstart is the thread's entry point, so there's nothing to
+	// return to. Exit the thread directly. exitThread will clear
+	// m.freeWait when it's done with the stack and the m can be
+	// reaped.
+	exitThread(&m.freeWait)
+}
+
 // forEachP calls fn(p) for every P p when p reaches a GC safe point.
 // If a P is currently executing code, this will bring the P to a GC
 // safe point and execute fn on that P. If the P is not executing code
@@ -1364,6 +1467,27 @@ func allocm(_p_ *p, fn func()) *m {
 	if _g_.m.p == 0 {
 		acquirep(_p_) // temporarily borrow p for mallocs in this function
 	}
+
+	// Release the free M list. We need to do this somewhere and
+	// this may free up a stack we can use.
+	if sched.freem != nil {
+		lock(&sched.lock)
+		var newList *m
+		for freem := sched.freem; freem != nil; {
+			if freem.freeWait != 0 {
+				next := freem.freelink
+				freem.freelink = newList
+				newList = freem
+				freem = next
+				continue
+			}
+			stackfree(freem.g0.stack)
+			freem = freem.freelink
+		}
+		sched.freem = newList
+		unlock(&sched.lock)
+	}
+
 	mp := new(m)
 	mp.mstartfn = fn
 	mcommoninit(mp)
@@ -3377,7 +3501,7 @@ func gcount() int32 {
 }
 
 func mcount() int32 {
-	return int32(sched.mnext)
+	return int32(sched.mnext - sched.nmfreed)
 }
 
 var prof struct {
@@ -3902,6 +4026,7 @@ func incidlelocked(v int32) {
 // Check for deadlock situation.
 // The check is based on number of running M's, if 0 -> deadlock.
+// sched.lock must be held.
 func checkdead() {
 	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
 	// there are no running goroutines. The calling program is
@@ -169,9 +169,13 @@ func efaceOf(ep *interface{}) *eface {
 // a word that is completely ignored by the GC than to have one for which
 // only a few updates are ignored.
 //
-// Gs, Ms, and Ps are always reachable via true pointers in the
-// allgs, allm, and allp lists or (during allocation before they reach those lists)
+// Gs and Ps are always reachable via true pointers in the
+// allgs and allp lists or (during allocation before they reach those lists)
 // from stack variables.
+//
+// Ms are always reachable via true pointers either from allm or
+// freem. Unlike Gs and Ps we do free Ms, so it's important that
+// nothing ever hold an muintptr across a safe point.
 
 // A guintptr holds a goroutine pointer, but typed as a uintptr
 // to bypass write barriers. It is used in the Gobuf goroutine state
@@ -221,6 +225,15 @@ func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
 //go:nosplit
 func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
 
 // muintptr is a *m that is not tracked by the garbage collector.
+//
+// Because we do free Ms, there are some additional constraints on
+// muintptrs:
+//
+// 1. Never hold an muintptr locally across a safe point.
+//
+// 2. Any muintptr in the heap must be owned by the M itself so it can
+//    ensure it is not in use when the last true *m is released.
 type muintptr uintptr
 
 //go:nosplit
@@ -413,7 +426,8 @@ type m struct {
 	inwb          bool // m is executing a write barrier
 	newSigstack   bool // minit on C thread called sigaltstack
 	printlock     int8
-	incgo         bool // m is executing a cgo call
+	incgo         bool   // m is executing a cgo call
+	freeWait      uint32 // if == 0, safe to free g0 and delete m (atomic)
 	fastrand      [2]uint32
 	needextram    bool
 	traceback     uint8
@@ -440,6 +454,7 @@ type m struct {
 	startingtrace bool
 	syscalltick   uint32
 	thread        uintptr // thread handle
+	freelink      *m      // on sched.freem
 
 	// these are here because they are too large to be on the stack
 	// of low-level NOSPLIT functions.
@@ -528,12 +543,16 @@ type schedt struct {
 	lock mutex
 
+	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
+	// sure to call checkdead().
+
 	midle        muintptr // idle m's waiting for work
 	nmidle       int32    // number of idle m's waiting for work
 	nmidlelocked int32    // number of locked m's waiting for work
 	mnext        int64    // number of m's that have been created and next M ID
 	maxmcount    int32    // maximum number of m's allowed (or die)
 	nmsys        int32    // number of system m's not counted for deadlock
+	nmfreed      int64    // cumulative number of freed m's
 
 	ngsys uint32 // number of system goroutines; updated atomically
@@ -560,6 +579,10 @@ type schedt struct {
 	deferlock mutex
 	deferpool [5]*_defer
 
+	// freem is the list of m's waiting to be freed when their
+	// m.freeWait is 0. Linked through m.freelink.
+	freem *m
+
 	gcwaiting  uint32 // gc is waiting to run
 	stopwait   int32
 	stopnote   note
@@ -136,7 +136,6 @@ func gosave(buf *gobuf)
 //go:noescape
 func jmpdefer(fv *funcval, argp uintptr)
 
-func exit1(code int32)
 func asminit()
 func setg(gg *g)
 func breakpoint()
@@ -25,3 +25,9 @@ func write(fd uintptr, p unsafe.Pointer, n int32) int32
 func open(name *byte, mode, perm int32) int32
 func madvise(addr unsafe.Pointer, n uintptr, flags int32)
+
+// exitThread terminates the current thread, writing *wait = 0 when
+// the stack is safe to reclaim.
+//
+//go:noescape
+func exitThread(wait *uint32)
