runtime: ensure heap memstats are updated atomically
For the most part, heap memstats are already updated atomically when
passed down to OS-level memory functions (e.g. sysMap). Elsewhere,
however, they're updated with the heap lock.

In order to facilitate holding the heap lock for less time during
allocation paths, this change more consistently makes the update of
these statistics atomic by calling mSysStat{Inc,Dec} appropriately
instead of simply adding or subtracting. It also ensures these values
are loaded atomically.
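
As a rough, standalone sketch of the pattern (not the runtime's actual code:
the names sysStatInc/sysStatDec are invented here, sync/atomic stands in for
runtime/internal/atomic, and the overflow checks are only illustrative), the
change amounts to replacing a locked "stat += n" with an atomic add:

package main

import (
    "fmt"
    "sync/atomic"
)

// sysStatInc atomically adds n bytes to a system memory statistic.
func sysStatInc(stat *uint64, n uint64) {
    if v := atomic.AddUint64(stat, n); v < n {
        panic("stat overflow")
    }
}

// sysStatDec atomically subtracts n bytes from a system memory statistic.
func sysStatDec(stat *uint64, n uint64) {
    // Adding ^(n-1) is the two's-complement way to subtract n from a uint64.
    if v := atomic.AddUint64(stat, ^(n - 1)); v+n < n {
        panic("stat underflow")
    }
}

func main() {
    var heapIdle uint64
    sysStatInc(&heapIdle, 4<<20) // e.g. heap growth adding 4 MiB to the idle pool
    sysStatDec(&heapIdle, 1<<20) // e.g. an allocation taking 1 MiB back out
    fmt.Println(atomic.LoadUint64(&heapIdle)) // 3145728
}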

Furthermore, an undocumented but safe update condition for these
memstats is during STW, at which point using atomics is unnecessary.
This change also documents this condition in mstats.go.
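
Illustrative sketch only (not runtime code; the type and method names are
invented, and the STW guarantee is merely asserted in a comment, whereas in
the runtime it comes from stopping the world): once nothing else can run, a
plain read-modify-write is just as safe as the atomic one.

package main

import (
    "fmt"
    "sync/atomic"
)

type stats struct {
    heapSys uint64 // updated atomically, or with the world stopped
}

// addConcurrent is the normal case: other goroutines may read heapSys
// at any time, so the update must be atomic.
func (s *stats) addConcurrent(n uint64) {
    atomic.AddUint64(&s.heapSys, n)
}

// addSTW assumes the caller has stopped the world, so there are no
// concurrent readers or writers and no atomics are needed.
func (s *stats) addSTW(n uint64) {
    s.heapSys += n
}

func main() {
    var s stats
    s.addConcurrent(1 << 20)
    s.addSTW(2 << 20) // pretend the world is stopped here
    fmt.Println(atomic.LoadUint64(&s.heapSys)) // 3145728
}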

Updates #35112.

Change-Id: I87d0b6c27b98c88099acd2563ea23f8da1239b66
Reviewed-on: https://go-review.googlesource.com/c/go/+/196638
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
mknyszek committed Nov 8, 2019
1 parent 814c505 commit ae4534e
Showing 3 changed files with 19 additions and 18 deletions.
7 changes: 3 additions & 4 deletions src/runtime/mgcscavenge.go
@@ -57,6 +57,7 @@ package runtime

 import (
     "math/bits"
+    "runtime/internal/atomic"
     "unsafe"
 )

@@ -78,10 +79,8 @@ const (
 )

 // heapRetained returns an estimate of the current heap RSS.
-//
-// mheap_.lock must be held or the world must be stopped.
 func heapRetained() uint64 {
-    return memstats.heap_sys - memstats.heap_released
+    return atomic.Load64(&memstats.heap_sys) - atomic.Load64(&memstats.heap_released)
 }

 // gcPaceScavenger updates the scavenger's pacing, particularly
@@ -489,7 +488,7 @@ func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) {

     // Update global accounting only when not in test, otherwise
     // the runtime's accounting will be wrong.
-    memstats.heap_released += uint64(npages) * pageSize
+    mSysStatInc(&memstats.heap_released, uintptr(npages)*pageSize)
 }

 // fillAligned returns x but with all zeroes in m-aligned
22 changes: 12 additions & 10 deletions src/runtime/mheap.go
@@ -1003,7 +1003,7 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
         s.limit = s.base() + s.npages<<_PageShift
         s.state.set(mSpanManual) // Publish the span
         // Manually managed memory doesn't count toward heap_sys.
-        memstats.heap_sys -= uint64(s.npages << _PageShift)
+        mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
     }

     // This unlock acts as a release barrier. See mheap.alloc_m.
@@ -1113,7 +1113,7 @@ HaveBase:
         // sysUsed all the pages that are actually available
         // in the span.
         sysUsed(unsafe.Pointer(base), npage*pageSize)
-        memstats.heap_released -= uint64(scav)
+        mSysStatDec(&memstats.heap_released, scav)
     }

     s := (*mspan)(h.spanalloc.alloc())
@@ -1123,8 +1123,10 @@
     }
     h.setSpans(s.base(), npage, s)

-    *stat += uint64(npage << _PageShift)
-    memstats.heap_idle -= uint64(npage << _PageShift)
+    // Update stats.
+    nbytes := npage * pageSize
+    mSysStatInc(stat, nbytes)
+    mSysStatDec(&memstats.heap_idle, nbytes)

     return s
 }
@@ -1172,8 +1174,8 @@ func (h *mheap) grow(npage uintptr) bool {
         // The allocation is always aligned to the heap arena
         // size which is always > physPageSize, so its safe to
         // just add directly to heap_released.
-        memstats.heap_released += uint64(asize)
-        memstats.heap_idle += uint64(asize)
+        mSysStatInc(&memstats.heap_released, asize)
+        mSysStatInc(&memstats.heap_idle, asize)

         // Recalculate nBase
         nBase = alignUp(h.curArena.base+ask, physPageSize)
@@ -1237,8 +1239,8 @@ func (h *mheap) freeSpan(s *mspan) {
 func (h *mheap) freeManual(s *mspan, stat *uint64) {
     s.needzero = 1
     lock(&h.lock)
-    *stat -= uint64(s.npages << _PageShift)
-    memstats.heap_sys += uint64(s.npages << _PageShift)
+    mSysStatDec(stat, s.npages*pageSize)
+    mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
     h.freeSpanLocked(s, false, true)
     unlock(&h.lock)
 }
@@ -1264,10 +1266,10 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
     }

     if acctinuse {
-        memstats.heap_inuse -= uint64(s.npages << _PageShift)
+        mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
     }
     if acctidle {
-        memstats.heap_idle += uint64(s.npages << _PageShift)
+        mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
     }

     // Mark the space as free.
8 changes: 4 additions & 4 deletions src/runtime/mstats.go
@@ -31,7 +31,7 @@ type mstats struct {
     nfree uint64 // number of frees

     // Statistics about malloc heap.
-    // Protected by mheap.lock
+    // Updated atomically, or with the world stopped.
     //
     // Like MemStats, heap_sys and heap_inuse do not count memory
     // in manually-managed spans.
@@ -47,15 +47,15 @@

     // Statistics about allocation of low-level fixed-size structures.
     // Protected by FixAlloc locks.
-    stacks_inuse uint64 // bytes in manually-managed stack spans
+    stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
     stacks_sys   uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
     mspan_inuse  uint64 // mspan structures
     mspan_sys    uint64
     mcache_inuse uint64 // mcache structures
     mcache_sys   uint64
     buckhash_sys uint64 // profiling bucket hash table
-    gc_sys       uint64
-    other_sys    uint64
+    gc_sys       uint64 // updated atomically or during STW
+    other_sys    uint64 // updated atomically or during STW

     // Statistics about garbage collector.
     // Protected by mheap or stopping the world during GC.
