[release-branch.go1.18] runtime: store consistent total allocation stats as uint64

Currently the consistent total allocation stats are managed as uintptrs,
which means they can easily overflow on 32-bit systems. Fix this by
storing these stats as uint64s. This will cause some minor performance
degradation on 32-bit systems, but there isn't really a way around it:
the overflow affects the correctness of the metrics we export.

For #52680.
Fixes #52689.

Change-Id: I8b1926116e899ae9f03d58e0320bcb9264945b3e
Reviewed-on: https://go-review.googlesource.com/c/go/+/411495
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
mknyszek authored and heschi committed Jul 6, 2022
1 parent 4068be5 commit f0eca66
Showing 4 changed files with 39 additions and 37 deletions.
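As the commit message explains, a cumulative allocation counter kept in a uintptr is only 32 bits wide on 32-bit platforms, so it wraps after 4 GiB of total allocation. The following is a minimal, self-contained sketch of that failure mode; it simulates the 32-bit width with uint32 and is not runtime code:

    package main

    import "fmt"

    func main() {
        // On a 32-bit platform, uintptr is 32 bits wide, so a cumulative byte
        // counter stored in it wraps after 4 GiB. Simulate that width with
        // uint32 so the sketch behaves the same on any host.
        var total32 uint32
        var total64 uint64

        const allocSize = 64 << 10 // 64 KiB per simulated allocation
        for i := 0; i < 100000; i++ { // about 6.1 GiB allocated in total
            total32 += allocSize
            total64 += allocSize
        }

        fmt.Println(total32) // 2258632704: wrapped, the metric is now wrong
        fmt.Println(total64) // 6553600000: the true cumulative total
    }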
16 changes: 8 additions & 8 deletions src/runtime/mcache.go
@@ -175,11 +175,11 @@ func (c *mcache) refill(spc spanClass) {
// Assume all objects from this span will be allocated in the
// mcache. If it gets uncached, we'll adjust this.
stats := memstats.heapStats.acquire()
-atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
+atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], int64(s.nelems)-int64(s.allocCount))

// Flush tinyAllocs.
if spc == tinySpanClass {
-atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
c.tinyAllocs = 0
}
memstats.heapStats.release()
@@ -215,8 +215,8 @@ func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
throw("out of memory")
}
stats := memstats.heapStats.acquire()
-atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
-atomic.Xadduintptr(&stats.largeAllocCount, 1)
+atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
+atomic.Xadd64(&stats.largeAllocCount, 1)
memstats.heapStats.release()

// Update heapLive.
@@ -241,9 +241,9 @@ func (c *mcache) releaseAll() {
s := c.alloc[i]
if s != &emptymspan {
// Adjust nsmallalloc in case the span wasn't fully allocated.
-n := uintptr(s.nelems) - uintptr(s.allocCount)
+n := int64(s.nelems) - int64(s.allocCount)
stats := memstats.heapStats.acquire()
-atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
+atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
memstats.heapStats.release()
if s.sweepgen != sg+1 {
// refill conservatively counted unallocated slots in gcController.heapLive.
@@ -253,7 +253,7 @@ func (c *mcache) releaseAll() {
// gcController.heapLive was totally recomputed since
// caching this span, so we don't do this for
// stale spans.
-dHeapLive -= int64(n) * int64(s.elemsize)
+dHeapLive -= n * int64(s.elemsize)
}
// Release the span to the mcentral.
mheap_.central[i].mcentral.uncacheSpan(s)
@@ -266,7 +266,7 @@ func (c *mcache) releaseAll() {

// Flush tinyAllocs.
stats := memstats.heapStats.acquire()
-atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
c.tinyAllocs = 0
memstats.heapStats.release()

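The mcache.go changes above all follow the same flush pattern: counts accumulate in the per-P cache and are pushed into the shared stats with a 64-bit atomic add, and releaseAll may push a negative correction (note the -n), which is why the deltas are int64. Below is a rough, self-contained sketch of that pattern using sync/atomic; the type and method names are hypothetical, not the runtime's internal API:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // globalStats stands in for the runtime's consistent heap stats shard.
    // The cumulative counter is uint64 so it cannot wrap on 32-bit platforms.
    type globalStats struct {
        smallAllocCount uint64
    }

    // flush applies a signed delta to the unsigned cumulative counter, the way
    // the runtime now uses atomic.Xadd64. Converting a negative int64 to
    // uint64 wraps modulo 2^64, so adding it performs a subtraction.
    func (g *globalStats) flush(delta int64) {
        atomic.AddUint64(&g.smallAllocCount, uint64(delta))
    }

    func main() {
        var g globalStats

        // refill: optimistically count all 128 slots of a freshly cached span.
        g.flush(128)

        // releaseAll: the span came back with 5 slots unused, so undo them.
        g.flush(-5)

        fmt.Println(g.smallAllocCount) // 123
    }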
12 changes: 6 additions & 6 deletions src/runtime/metrics.go
@@ -388,13 +388,13 @@ func (a *heapStatsAggregate) compute() {
memstats.heapStats.read(&a.heapStatsDelta)

// Calculate derived stats.
-a.totalAllocs = uint64(a.largeAllocCount)
-a.totalFrees = uint64(a.largeFreeCount)
-a.totalAllocated = uint64(a.largeAlloc)
-a.totalFreed = uint64(a.largeFree)
+a.totalAllocs = a.largeAllocCount
+a.totalFrees = a.largeFreeCount
+a.totalAllocated = a.largeAlloc
+a.totalFreed = a.largeFree
for i := range a.smallAllocCount {
-na := uint64(a.smallAllocCount[i])
-nf := uint64(a.smallFreeCount[i])
+na := a.smallAllocCount[i]
+nf := a.smallFreeCount[i]
a.totalAllocs += na
a.totalFrees += nf
a.totalAllocated += na * uint64(class_to_size[i])
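With the fields already uint64, compute() can sum the per-size-class counts directly and derive byte totals by multiplying each count by its class's object size. A standalone sketch of that derivation; classToSize and the counts below are made-up values, not the runtime's class_to_size table:

    package main

    import "fmt"

    func main() {
        // Hypothetical size-class table: object size in bytes per class.
        classToSize := []uint64{8, 16, 32, 48, 64}

        // Per-size-class allocation counts, already uint64, no conversions.
        smallAllocCount := []uint64{1000, 500, 250, 100, 10}

        var totalAllocs, totalAllocated uint64
        for i, n := range smallAllocCount {
            totalAllocs += n                     // number of objects
            totalAllocated += n * classToSize[i] // bytes: count times class size
        }

        fmt.Println(totalAllocs, totalAllocated) // 1860 29440
    }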
6 changes: 3 additions & 3 deletions src/runtime/mgcsweep.go
@@ -666,7 +666,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
// free slots zeroed.
s.needzero = 1
stats := memstats.heapStats.acquire()
-atomic.Xadduintptr(&stats.smallFreeCount[spc.sizeclass()], uintptr(nfreed))
+atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
memstats.heapStats.release()
}
if !preserve {
@@ -713,8 +713,8 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
mheap_.freeSpan(s)
}
stats := memstats.heapStats.acquire()
-atomic.Xadduintptr(&stats.largeFreeCount, 1)
-atomic.Xadduintptr(&stats.largeFree, size)
+atomic.Xadd64(&stats.largeFreeCount, 1)
+atomic.Xadd64(&stats.largeFree, int64(size))
memstats.heapStats.release()
return true
}
42 changes: 22 additions & 20 deletions src/runtime/mstats.go
@@ -7,7 +7,6 @@
package runtime

import (
"internal/goarch"
"runtime/internal/atomic"
"unsafe"
)
@@ -565,29 +564,29 @@ func updatememstats() {
memstats.heapStats.unsafeRead(&consStats)

// Collect large allocation stats.
-totalAlloc := uint64(consStats.largeAlloc)
-memstats.nmalloc += uint64(consStats.largeAllocCount)
-totalFree := uint64(consStats.largeFree)
-memstats.nfree += uint64(consStats.largeFreeCount)
+totalAlloc := consStats.largeAlloc
+memstats.nmalloc += consStats.largeAllocCount
+totalFree := consStats.largeFree
+memstats.nfree += consStats.largeFreeCount

// Collect per-sizeclass stats.
for i := 0; i < _NumSizeClasses; i++ {
// Malloc stats.
-a := uint64(consStats.smallAllocCount[i])
+a := consStats.smallAllocCount[i]
totalAlloc += a * uint64(class_to_size[i])
memstats.nmalloc += a
memstats.by_size[i].nmalloc = a

// Free stats.
-f := uint64(consStats.smallFreeCount[i])
+f := consStats.smallFreeCount[i]
totalFree += f * uint64(class_to_size[i])
memstats.nfree += f
memstats.by_size[i].nfree = f
}

// Account for tiny allocations.
-memstats.nfree += uint64(consStats.tinyAllocCount)
-memstats.nmalloc += uint64(consStats.tinyAllocCount)
+memstats.nfree += consStats.tinyAllocCount
+memstats.nmalloc += consStats.tinyAllocCount

// Calculate derived stats.
memstats.total_alloc = totalAlloc
@@ -703,17 +702,20 @@ type heapStatsDelta struct {
inPtrScalarBits int64 // byte delta of memory reserved for unrolled GC prog bits

// Allocator stats.
-tinyAllocCount uintptr // number of tiny allocations
-largeAlloc uintptr // bytes allocated for large objects
-largeAllocCount uintptr // number of large object allocations
-smallAllocCount [_NumSizeClasses]uintptr // number of allocs for small objects
-largeFree uintptr // bytes freed for large objects (>maxSmallSize)
-largeFreeCount uintptr // number of frees for large objects (>maxSmallSize)
-smallFreeCount [_NumSizeClasses]uintptr // number of frees for small objects (<=maxSmallSize)

-// Add a uint32 to ensure this struct is a multiple of 8 bytes in size.
-// Only necessary on 32-bit platforms.
-_ [(goarch.PtrSize / 4) % 2]uint32
+//
+// These are all uint64 because they're cumulative, and could quickly wrap
+// around otherwise.
+tinyAllocCount uint64 // number of tiny allocations
+largeAlloc uint64 // bytes allocated for large objects
+largeAllocCount uint64 // number of large object allocations
+smallAllocCount [_NumSizeClasses]uint64 // number of allocs for small objects
+largeFree uint64 // bytes freed for large objects (>maxSmallSize)
+largeFreeCount uint64 // number of frees for large objects (>maxSmallSize)
+smallFreeCount [_NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)

+// NOTE: This struct must be a multiple of 8 bytes in size because it
+// is stored in an array. If it's not, atomic accesses to the above
+// fields may be unaligned and fail on 32-bit platforms.
}

// merge adds in the deltas from b into a.
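The NOTE added to the struct matters because heapStatsDelta is stored in an array: if its size were not a multiple of 8 bytes, some elements' uint64 fields would end up 4-byte-aligned, and 64-bit atomics on them would fault on 32-bit platforms. One way to pin such an invariant down at compile time is a constant size assertion; here is a sketch with a hypothetical struct, not the runtime's type:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // statsDelta mimics a stats record that must stay a multiple of 8 bytes so
    // its uint64 fields remain 8-byte-aligned when stored in an array.
    type statsDelta struct {
        committed int64
        released  int64
        allocs    uint64
        frees     uint64
    }

    // Compile-time size check: unsafe.Sizeof is a constant, so the array type
    // on the right is [0]byte only when the size is a multiple of 8; any other
    // size makes the assignment a type mismatch and the build fails.
    var _ [0]byte = [unsafe.Sizeof(statsDelta{}) % 8]byte{}

    func main() {
        fmt.Println(unsafe.Sizeof(statsDelta{})) // 32 on both 32- and 64-bit
    }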
