runtime: count scavenged bits for new allocation for new page allocator
This change makes the new page allocator return the amount of scavenged
memory (in bytes) contained in a new allocation so that mheap can update
memstats appropriately.

The accounting could be embedded into pageAlloc, but that would make
the new allocator more difficult to test.

Updates #35112.

Change-Id: I0f94f563d7af2458e6d534f589d2e7dd6af26d12
Reviewed-on: https://go-review.googlesource.com/c/go/+/195698
Reviewed-by: Austin Clements <austin@google.com>
mknyszek committed Nov 7, 2019
1 parent 7331708 commit e1ddf05
Showing 5 changed files with 281 additions and 76 deletions.
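
In short, pageAlloc.alloc now returns a pair: the base address of the allocation and the number of scavenged bytes inside it, which the caller subtracts from its released-memory accounting before reusing the pages. As a self-contained illustration of that contract (toyAlloc, the flat bitmaps, and the sizes below are invented for this sketch and are not runtime code):

package main

import "fmt"

const pageSize = 8192 // bytes per runtime page (8 KiB)

// toyAlloc mimics the new pageAlloc.alloc contract on flat bitmaps:
// find npages consecutive free pages, mark them allocated, and report
// how many bytes of the chosen range were scavenged. ok == false stands
// in for the runtime's 0 base address on failure.
func toyAlloc(alloced, scavenged []bool, npages int) (base int, scav uintptr, ok bool) {
	run := 0
	for i := range alloced {
		if alloced[i] {
			run = 0
			continue
		}
		run++
		if run == npages {
			base = i - npages + 1
			for j := base; j <= i; j++ {
				alloced[j] = true
				if scavenged[j] {
					scav += pageSize // count scavenged bytes, as allocRange now does
				}
			}
			return base, scav, true
		}
	}
	return 0, 0, false
}

func main() {
	alloced := make([]bool, 8)
	scavenged := []bool{true, true, false, false, true, false, false, false}
	base, scav, ok := toyAlloc(alloced, scavenged, 4)
	// A real caller (mheap) would subtract scav from its "released to
	// the OS" statistic before handing out the memory.
	fmt.Println(base, scav, ok) // 0 16384 true
}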
15 changes: 10 additions & 5 deletions src/runtime/export_test.go
@@ -751,9 +751,10 @@ type PallocBits pallocBits
func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
-func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
-func (b *PallocBits) Free(i, n uint)       { (*pallocBits)(b).free(i, n) }
-func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
+func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
+func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
+func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
+func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

// SummarizeSlow is a slow but more obviously correct implementation
// of (*pallocBits).summarize. Used for testing.
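
The new PopcntRange export wraps pageBits.popcntRange, which counts the set bits in positions [i, i+n) of a chunk's bitmap (the cast from *PallocBits works because pallocBits has pageBits as its underlying type). A minimal standalone sketch of a popcount over a bit range, using math/bits; the masking details here are illustrative and not necessarily the runtime's exact implementation:

package main

import (
	"fmt"
	"math/bits"
)

// popcntRange counts the set bits in positions [i, i+n) of bm, a bitmap
// stored as 64-bit words with bit 0 of word 0 being position 0.
func popcntRange(bm []uint64, i, n uint) uint {
	if n == 0 {
		return 0
	}
	count := uint(0)
	for w := i / 64; w <= (i+n-1)/64; w++ {
		word := bm[w]
		if w == i/64 {
			word &^= (1 << (i % 64)) - 1 // drop bits below i in the first word
		}
		if w == (i+n-1)/64 && (i+n)%64 != 0 {
			word &= (1 << ((i + n) % 64)) - 1 // drop bits at or above i+n in the last word
		}
		count += uint(bits.OnesCount64(word))
	}
	return count
}

func main() {
	bm := []uint64{^uint64(0), 0b1011} // bits 0-63 set, plus bits 64, 65, and 67
	fmt.Println(popcntRange(bm, 62, 4)) // bits 62, 63, 64, 65 are set: prints 4
}

In the runtime, the same popcount over a chunk's scavenged bitmap is what lets allocRange (below) report how much scavenged memory an allocation consumed.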
@@ -853,8 +854,12 @@ type ChunkIdx chunkIdx
// not in the heap, so is PageAlloc.
type PageAlloc pageAlloc

-func (p *PageAlloc) Alloc(npages uintptr) uintptr { return (*pageAlloc)(p).alloc(npages) }
-func (p *PageAlloc) Free(base, npages uintptr)    { (*pageAlloc)(p).free(base, npages) }
+func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
+	return (*pageAlloc)(p).alloc(npages)
+}
+func (p *PageAlloc) Free(base, npages uintptr) {
+	(*pageAlloc)(p).free(base, npages)
+}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
29 changes: 20 additions & 9 deletions src/runtime/mpagealloc.go
@@ -467,24 +467,33 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
// allocated. It also updates the summaries to reflect the newly-updated
// bitmap.
//
+// Returns the amount of scavenged memory in bytes present in the
+// allocated range.
+//
// s.mheapLock must be held.
-func (s *pageAlloc) allocRange(base, npages uintptr) {
+func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)

+	scav := uint(0)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
+		scav += s.chunks[sc].scavenged.popcntRange(si, ei+1-si)
		s.chunks[sc].allocRange(si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
+		scav += s.chunks[sc].scavenged.popcntRange(si, pallocChunkPages-si)
		s.chunks[sc].allocRange(si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
+			scav += s.chunks[c].scavenged.popcntRange(0, pallocChunkPages)
			s.chunks[c].allocAll()
		}
+		scav += s.chunks[ec].scavenged.popcntRange(0, ei+1)
		s.chunks[ec].allocRange(0, ei+1)
	}
	s.update(base, npages, true, true)
+	return uintptr(scav) * pageSize
}

// find searches for the first (address-ordered) contiguous free region of
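
allocRange splits [base, base+npages*pageSize) along chunk boundaries, using the runtime's layout of 512 pages of 8 KiB per 4 MiB chunk, and popcounts the scavenged bitmap of each piece. A small sketch of just that splitting arithmetic, with the real constants but simplified index helpers (the real chunkIndex and chunkPageIndex live in the runtime; any base offset they apply is ignored here):

package main

import "fmt"

const (
	pageSize         = 8192                        // 8 KiB runtime page
	pallocChunkPages = 512                         // pages per chunk
	pallocChunkBytes = pallocChunkPages * pageSize // 4 MiB per chunk
)

func chunkIndex(addr uintptr) uintptr  { return addr / pallocChunkBytes }
func chunkPageIndex(addr uintptr) uint { return uint(addr % pallocChunkBytes / pageSize) }

// chunkSpans reports, per chunk, the page sub-range that
// [base, base+npages*pageSize) covers, mirroring the sc == ec and
// boundary-crossing cases in allocRange.
func chunkSpans(base, npages uintptr) {
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		fmt.Printf("chunk %d: pages [%d, %d)\n", sc, si, ei+1)
		return
	}
	// The range crosses at least one chunk boundary.
	fmt.Printf("chunk %d: pages [%d, %d)\n", sc, si, pallocChunkPages)
	for c := sc + 1; c < ec; c++ {
		fmt.Printf("chunk %d: all %d pages\n", c, pallocChunkPages)
	}
	fmt.Printf("chunk %d: pages [0, %d)\n", ec, ei+1)
}

func main() {
	// 600 pages starting 100 pages into chunk 0: crosses into chunk 1,
	// printing [100, 512) of chunk 0 and [0, 188) of chunk 1.
	chunkSpans(100*pageSize, 600)
}

Summing the popcounted pages from each piece and multiplying by pageSize, as the new return uintptr(scav) * pageSize does, yields the byte count the caller receives.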
@@ -714,21 +723,23 @@ nextLevel:
}

// alloc allocates npages worth of memory from the page heap, returning the base
-// address for the allocation.
+// address for the allocation and the amount of scavenged memory in bytes
+// contained in the region [base address, base address + npages*pageSize).
//
-// Returns 0 on failure.
+// Returns a 0 base address on failure, in which case other returned values
+// should be ignored.
//
// s.mheapLock must be held.
-func (s *pageAlloc) alloc(npages uintptr) uintptr {
+func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(s.searchAddr) >= s.end {
-		return 0
+		return 0, 0
	}

	// If npages has a chance of fitting in the chunk where the searchAddr is,
	// search it directly.
-	var addr, searchAddr uintptr
+	searchAddr := uintptr(0)
	if pallocChunkPages-chunkPageIndex(s.searchAddr) >= uint(npages) {
		// npages is guaranteed to be no greater than pallocChunkPages here.
		i := chunkIndex(s.searchAddr)
@@ -756,19 +767,19 @@ func (s *pageAlloc) alloc(npages uintptr) uintptr {
			// accommodate npages.
			s.searchAddr = maxSearchAddr
		}
-		return 0
+		return 0, 0
	}
Found:
	// Go ahead and actually mark the bits now that we have an address.
-	s.allocRange(addr, npages)
+	scav = s.allocRange(addr, npages)

	// If we found a higher (linearized) searchAddr, we know that all the
	// heap memory before that searchAddr in a linear address space is
	// allocated, so bump s.searchAddr up to the new one.
	if s.compareSearchAddrTo(searchAddr) > 0 {
		s.searchAddr = searchAddr
	}
-	return addr
+	return addr, scav
}

// free returns npages worth of memory starting at base back to the page heap.
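
One subtlety from the hunk above: searchAddr is a first-fit hint, meaning every page below it is known to be allocated, and alloc only bumps it when the search proves a higher bound. A self-contained toy of that idea (firstFit and its fields are invented names; the real allocator tracks addresses and radix summaries rather than a flat bitmap):

package main

import "fmt"

// firstFit is a toy address-ordered first-fit allocator illustrating
// the searchAddr hint used by pageAlloc.alloc: every page below the
// hint is known to be allocated, so searches can start there.
type firstFit struct {
	alloced    []bool
	searchPage int // all pages below this index are allocated
}

func (f *firstFit) alloc(npages int) (base int, ok bool) {
	run, firstFree := 0, -1
	for i := f.searchPage; i < len(f.alloced); i++ {
		if f.alloced[i] {
			run = 0
			continue
		}
		if firstFree < 0 {
			firstFree = i // lowest free page seen during this search
		}
		run++
		if run == npages {
			base = i - npages + 1
			for j := base; j <= i; j++ {
				f.alloced[j] = true
			}
			// Everything below firstFree was already allocated before
			// this call, so it is a sound, conservative new hint (the
			// analogue of bumping s.searchAddr).
			if firstFree > f.searchPage {
				f.searchPage = firstFree
			}
			return base, true
		}
	}
	return 0, false
}

func main() {
	f := &firstFit{alloced: make([]bool, 16)}
	a, _ := f.alloc(4) // takes pages [0, 4)
	b, _ := f.alloc(2) // scans past [0, 4), takes [4, 6), hint moves to 4
	fmt.Println(a, b, f.searchPage) // 0 4 4
}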
