runtime: use offAddr in more parts of the runtime
This change uses the new offAddr type in more parts of the runtime where
we've been implicitly switching from the default address space to a
contiguous view. The purpose of offAddr is to represent addresses in the
contiguous view of the address space, and to make direct computations
between real addresses and offset addresses impossible. This change thus
improves readability in the runtime.

Updates #35788.

Change-Id: I4e1c5fed3ed68aa12f49a42b82eb3f46aba82fc1
Reviewed-on: https://go-review.googlesource.com/c/go/+/230718
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
mknyszek committed May 8, 2020
1 parent d69509f commit ea5f9b6
Showing 4 changed files with 99 additions and 86 deletions.
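The diff below leans on the offAddr helpers — lessThan, lessEqual, add, addr, and the minOffAddr/maxOffAddr bounds — whose definitions live in src/runtime/mranges.go and are not part of the hunks shown here. What follows is a minimal sketch of the idea, inferred from how the diff uses it; the field layout, the arenaBaseOffset value, and the bound definitions are illustrative assumptions, not the runtime's actual code.

// offaddr_sketch.go: illustrative only; see src/runtime/mranges.go for the real definitions.
package main

import "fmt"

// arenaBaseOffset linearizes a segmented address space: adding it to a raw
// address yields a position in a single contiguous "offset" address space.
// The runtime's value is platform-dependent; this one is made up.
const arenaBaseOffset uintptr = 0x0a00_0000_0000_0000

// offAddr is an address in the offset address space. Because it is a distinct
// type, raw uintptr addresses and offset addresses cannot be mixed by accident.
type offAddr struct{ a uintptr }

func (l offAddr) addr() uintptr             { return l.a }                  // back to a raw address
func (l offAddr) add(bytes uintptr) offAddr { return offAddr{l.a + bytes} } // advance by bytes

// Comparisons happen in the offset space, so ordering stays correct even when
// raw addresses straddle the point where the offset wraps around.
func (l1 offAddr) lessThan(l2 offAddr) bool  { return l1.a+arenaBaseOffset < l2.a+arenaBaseOffset }
func (l1 offAddr) lessEqual(l2 offAddr) bool { return l1.a+arenaBaseOffset <= l2.a+arenaBaseOffset }

// Illustrative bounds of the offset space.
var (
	minOffAddr = offAddr{^arenaBaseOffset + 1}          // maps to offset 0
	maxOffAddr = offAddr{^uintptr(0) - arenaBaseOffset} // maps to the maximum offset
)

func main() {
	base := offAddr{0x00c0_0000_0000}
	limit := base.add(64 << 10)
	fmt.Println(base.lessThan(limit), limit.addr()-base.addr())        // true 65536
	fmt.Println(minOffAddr.lessEqual(base), base.lessThan(maxOffAddr)) // true true
}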
14 changes: 7 additions & 7 deletions src/runtime/mgcscavenge.go
@@ -452,8 +452,8 @@ func (s *pageAlloc) scavengeStartGen() {
s.inUse.cloneInto(&s.scav.inUse)

// Pick the new starting address for the scavenger cycle.
var startAddr uintptr
if s.scav.scavLWM < s.scav.freeHWM {
var startAddr offAddr
if s.scav.scavLWM.lessThan(s.scav.freeHWM) {
// The "free" high watermark exceeds the "scavenged" low watermark,
// so there are free scavengable pages in parts of the address space
// that the scavenger already searched, the high watermark being the
@@ -467,7 +467,7 @@ func (s *pageAlloc) scavengeStartGen() {
// scavenging from where we were.
startAddr = s.scav.scavLWM
}
s.scav.inUse.removeGreaterEqual(startAddr)
s.scav.inUse.removeGreaterEqual(startAddr.addr())

// reservationBytes may be zero if s.inUse.totalBytes is small, or if
// scavengeReservationShards is large. This case is fine as the scavenger
@@ -478,8 +478,8 @@ func (s *pageAlloc) scavengeStartGen() {
s.scav.reservationBytes = alignUp(s.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
s.scav.gen++
s.scav.released = 0
s.scav.freeHWM = 0
s.scav.scavLWM = maxSearchAddr
s.scav.freeHWM = minOffAddr
s.scav.scavLWM = maxOffAddr
}

// scavengeReserve reserves a contiguous range of the address space
@@ -698,8 +698,8 @@ func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr
addr := chunkBase(ci) + uintptr(base)*pageSize

// Update the scavenge low watermark.
if addr < s.scav.scavLWM {
s.scav.scavLWM = addr
if oAddr := (offAddr{addr}); oAddr.lessThan(s.scav.scavLWM) {
s.scav.scavLWM = oAddr
}

// Only perform the actual scavenging if we're not in a test.
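The mgcscavenge.go hunks above reset the scavenger's watermarks to maxOffAddr/minOffAddr at the start of a generation and then tighten them with offset-space comparisons. Here is a stand-in for that pattern, reusing the offAddr sketch from above; scavTrack and its method names are made up, not the runtime's fields.

// scavTrack mirrors the two watermarks the scavenger keeps per generation.
type scavTrack struct {
	scavLWM offAddr // lowest offset address scavenged this generation
	freeHWM offAddr // highest offset address freed this generation
}

// startGen resets the watermarks to their extremes, as scavengeStartGen does,
// so the first real address observed in the new generation tightens them.
func (t *scavTrack) startGen() {
	t.scavLWM = maxOffAddr
	t.freeHWM = minOffAddr
}

// noteScavenged mirrors the scavLWM update in scavengeRangeLocked.
func (t *scavTrack) noteScavenged(addr uintptr) {
	if o := (offAddr{addr}); o.lessThan(t.scavLWM) {
		t.scavLWM = o
	}
}

// noteFreed mirrors the freeHWM update in (*pageAlloc).free.
func (t *scavTrack) noteFreed(limit uintptr) {
	if o := (offAddr{limit}); t.freeHWM.lessThan(o) {
		t.freeHWM = o
	}
}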
132 changes: 60 additions & 72 deletions src/runtime/mpagealloc.go
@@ -81,15 +81,14 @@ const (
// there should this change.
pallocChunksL2Bits = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
pallocChunksL1Shift = pallocChunksL2Bits

// Maximum searchAddr value, which indicates that the heap has no free space.
//
// We subtract arenaBaseOffset because we want this to represent the maximum
// value in the shifted address space, but searchAddr is stored as a regular
// memory address. See arenaBaseOffset for details.
maxSearchAddr = ^uintptr(0) - arenaBaseOffset
)

// Maximum searchAddr value, which indicates that the heap has no free space.
//
// We alias maxOffAddr just to make it clear that this is the maximum address
// for the page allocator's search space. See maxOffAddr for details.
var maxSearchAddr = maxOffAddr

// Global chunk index.
//
// Represents an index into the leaf level of the radix tree.
@@ -134,6 +133,18 @@ func (i chunkIdx) l2() uint {
}
}

// offAddrToLevelIndex converts an address in the offset address space
// to the index into summary[level] containing addr.
func offAddrToLevelIndex(level int, addr offAddr) int {
return int((addr.a + arenaBaseOffset) >> levelShift[level])
}

// levelIndexToOffAddr converts an index into summary[level] into
// the corresponding address in the offset address space.
func levelIndexToOffAddr(level, idx int) offAddr {
return offAddr{(uintptr(idx) << levelShift[level]) - arenaBaseOffset}
}

// addrsToSummaryRange converts base and limit pointers into a range
// of entries for the given summary level.
//
@@ -232,7 +243,7 @@ type pageAlloc struct {
// Note that adding in arenaBaseOffset transforms addresses
// to a new address space with a linear view of the full address
// space on architectures with segmented address spaces.
searchAddr uintptr
searchAddr offAddr

// start and end represent the chunk indices
// which pageAlloc knows about. It assumes
@@ -271,13 +282,13 @@ type pageAlloc struct {
// released is the amount of memory released this generation.
released uintptr

// scavLWM is the lowest address that the scavenger reached this
// scavLWM is the lowest (offset) address that the scavenger reached this
// scavenge generation.
scavLWM uintptr
scavLWM offAddr

// freeHWM is the highest address of a page that was freed to
// freeHWM is the highest (offset) address of a page that was freed to
// the page allocator this scavenge generation.
freeHWM uintptr
freeHWM offAddr
}

// mheap_.lock. This level of indirection makes it possible
@@ -319,29 +330,6 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
s.scav.scavLWM = maxSearchAddr
}

// compareSearchAddrTo compares an address against s.searchAddr in a linearized
// view of the address space on systems with discontinuous process address spaces.
// This linearized view is the same one generated by chunkIndex and arenaIndex,
// done by adding arenaBaseOffset.
//
// On systems without a discontinuous address space, it's just a normal comparison.
//
// Returns < 0 if addr is less than s.searchAddr in the linearized address space.
// Returns > 0 if addr is greater than s.searchAddr in the linearized address space.
// Returns 0 if addr and s.searchAddr are equal.
func (s *pageAlloc) compareSearchAddrTo(addr uintptr) int {
// Compare with arenaBaseOffset added because it gives us a linear, contiguous view
// of the heap on architectures with signed address spaces.
lAddr := addr + arenaBaseOffset
lSearchAddr := s.searchAddr + arenaBaseOffset
if lAddr < lSearchAddr {
return -1
} else if lAddr > lSearchAddr {
return 1
}
return 0
}

// chunkOf returns the chunk at the given chunk index.
func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
return &s.chunks[ci.l1()][ci.l2()]
@@ -378,10 +366,10 @@ func (s *pageAlloc) grow(base, size uintptr) {
s.inUse.add(makeAddrRange(base, limit))

// A grow operation is a lot like a free operation, so if our
// chunk ends up below the (linearized) s.searchAddr, update
// s.searchAddr to the new address, just like in free.
if s.compareSearchAddrTo(base) < 0 {
s.searchAddr = base
// chunk ends up below s.searchAddr, update s.searchAddr to the
// new address, just like in free.
if b := (offAddr{base}); b.lessThan(s.searchAddr) {
s.searchAddr = b
}

// Add entries into chunks, which is sparse, if needed. Then,
@@ -545,7 +533,7 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
// searchAddr returned is invalid and must be ignored.
//
// s.mheapLock must be held.
func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
func (s *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
// Search algorithm.
//
// This algorithm walks each level l of the radix tree from the root level
@@ -585,13 +573,13 @@ func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
// firstFree is updated by calling foundFree each time free space in the
// heap is discovered.
//
// At the end of the search, base-arenaBaseOffset is the best new
// At the end of the search, base.addr() is the best new
// searchAddr we could deduce in this search.
firstFree := struct {
base, bound uintptr
base, bound offAddr
}{
base: 0,
bound: (1<<heapAddrBits - 1),
base: minOffAddr,
bound: maxOffAddr,
}
// foundFree takes the given address range [addr, addr+size) and
// updates firstFree if it is a narrower range. The input range must
@@ -602,17 +590,17 @@ func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
// pages on the root level and narrow that down if we descend into
// that summary. But as soon as we need to iterate beyond that summary
// in a level to find a large enough range, we'll stop narrowing.
foundFree := func(addr, size uintptr) {
if firstFree.base <= addr && addr+size-1 <= firstFree.bound {
foundFree := func(addr offAddr, size uintptr) {
if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
// This range fits within the current firstFree window, so narrow
// down the firstFree window to the base and bound of this range.
firstFree.base = addr
firstFree.bound = addr + size - 1
} else if !(addr+size-1 < firstFree.base || addr > firstFree.bound) {
firstFree.bound = addr.add(size - 1)
} else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
// This range only partially overlaps with the firstFree range,
// so throw.
print("runtime: addr = ", hex(addr), ", size = ", size, "\n")
print("runtime: base = ", hex(firstFree.base), ", bound = ", hex(firstFree.bound), "\n")
print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
throw("range partially overlaps")
}
}
@@ -642,7 +630,7 @@
// searchAddr on the previous level or we're on the root level, in which
// case the searchAddr should be the same as i after levelShift.
j0 := 0
if searchIdx := int((s.searchAddr + arenaBaseOffset) >> levelShift[l]); searchIdx&^(entriesPerBlock-1) == i {
if searchIdx := offAddrToLevelIndex(l, s.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
j0 = searchIdx & (entriesPerBlock - 1)
}

@@ -668,7 +656,7 @@

// We've encountered a non-zero summary which means
// free memory, so update firstFree.
foundFree(uintptr((i+j)<<levelShift[l]), (uintptr(1)<<logMaxPages)*pageSize)
foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)

s := sum.start()
if size+s >= uint(npages) {
@@ -706,8 +694,8 @@
if size >= uint(npages) {
// We found a sufficiently large run of free pages straddling
// some boundary, so compute the address and return it.
addr := uintptr(i<<levelShift[l]) - arenaBaseOffset + uintptr(base)*pageSize
return addr, firstFree.base - arenaBaseOffset
addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
return addr, firstFree.base
}
if l == 0 {
// We're at level zero, so that means we've exhausted our search.
@@ -719,7 +707,7 @@
// lied to us. In either case, dump some useful state and throw.
print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
print("runtime: s.searchAddr = ", hex(s.searchAddr), ", i = ", i, "\n")
print("runtime: s.searchAddr = ", hex(s.searchAddr.addr()), ", i = ", i, "\n")
print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
for j := 0; j < len(entries); j++ {
sum := entries[j]
Expand Down Expand Up @@ -752,8 +740,8 @@ nextLevel:
// Since we actually searched the chunk, we may have
// found an even narrower free window.
searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
foundFree(searchAddr+arenaBaseOffset, chunkBase(ci+1)-searchAddr)
return addr, firstFree.base - arenaBaseOffset
foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
return addr, firstFree.base
}

// alloc allocates npages worth of memory from the page heap, returning the base
@@ -767,25 +755,25 @@
func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
// If the searchAddr refers to a region which has a higher address than
// any known chunk, then we know we're out of memory.
if chunkIndex(s.searchAddr) >= s.end {
if chunkIndex(s.searchAddr.addr()) >= s.end {
return 0, 0
}

// If npages has a chance of fitting in the chunk where the searchAddr is,
// search it directly.
searchAddr := uintptr(0)
if pallocChunkPages-chunkPageIndex(s.searchAddr) >= uint(npages) {
searchAddr := minOffAddr
if pallocChunkPages-chunkPageIndex(s.searchAddr.addr()) >= uint(npages) {
// npages is guaranteed to be no greater than pallocChunkPages here.
i := chunkIndex(s.searchAddr)
i := chunkIndex(s.searchAddr.addr())
if max := s.summary[len(s.summary)-1][i].max(); max >= uint(npages) {
j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr))
j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr.addr()))
if j == ^uint(0) {
print("runtime: max = ", max, ", npages = ", npages, "\n")
print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr), ", s.searchAddr = ", hex(s.searchAddr), "\n")
print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr.addr()), ", s.searchAddr = ", hex(s.searchAddr.addr()), "\n")
throw("bad summary data")
}
addr = chunkBase(i) + uintptr(j)*pageSize
searchAddr = chunkBase(i) + uintptr(searchIdx)*pageSize
searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
goto Found
}
}
@@ -807,10 +795,10 @@
// Go ahead and actually mark the bits now that we have an address.
scav = s.allocRange(addr, npages)

// If we found a higher (linearized) searchAddr, we know that all the
// heap memory before that searchAddr in a linear address space is
// If we found a higher searchAddr, we know that all the
// heap memory before that searchAddr in an offset address space is
// allocated, so bump s.searchAddr up to the new one.
if s.compareSearchAddrTo(searchAddr) > 0 {
if s.searchAddr.lessThan(searchAddr) {
s.searchAddr = searchAddr
}
return addr, scav
@@ -820,14 +808,14 @@
//
// s.mheapLock must be held.
func (s *pageAlloc) free(base, npages uintptr) {
// If we're freeing pages below the (linearized) s.searchAddr, update searchAddr.
if s.compareSearchAddrTo(base) < 0 {
s.searchAddr = base
// If we're freeing pages below the s.searchAddr, update searchAddr.
if b := (offAddr{base}); b.lessThan(s.searchAddr) {
s.searchAddr = b
}
// Update the free high watermark for the scavenger.
limit := base + npages*pageSize - 1
if s.scav.freeHWM < limit {
s.scav.freeHWM = limit
if offLimit := (offAddr{limit}); s.scav.freeHWM.lessThan(offLimit) {
s.scav.freeHWM = offLimit
}
if npages == 1 {
// Fast path: we're clearing a single bit, and we know exactly
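Among the mpagealloc.go changes, offAddrToLevelIndex and levelIndexToOffAddr replace inline expressions such as int((s.searchAddr + arenaBaseOffset) >> levelShift[l]). Below is a small illustration of how the pair relates, reusing the offAddr sketch above; the levelShift table here is a placeholder, not the runtime's real one.

// levelShift is a placeholder for the runtime's per-level shift table.
var levelShift = [...]uint{48, 34, 30, 26, 22}

// The two helpers added above, restated: the index of the summary block
// holding an offset address, and the offset address of a block's base.
func offAddrToLevelIndex(level int, addr offAddr) int {
	return int((addr.a + arenaBaseOffset) >> levelShift[level])
}

func levelIndexToOffAddr(level, idx int) offAddr {
	return offAddr{(uintptr(idx) << levelShift[level]) - arenaBaseOffset}
}

// blockBase shows the round trip: it aligns an offset address down to the base
// of its summary block at the given level, which is what find does when it
// calls levelIndexToOffAddr(l, i+j) on an index derived from s.searchAddr.
func blockBase(level int, addr offAddr) offAddr {
	return levelIndexToOffAddr(level, offAddrToLevelIndex(level, addr))
}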
12 changes: 6 additions & 6 deletions src/runtime/mpagecache.go
@@ -91,8 +91,8 @@ func (c *pageCache) flush(s *pageAlloc) {
}
// Since this is a lot like a free, we need to make sure
// we update the searchAddr just like free does.
if s.compareSearchAddrTo(c.base) < 0 {
s.searchAddr = c.base
if b := (offAddr{c.base}); b.lessThan(s.searchAddr) {
s.searchAddr = b
}
s.update(c.base, pageCachePages, false, false)
*c = pageCache{}
@@ -106,15 +106,15 @@ func (c *pageCache) flush(s *pageAlloc) {
func (s *pageAlloc) allocToCache() pageCache {
// If the searchAddr refers to a region which has a higher address than
// any known chunk, then we know we're out of memory.
if chunkIndex(s.searchAddr) >= s.end {
if chunkIndex(s.searchAddr.addr()) >= s.end {
return pageCache{}
}
c := pageCache{}
ci := chunkIndex(s.searchAddr) // chunk index
ci := chunkIndex(s.searchAddr.addr()) // chunk index
if s.summary[len(s.summary)-1][ci] != 0 {
// Fast path: there's free pages at or near the searchAddr address.
chunk := s.chunkOf(ci)
j, _ := chunk.find(1, chunkPageIndex(s.searchAddr))
j, _ := chunk.find(1, chunkPageIndex(s.searchAddr.addr()))
if j == ^uint(0) {
throw("bad summary data")
}
@@ -156,6 +156,6 @@ func (s *pageAlloc) allocToCache() pageCache {
// However, s.searchAddr is not allowed to point into unmapped heap memory
// unless it is maxSearchAddr, so make it the last page as opposed to
// the page after.
s.searchAddr = c.base + pageSize*(pageCachePages-1)
s.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
return c
}
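Across all three files, storing searchAddr as an offAddr lets lessThan replace the deleted compareSearchAddrTo helper. The stand-in below captures that shared pattern, reusing the offAddr sketch above; pageAllocStub and its method names are made up, not the runtime's pageAlloc.

// pageAllocStub holds only the field this pattern touches.
type pageAllocStub struct {
	searchAddr offAddr
}

// noteFreed lowers searchAddr when memory is freed (or grown in) below it,
// the pattern grow, free, and (*pageCache).flush now share.
func (s *pageAllocStub) noteFreed(base uintptr) {
	if b := (offAddr{base}); b.lessThan(s.searchAddr) {
		s.searchAddr = b
	}
}

// noteAllocated raises searchAddr once alloc has proven that everything below
// the returned hint is in use, replacing compareSearchAddrTo(...) > 0.
func (s *pageAllocStub) noteAllocated(hint offAddr) {
	if s.searchAddr.lessThan(hint) {
		s.searchAddr = hint
	}
}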
