runtime: remove old page allocator

This change removes the old page allocator from the runtime.

Updates #35112.

Change-Id: Ib20e1c030f869b6318cd6f4288a9befdbae1b771
Reviewed-on: https://go-review.googlesource.com/c/go/+/195700
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
mknyszek committed Nov 8, 2019
1 parent e6135c2 commit 33dfd35
Showing 8 changed files with 27 additions and 1,606 deletions.
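Note on the replacement accounting: in the ReadMemStatsSlow hunk below, HeapReleased is now derived by walking the new page allocator's chunks and popcounting each chunk's scavenged bitmap. The following is a minimal standalone sketch of that idea only, not runtime code; the constants and names (pageSize, chunkPages, scavBitmap) are illustrative stand-ins.

package main

import (
	"fmt"
	"math/bits"
)

const (
	pageSize   = 8192 // bytes per page; illustrative, not tied to the runtime's value
	chunkPages = 512  // pages tracked per chunk; illustrative stand-in for pallocChunkPages
)

// scavBitmap models one chunk's scavenged bitmap: bit i set means page i
// of the chunk has been returned to the OS.
type scavBitmap [chunkPages / 64]uint64

// scavengedPages counts the set bits, i.e. the chunk's scavenged pages,
// playing the role of the popcntRange call in the hunk below.
func (b *scavBitmap) scavengedPages() uint64 {
	n := 0
	for _, w := range b {
		n += bits.OnesCount64(w)
	}
	return uint64(n)
}

func main() {
	chunks := make([]scavBitmap, 4)
	chunks[1][0] = 0xFF        // 8 scavenged pages in chunk 1
	chunks[3][2] = 1<<3 | 1<<7 // 2 scavenged pages in chunk 3

	var released uint64
	for i := range chunks {
		released += chunks[i].scavengedPages() * pageSize
	}
	fmt.Println("HeapReleased (bytes):", released) // 10 pages * 8192 = 81920
}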
178 changes: 3 additions & 175 deletions src/runtime/export_test.go
@@ -12,8 +12,6 @@ import (
	"unsafe"
)

const OldPageAllocator = oldPageAllocator

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
@@ -356,15 +354,9 @@ func ReadMemStatsSlow() (base, slow MemStats) {
			slow.BySize[i].Frees = bySize[i].Frees
		}

		if oldPageAllocator {
			for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
				slow.HeapReleased += uint64(i.span().released())
			}
		} else {
			for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
				pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
				slow.HeapReleased += uint64(pg) * pageSize
			}
		}
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		// Unused space in the current arena also counts as released space.
@@ -543,170 +535,6 @@ func MapTombstoneCheck(m map[int]int) {
	}
}

// UnscavHugePagesSlow returns the value of mheap_.freeHugePages
// and the number of unscavenged huge pages calculated by
// scanning the heap.
func UnscavHugePagesSlow() (uintptr, uintptr) {
	var base, slow uintptr
	// Run on the system stack to avoid deadlock from stack growth
	// trying to acquire the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		base = mheap_.free.unscavHugePages
		for _, s := range mheap_.allspans {
			if s.state.get() == mSpanFree && !s.scavenged {
				slow += s.hugePages()
			}
		}
		unlock(&mheap_.lock)
	})
	return base, slow
}

// Span is a safe wrapper around an mspan, whose memory
// is managed manually.
type Span struct {
	*mspan
}

func AllocSpan(base, npages uintptr, scavenged bool) Span {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		unlock(&mheap_.lock)
	})
	s.init(base, npages)
	s.scavenged = scavenged
	return Span{s}
}

func (s *Span) Free() {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
		unlock(&mheap_.lock)
	})
	s.mspan = nil
}

func (s Span) Base() uintptr {
	return s.mspan.base()
}

func (s Span) Pages() uintptr {
	return s.mspan.npages
}

type TreapIterType treapIterType

const (
	TreapIterScav TreapIterType = TreapIterType(treapIterScav)
	TreapIterHuge = TreapIterType(treapIterHuge)
	TreapIterBits = treapIterBits
)

type TreapIterFilter treapIterFilter

func TreapFilter(mask, match TreapIterType) TreapIterFilter {
	return TreapIterFilter(treapFilter(treapIterType(mask), treapIterType(match)))
}

func (s Span) MatchesIter(mask, match TreapIterType) bool {
	return treapFilter(treapIterType(mask), treapIterType(match)).matches(s.treapFilter())
}

type TreapIter struct {
	treapIter
}

func (t TreapIter) Span() Span {
	return Span{t.span()}
}

func (t TreapIter) Valid() bool {
	return t.valid()
}

func (t TreapIter) Next() TreapIter {
	return TreapIter{t.next()}
}

func (t TreapIter) Prev() TreapIter {
	return TreapIter{t.prev()}
}

// Treap is a safe wrapper around mTreap for testing.
//
// It must never be heap-allocated because mTreap is
// notinheap.
//
//go:notinheap
type Treap struct {
	mTreap
}

func (t *Treap) Start(mask, match TreapIterType) TreapIter {
	return TreapIter{t.start(treapIterType(mask), treapIterType(match))}
}

func (t *Treap) End(mask, match TreapIterType) TreapIter {
	return TreapIter{t.end(treapIterType(mask), treapIterType(match))}
}

func (t *Treap) Insert(s Span) {
	// mTreap uses a fixalloc in mheap_ for treapNode
	// allocation which requires the mheap_ lock to manipulate.
	// Locking here is safe because the treap itself never allocs
	// or otherwise ends up grabbing this lock.
	systemstack(func() {
		lock(&mheap_.lock)
		t.insert(s.mspan)
		unlock(&mheap_.lock)
	})
	t.CheckInvariants()
}

func (t *Treap) Find(npages uintptr) TreapIter {
	return TreapIter{t.find(npages)}
}

func (t *Treap) Erase(i TreapIter) {
	// mTreap uses a fixalloc in mheap_ for treapNode
	// freeing which requires the mheap_ lock to manipulate.
	// Locking here is safe because the treap itself never allocs
	// or otherwise ends up grabbing this lock.
	systemstack(func() {
		lock(&mheap_.lock)
		t.erase(i.treapIter)
		unlock(&mheap_.lock)
	})
	t.CheckInvariants()
}

func (t *Treap) RemoveSpan(s Span) {
	// See Erase about locking.
	systemstack(func() {
		lock(&mheap_.lock)
		t.removeSpan(s.mspan)
		unlock(&mheap_.lock)
	})
	t.CheckInvariants()
}

func (t *Treap) Size() int {
	i := 0
	t.mTreap.treap.walkTreap(func(t *treapNode) {
		i++
	})
	return i
}

func (t *Treap) CheckInvariants() {
	t.mTreap.treap.walkTreap(checkTreapNode)
	t.mTreap.treap.validateInvariants()
}

func RunGetgThreadSwitchTest() {
	// Test that getg works correctly with thread switch.
	// With gccgo, if we generate getg inlined, the backend
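The removed UnscavHugePagesSlow helper above cross-checks the runtime's unscavenged-huge-page counter against a full heap scan that sums s.hugePages() over free, unscavenged spans. Below is a hedged sketch of the underlying arithmetic (how many aligned physical huge pages fit entirely inside a span's address range); hugePagesIn and its constants are illustrative and not the runtime's actual helper, whose details differ.

package main

import "fmt"

const pageSize = 8192 // illustrative runtime page size

// hugePagesIn reports how many hugePageSize-aligned huge pages lie entirely
// within [base, base+npages*pageSize). hugePageSize must be a power of two.
func hugePagesIn(base, npages, hugePageSize uintptr) uintptr {
	if hugePageSize == 0 || npages*pageSize < hugePageSize {
		return 0
	}
	start := (base + hugePageSize - 1) &^ (hugePageSize - 1) // round base up
	end := (base + npages*pageSize) &^ (hugePageSize - 1)    // round limit down
	if start >= end {
		return 0
	}
	return (end - start) / hugePageSize
}

func main() {
	// A 512-page (4 MiB) span starting 8 KiB past a 2 MiB boundary fully
	// contains exactly one aligned 2 MiB huge page.
	fmt.Println(hugePagesIn(2<<20+8192, 512, 2<<20))
}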
23 changes: 0 additions & 23 deletions src/runtime/gc_test.go
@@ -464,29 +464,6 @@ func TestReadMemStats(t *testing.T) {
	}
}

func TestUnscavHugePages(t *testing.T) {
	if !runtime.OldPageAllocator {
		// This test is only relevant for the old page allocator.
		return
	}
	// Allocate 20 MiB and immediately free it a few times to increase
	// the chance that unscavHugePages isn't zero and that some kind of
	// accounting had to happen in the runtime.
	for j := 0; j < 3; j++ {
		var large [][]byte
		for i := 0; i < 5; i++ {
			large = append(large, make([]byte, runtime.PhysHugePageSize))
		}
		runtime.KeepAlive(large)
		runtime.GC()
	}
	base, slow := runtime.UnscavHugePagesSlow()
	if base != slow {
		logDiff(t, "unscavHugePages", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("unscavHugePages mismatch")
	}
}

func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
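The deleted TestUnscavHugePages follows the same cross-check shape as ReadMemStatsSlow: the runtime maintains a cheap running counter ("base"), the test recomputes the same quantity from scratch ("slow"), and the two must agree. A minimal standalone sketch of that pattern, with illustrative names (counterSet, slowTotal) rather than runtime APIs, is below.

package main

import "fmt"

// counterSet keeps an incrementally maintained total (the "fast" statistic)
// alongside the raw values, so a check can recompute the total from scratch
// and compare. Illustrative only; not a runtime API.
type counterSet struct {
	vals  []int
	total int
}

func (c *counterSet) Add(v int) {
	c.vals = append(c.vals, v)
	c.total += v
}

// slowTotal recomputes the statistic from the raw data, playing the role of
// the "slow" side in ReadMemStatsSlow-style checks.
func (c *counterSet) slowTotal() int {
	t := 0
	for _, v := range c.vals {
		t += v
	}
	return t
}

func main() {
	c := &counterSet{}
	for _, v := range []int{3, 5, 7} {
		c.Add(v)
	}
	if fast, slow := c.total, c.slowTotal(); fast != slow {
		fmt.Println("mismatch:", fast, slow)
	} else {
		fmt.Println("fast and slow totals agree:", fast)
	}
}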
3 changes: 0 additions & 3 deletions src/runtime/malloc.go
@@ -317,9 +317,6 @@ const (
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096

	// Whether to use the old page allocator or not.
	oldPageAllocator = false
)

// physPageSize is the size in bytes of the OS's physical pages.
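The deleted oldPageAllocator constant was a compile-time switch: with a constant false, the compiler can prove the `if oldPageAllocator` branches dead and drop them, which is why removing the flag goes together with removing the guarded code paths above. A minimal sketch of that pattern, with illustrative names:

package main

import "fmt"

// oldPath stands in for the removed oldPageAllocator constant. Because it is
// a boolean constant, the compiler treats `if oldPath` branches as dead code
// and eliminates them entirely.
const oldPath = false

func allocatorName() string {
	if oldPath {
		return "old page allocator" // unreachable while oldPath == false
	}
	return "new page allocator"
}

func main() {
	fmt.Println(allocatorName())
}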
4 changes: 0 additions & 4 deletions src/runtime/malloc_test.go
@@ -177,10 +177,6 @@ func TestPhysicalMemoryUtilization(t *testing.T) {
}

func TestScavengedBitsCleared(t *testing.T) {
	if OldPageAllocator {
		// This test is only relevant for the new page allocator.
		return
	}
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
