mm: don't try to unmap more than needed
maskAR can contain unrelated memory regions, and it is unclear why we would want to unmap them.

pendingFileDecRefs has been borrowed from cl/408102827.

PiperOrigin-RevId: 627867055
avagin authored and gvisor-bot committed Apr 24, 2024
1 parent 0e2f70d commit 9c7a659
Showing 2 changed files with 86 additions and 15 deletions.
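
Before this change, the copy-on-write path eagerly called mm.unmapASLocked(maskAR); since maskAR is ar expanded to privateAllocUnit alignment, the unmap could cover mappings that were never replaced. The diff below instead accumulates unmapAR, the union of only the ranges whose pmas are actually replaced, and defers both the unmap and the memmap.File DecRefs to function exit. A minimal sketch of the resulting shape, using hypothetical stand-ins (addrRange, unmapAS, getPMAs) rather than the real hostarch/mm types:

package main

import "fmt"

// addrRange and unmapAS are hypothetical stand-ins for hostarch.AddrRange
// and MemoryManager.unmapASLocked.
type addrRange struct{ start, end uint64 }

func unmapAS(ar addrRange) {
	if ar == (addrRange{}) {
		fmt.Println("nothing to unmap") // zero-length range: early return
		return
	}
	fmt.Printf("unmap [%#x, %#x)\n", ar.start, ar.end)
}

func getPMAs() {
	var pendingDecRefs []string // cf. *pendingFileDecRefs
	defer func() {              // registered first, so it runs LAST (defers are LIFO)
		for _, fr := range pendingDecRefs {
			fmt.Println("DecRef", fr)
		}
	}()
	var unmapAR addrRange // union of ranges whose pmas were replaced
	defer func() {        // registered second, so it runs FIRST: unmap before DecRef
		unmapAS(unmapAR)
	}()

	// As pmas are replaced, grow unmapAR and queue DecRefs instead of
	// eagerly unmapping all of maskAR:
	unmapAR = addrRange{0x3000, 0x5000}
	pendingDecRefs = append(pendingDecRefs, "[0x0, 0x2000)")
}

func main() { getPMAs() }

The registration order of the two defers is what enforces the AddressSpace-unmap-before-DecRef requirement stated in Cleanup's preconditions.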
3 changes: 3 additions & 0 deletions pkg/sentry/mm/address_space.go
@@ -220,6 +220,9 @@ func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar hostarch.AddrRange, pl
 //
 // Preconditions: mm.activeMu must be locked.
 func (mm *MemoryManager) unmapASLocked(ar hostarch.AddrRange) {
+	if ar.Length() == 0 {
+		return
+	}
 	if mm.as == nil {
 		// No AddressSpace? Force all mappings to be unmapped on the next
 		// Activate.
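
The new guard exists because the pma.go change below calls unmapASLocked unconditionally from a defer: when no pma was replaced, unmapAR is still the zero hostarch.AddrRange, and the early return turns that call into a no-op. Illustrative only:

var unmapAR hostarch.AddrRange // zero value: Start == End == 0
mm.unmapASLocked(unmapAR)      // returns immediately, since ar.Length() == 0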
98 changes: 83 additions & 15 deletions pkg/sentry/mm/pma.go
@@ -16,6 +16,7 @@ package mm
 
 import (
 	"fmt"
+	"sync"
 	"sync/atomic"
 
 	"gvisor.dev/gvisor/pkg/context"
@@ -204,6 +205,14 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
 			panic(fmt.Sprintf("initial vma %v does not cover start of ar %v", vseg.Range(), ar))
 		}
 	}
+	var pfdrs *pendingFileDecRefs
+	defer func() { // must be a closure to avoid evaluating pfdrs immediately
+		pfdrs.Cleanup()
+	}()
+	var unmapAR hostarch.AddrRange
+	defer func() {
+		mm.unmapASLocked(unmapAR)
+	}()
 
 	memCgID := pgalloc.MemoryCgroupIDFromContext(ctx)
 	opts := pgalloc.AllocOpts{Kind: usage.Anonymous, Dir: pgalloc.BottomUp, MemCgID: memCgID}
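
The first defer in this hunk must be a closure because Go evaluates a deferred call's receiver and arguments at the point of the defer statement: defer pfdrs.Cleanup() would bind the receiver while pfdrs is still nil, so the pendingFileDecRefs later allocated by appendPendingFileDecRef would never be cleaned up. A standalone illustration of the difference (list is a hypothetical type, not from this change):

package main

import "fmt"

type list struct{ n int }

func (l *list) cleanup() {
	if l == nil {
		fmt.Println("nil receiver: nothing to do")
		return
	}
	fmt.Println("cleaning up", l.n, "entries")
}

func main() {
	var l *list
	defer l.cleanup()              // receiver evaluated here: l is still nil
	defer func() { l.cleanup() }() // closure: l is re-read on return
	l = &list{n: 3}
}

// Deferred calls run in LIFO order, printing:
//   cleaning up 3 entries
//   nil receiver: nothing to do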
@@ -217,7 +226,6 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
 
 	// Limit the range we allocate to ar, aligned to privateAllocUnit.
 	maskAR := privateAligned(ar)
-	didUnmapAS := false
 	// The range in which we iterate vmas and pmas is still limited to ar, to
 	// ensure that we don't allocate or COW-break a pma we don't need.
 	pseg, pgap := mm.pmas.Find(ar.Start)
@@ -387,13 +395,6 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
 					if fr.Length() == 0 {
 						return pstart, pseg.PrevGap(), err
 					}
-					// Unmap all of maskAR, not just copyAR, to minimize host
-					// syscalls. AddressSpace mappings must be removed before
-					// oldpma.file.DecRef().
-					if !didUnmapAS {
-						mm.unmapASLocked(maskAR)
-						didUnmapAS = true
-					}
 					// Replace the pma with a copy in the part of the address
 					// range where copying was successful. This doesn't change
 					// RSS.
@@ -403,7 +404,8 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
 						pstart = pmaIterator{} // iterators invalidated
 					}
 					oldpma = pseg.ValuePtr()
-					oldpma.file.DecRef(pseg.fileRange())
+					unmapAR = joinAddrRanges(unmapAR, copyAR)
+					pfdrs = appendPendingFileDecRef(pfdrs, oldpma.file, pseg.fileRange())
 					oldpma.file = mm.mf
 					oldpma.off = fr.Start
 					oldpma.translatePerms = hostarch.AnyAccess
@@ -455,18 +457,15 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
 				}
 				// Remove the part of the existing pma covered by new
 				// Translations, then insert new pmas. This doesn't change
-				// RSS. Note that we don't need to call unmapASLocked: any
-				// existing AddressSpace mappings are still valid (though
-				// less permissive than the new pmas indicate) until
-				// Invalidate is called, and will be replaced by future
-				// calls to mapASLocked.
+				// RSS.
 				if len(ts) == 0 {
 					return pstart, pseg.PrevGap(), err
 				}
 				transMR := memmap.MappableRange{ts[0].Source.Start, ts[len(ts)-1].Source.End}
 				transAR := vseg.addrRangeOf(transMR)
 				pseg = mm.pmas.Isolate(pseg, transAR)
-				pseg.ValuePtr().file.DecRef(pseg.fileRange())
+				unmapAR = joinAddrRanges(unmapAR, transAR)
+				pfdrs = appendPendingFileDecRef(pfdrs, pseg.ValuePtr().file, pseg.fileRange())
 				pgap = mm.pmas.Remove(pseg)
 				pstart = pmaIterator{} // iterators invalidated
 				for _, t := range ts {
@@ -1054,3 +1053,72 @@ func (pseg pmaIterator) fileRangeOf(ar hostarch.AddrRange) memmap.FileRange {
 	pstart := pseg.Start()
 	return memmap.FileRange{pma.off + uint64(ar.Start-pstart), pma.off + uint64(ar.End-pstart)}
 }

+// joinAddrRanges returns the smallest hostarch.AddrRange that is a superset of
+// both ar1 and ar2. If either ar1 or ar2 has length 0, joinAddrRanges returns
+// the other range. If both ar1 and ar2 have length 0, joinAddrRanges returns
+// an unspecified range with length 0.
+func joinAddrRanges(ar1, ar2 hostarch.AddrRange) hostarch.AddrRange {
+	if ar1.Length() == 0 {
+		return ar2
+	}
+	if ar2.Length() == 0 {
+		return ar1
+	}
+	ar := ar1
+	if ar.Start > ar2.Start {
+		ar.Start = ar2.Start
+	}
+	if ar.End < ar2.End {
+		ar.End = ar2.End
+	}
+	if checkInvariants {
+		if !ar.IsSupersetOf(ar1) || !ar.IsSupersetOf(ar2) {
+			panic(fmt.Sprintf("%v is not a superset of both %v and %v", ar, ar1, ar2))
+		}
+	}
+	return ar
+}
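
A hypothetical call sequence, for illustration: starting from the zero range, successive joins collapse disjoint ranges into one superset.

ar := joinAddrRanges(hostarch.AddrRange{}, hostarch.AddrRange{Start: 0x4000, End: 0x6000})
ar = joinAddrRanges(ar, hostarch.AddrRange{Start: 0x2000, End: 0x3000})
// ar is now [0x2000, 0x6000), which also covers the untouched gap
// [0x3000, 0x4000): unmapping a superset is safe, merely coarser.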

+// pendingFileDecRefs accumulates released memmap.FileRange references so that
+// calls to memmap.File.DecRef() can occur without holding locks.
+type pendingFileDecRefs struct {
+	slice []pendingFileDecRef
+}
+
+type pendingFileDecRef struct {
+	file memmap.File
+	fr   memmap.FileRange
+}
+
+var pendingFileDecRefsPool = sync.Pool{
+	New: func() any {
+		return &pendingFileDecRefs{}
+	},
+}
+
+func appendPendingFileDecRef(pfdrs *pendingFileDecRefs, file memmap.File, fr memmap.FileRange) *pendingFileDecRefs {
+	if pfdrs == nil {
+		pfdrs = pendingFileDecRefsPool.Get().(*pendingFileDecRefs)
+	}
+	pfdrs.slice = append(pfdrs.slice, pendingFileDecRef{file, fr})
+	return pfdrs
+}

+// Cleanup releases all references accumulated by pfdrs and releases ownership
+// of pfdrs. pfdrs may be nil.
+//
+// Preconditions: No AddressSpace ranges may be awaiting unmapping (since such
+// ranges may refer to memmap.File pages that will be dropped).
+func (pfdrs *pendingFileDecRefs) Cleanup() {
+	if pfdrs == nil {
+		return
+	}
+	for i := range pfdrs.slice {
+		pfdr := &pfdrs.slice[i]
+		pfdr.file.DecRef(pfdr.fr)
+		pfdr.file = nil // allow GC
+	}
+	pfdrs.slice = pfdrs.slice[:0]
+	pendingFileDecRefsPool.Put(pfdrs)
+}
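
A hedged usage sketch of the lifecycle (oldFile, oldFR, replacedAR, and unmapAR are illustrative names): references are queued while pmas are replaced and released only after the corresponding AddressSpace ranges are unmapped, satisfying the precondition above. In getPMAsInternalLocked this ordering falls out of defer LIFO order, since the Cleanup defer is registered before the unmapASLocked defer.

var pfdrs *pendingFileDecRefs
// While replacing a pma: queue the old reference rather than calling
// DecRef immediately; AddressSpace mappings may still point at its pages.
pfdrs = appendPendingFileDecRef(pfdrs, oldFile, oldFR)
unmapAR = joinAddrRanges(unmapAR, replacedAR)
// Once all replacements are done: unmap first, then drop the references.
mm.unmapASLocked(unmapAR)
pfdrs.Cleanup() // DecRefs everything and returns pfdrs to the pool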
