Revert "drop page heap lock when returning memory back to kernel"
This reverts commit be3da70.

There are reports of crashes and false-positive OOMs from this
patch. Crashes under aggressive decommit mode are understood, but I
have yet to get confirmation of whether the false-positive OOMs were
seen under aggressive decommit or not. Thus let's revert for now.

Updates issue #1227 and issue #1204.
alk committed Dec 20, 2020
1 parent 151cbf5 commit 02d5264
Showing 3 changed files with 23 additions and 64 deletions.
src/page_heap.cc: 17 additions & 48 deletions
@@ -241,22 +241,12 @@ void PageHeap::CommitSpan(Span* span) {
   stats_.total_commit_bytes += (span->length << kPageShift);
 }
 
-bool PageHeap::TryDecommitWithoutLock(Span* span) {
+bool PageHeap::DecommitSpan(Span* span) {
   ++stats_.decommit_count;
-  ASSERT(span->location == Span::ON_NORMAL_FREELIST);
-  span->location = Span::IN_USE;
 
-  Static::pageheap_lock()->Unlock();
   bool rv = TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
                                    static_cast<size_t>(span->length << kPageShift));
-  Static::pageheap_lock()->Lock();
-
-  // We're not really on any free list at this point, but lets
-  // indicate if we're returned or not.
-  span->location = Span::ON_NORMAL_FREELIST;
-
   if (rv) {
-    span->location = Span::ON_RETURNED_FREELIST;
     stats_.committed_bytes -= span->length << kPageShift;
     stats_.total_decommit_bytes += (span->length << kPageShift);
   }
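
The unlock/lock pair deleted above is the heart of the reverted change. A
minimal, self-contained sketch of that pattern follows; the names are
illustrative only, with madvise(MADV_DONTNEED) standing in for
TCMalloc_SystemRelease and g_heap_lock for Static::pageheap_lock().

// Sketch only, not gperftools code: drop the heap lock around the
// page-releasing syscall, parking the span as IN_USE so that no other
// thread can merge or reuse it during the window.
#include <mutex>
#include <sys/mman.h>

static std::mutex g_heap_lock;  // stands in for Static::pageheap_lock()

struct SpanSketch {
  void* start;
  size_t bytes;
  enum Location { IN_USE, ON_NORMAL_FREELIST, ON_RETURNED_FREELIST } location;
};

// Caller holds g_heap_lock on entry; the lock is held again on return.
bool TryDecommitWithoutLockSketch(SpanSketch* s) {
  s->location = SpanSketch::IN_USE;  // unmergable while the lock is dropped
  g_heap_lock.unlock();              // race window opens: any other heap
                                     // operation may now run concurrently
  bool ok = madvise(s->start, s->bytes, MADV_DONTNEED) == 0;
  g_heap_lock.lock();                // race window closes
  s->location = ok ? SpanSketch::ON_RETURNED_FREELIST
                   : SpanSketch::ON_NORMAL_FREELIST;
  return ok;
}

The motivation, per the comment deleted from ReleaseSpan below, is that the
syscall can be slow (it walks page tables and, on Linux, contends on the
per-address-space mmap_sem), so the patch avoided holding the contended heap
lock across it. The revert trades that latency back for the simpler
invariant that the lock is held for the entire decommit.
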
@@ -330,19 +320,15 @@ Span* PageHeap::CheckAndHandlePreMerge(Span* span, Span* other) {
   if (other == NULL) {
     return other;
   }
-
-  // If we're in aggressive decommit mode and span is decommitted,
-  // then we try to decommit adjacent span. We also remove from it's
-  // free list as part of that.
+  // if we're in aggressive decommit mode and span is decommitted,
+  // then we try to decommit adjacent span.
   if (aggressive_decommit_ && other->location == Span::ON_NORMAL_FREELIST
       && span->location == Span::ON_RETURNED_FREELIST) {
-    if (ReleaseSpan(other) != 0) {
-      return other;
+    bool worked = DecommitSpan(other);
+    if (!worked) {
+      return NULL;
     }
-    return NULL;
-  }
-
-  if (other->location != span->location) {
+  } else if (other->location != span->location) {
     return NULL;
   }
 
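
Condensing the restored pre-merge rule above: in aggressive decommit mode, a
returned span may absorb a normal-freelist neighbor only if that neighbor
can itself be decommitted first; in every other case the two spans'
locations must already match. A hedged distillation, where decommit_ok
stands in for the result of DecommitSpan(other) and the helper is
hypothetical, not gperftools API:

// Sketch of the merge gate in CheckAndHandlePreMerge after the revert.
enum Location { IN_USE, ON_NORMAL_FREELIST, ON_RETURNED_FREELIST };

bool CanMergeSketch(Location span, Location other,
                    bool aggressive_decommit, bool decommit_ok) {
  if (aggressive_decommit && other == ON_NORMAL_FREELIST &&
      span == ON_RETURNED_FREELIST) {
    return decommit_ok;  // the neighbor must be decommitted first
  }
  return span == other;  // otherwise locations must already match
}

Note the behavioral shift: the reverted ReleaseSpan(other) also removed the
neighbor from its free list, while the restored DecommitSpan(other) only
flips its commit state and leaves the list manipulation to the merge code
that follows.
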
@@ -374,7 +360,7 @@ void PageHeap::MergeIntoFreeList(Span* span) {
   const Length n = span->length;
 
   if (aggressive_decommit_ && span->location == Span::ON_NORMAL_FREELIST) {
-    if (TryDecommitWithoutLock(span)) {
+    if (DecommitSpan(span)) {
       span->location = Span::ON_RETURNED_FREELIST;
     }
   }
@@ -481,35 +467,18 @@ void PageHeap::IncrementalScavenge(Length n) {
   }
 }
 
-Length PageHeap::ReleaseSpan(Span* span) {
-  // We're dropping very important and otherwise contended
-  // pageheap_lock around call to potentially very slow syscall to
-  // release pages. Those syscalls can be slow even with "advanced"
-  // things such as MADV_FREE and its better equivalents, because they
-  // have to walk actual page tables, and we sometimes deal with large
-  // spans, which sometimes takes lots of time to walk. Plus Linux
-  // grabs per-address space mmap_sem lock which could be extremely
-  // contended at times. So it is best if we avoid holding one
-  // contended lock while waiting for another.
-  //
-  // Note, we set span location to in-use, because our span could be found via
-  // pagemap in e.g. MergeIntoFreeList while we're not holding the lock. By
-  // marking it in-use we prevent this possibility. So span is removed from free
-  // list and marked "unmergable" and that guarantees safety during unlock-ful
-  // release.
-  ASSERT(span->location == Span::ON_NORMAL_FREELIST);
-  RemoveFromFreeList(span);
+Length PageHeap::ReleaseSpan(Span* s) {
+  ASSERT(s->location == Span::ON_NORMAL_FREELIST);
 
-  Length n = span->length;
-  if (TryDecommitWithoutLock(span)) {
-    span->location = Span::ON_RETURNED_FREELIST;
-  } else {
-    n = 0; // Mark that we failed to return.
-    span->location = Span::ON_NORMAL_FREELIST;
+  if (DecommitSpan(s)) {
+    RemoveFromFreeList(s);
+    const Length n = s->length;
+    s->location = Span::ON_RETURNED_FREELIST;
+    MergeIntoFreeList(s); // Coalesces if possible.
+    return n;
   }
 
-  MergeIntoFreeList(span); // Coalesces if possible.
-  return n;
+  return 0;
 }
 
 Length PageHeap::ReleaseAtLeastNPages(Length num_pages) {
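
The ReleaseSpan hunk shows the ordering change most clearly. The reverted
version pulled the span off the normal free list, parked it IN_USE,
decommitted with the lock dropped, and re-filed the span whether or not the
syscall succeeded; the restored version decommits first, under the lock, and
touches the free lists only on success. A condensed illustration (MiniSpan
and both function names are hypothetical):

// Illustration only: the two ReleaseSpan orderings this revert swaps.
#include <cstddef>

enum SpanState { SPAN_IN_USE, SPAN_ON_NORMAL, SPAN_ON_RETURNED };
struct MiniSpan { SpanState state; size_t pages; };

// Reverted ordering: remove first, decommit unlocked, re-file either way.
size_t ReleaseRevertedSketch(MiniSpan* s, bool decommit_ok) {
  s->state = SPAN_IN_USE;             // off the free list, unmergable
  // ... heap lock dropped, syscall runs, heap lock re-acquired ...
  s->state = decommit_ok ? SPAN_ON_RETURNED : SPAN_ON_NORMAL;
  return decommit_ok ? s->pages : 0;  // re-filed and merged either way
}

// Restored ordering: decommit while still holding the lock; on failure
// the span never leaves the normal free list at all.
size_t ReleaseRestoredSketch(MiniSpan* s, bool decommit_ok) {
  if (!decommit_ok) return 0;         // span untouched on failure
  s->state = SPAN_ON_RETURNED;        // removed, retagged, merged only now
  return s->pages;
}
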
src/page_heap.h: 4 additions & 4 deletions
@@ -314,7 +314,7 @@ class PERFTOOLS_DLL_DECL PageHeap {
   void CommitSpan(Span* span);
 
   // Decommit the span.
-  bool TryDecommitWithoutLock(Span* span);
+  bool DecommitSpan(Span* span);
 
   // Prepends span to appropriate free list, and adjusts stats.
   void PrependToFreeList(Span* span);
@@ -326,12 +326,12 @@
   // IncrementalScavenge(n) is called whenever n pages are freed.
   void IncrementalScavenge(Length n);
 
-  // Attempts to decommit 'span' and move it to the returned freelist.
+  // Attempts to decommit 's' and move it to the returned freelist.
   //
   // Returns the length of the Span or zero if release failed.
   //
-  // REQUIRES: 'span' must be on the NORMAL freelist.
-  Length ReleaseSpan(Span *span);
+  // REQUIRES: 's' must be on the NORMAL freelist.
+  Length ReleaseSpan(Span *s);
 
   // Checks if we are allowed to take more memory from the system.
   // If limit is reached and allowRelease is true, tries to release
src/tests/page_heap_test.cc: 2 additions & 12 deletions
@@ -11,19 +11,15 @@
 
 #include <memory>
 
-#include "base/logging.h"
-#include "base/spinlock.h"
-#include "common.h"
 #include "page_heap.h"
 #include "system-alloc.h"
-#include "static_vars.h"
+#include "base/logging.h"
+#include "common.h"
 
 DECLARE_int64(tcmalloc_heap_limit_mb);
 
 namespace {
 
-using tcmalloc::Static;
-
 // The system will only release memory if the block size is equal or hight than
 // system page size.
 static bool HaveSystemRelease =
@@ -48,7 +44,6 @@ static void CheckStats(const tcmalloc::PageHeap* ph,
 
 static void TestPageHeap_Stats() {
   std::unique_ptr<tcmalloc::PageHeap> ph(new tcmalloc::PageHeap());
-  SpinLockHolder h(Static::pageheap_lock());
 
   // Empty page heap
   CheckStats(ph.get(), 0, 0, 0);
@@ -84,8 +79,6 @@ static void AllocateAllPageTables() {
   // Make a separate PageHeap from the main test so the test can start without
   // any pages in the lists.
   std::unique_ptr<tcmalloc::PageHeap> ph(new tcmalloc::PageHeap());
-  SpinLockHolder h(Static::pageheap_lock());
-
   tcmalloc::Span *spans[kNumberMaxPagesSpans * 2];
   for (int i = 0; i < kNumberMaxPagesSpans; ++i) {
     spans[i] = ph->New(kMaxPages);
@@ -107,7 +100,6 @@ static void TestPageHeap_Limit() {
   AllocateAllPageTables();
 
   std::unique_ptr<tcmalloc::PageHeap> ph(new tcmalloc::PageHeap());
-  SpinLockHolder h(Static::pageheap_lock());
 
   CHECK_EQ(kMaxPages, 1 << (20 - kPageShift));
 
@@ -196,8 +188,6 @@ static void TestPageHeap_Limit() {
 } // namespace
 
 int main(int argc, char **argv) {
-  Static::InitStaticVars();
-
   TestPageHeap_Stats();
   TestPageHeap_Limit();
   printf("PASS\n");
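
The test-side deletions fall out of the same revert: TryDecommitWithoutLock
called Static::pageheap_lock()->Unlock(), which is only valid if the caller
actually holds that lock, so the tests had to take it with SpinLockHolder
and initialize the statics behind it via Static::InitStaticVars(). With
DecommitSpan restored, a standalone PageHeap never touches the global lock,
and the tests shrink back to plain construction and use. A condensed sketch
of the restored test shape (see the hunks above):

// No SpinLockHolder and no Static::InitStaticVars() are needed anymore.
#include <stdio.h>

#include <memory>

#include "page_heap.h"
#include "common.h"      // kMaxPages

int main() {
  std::unique_ptr<tcmalloc::PageHeap> ph(new tcmalloc::PageHeap());
  tcmalloc::Span* span = ph->New(kMaxPages);  // allocate a max-size span
  ph->Delete(span);                           // return it to the free lists
  printf("PASS\n");
  return 0;
}
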
