Bug 826515 - Occasionally GC DMD stacks. r=njn

1 parent 3dc05bf commit 1ea44b5eb1bee26beadcd77bcd48bd6b747bc650 @jlebar committed Jan 8, 2013
Showing with 64 additions and 10 deletions.
  1. +64 −10 memory/replace/dmd/DMD.cpp
@@ -770,6 +770,9 @@ typedef js::HashSet<StackTrace*, StackTrace, InfallibleAllocPolicy>
StackTraceTable;
static StackTraceTable* gStackTraceTable = nullptr;
+// We won't GC the stack trace table until it exceeds this many elements.
+static uint32_t gGCStackTraceTableWhenSizeExceeds = 4 * 1024;
+
void
StackTrace::Print(const Writer& aWriter, LocationService* aLocService) const
{
@@ -1017,6 +1020,61 @@ class Block
typedef js::HashSet<Block, Block, InfallibleAllocPolicy> BlockTable;
static BlockTable* gBlockTable = nullptr;
+typedef js::HashSet<const StackTrace*, js::DefaultHasher<const StackTrace*>,
+ InfallibleAllocPolicy>
+ StackTraceSet;
+
+// Add a pointer to each live stack trace into the given StackTraceSet. (A
+// stack trace is live if it's used by one of the live blocks.)
+static void
+GatherUsedStackTraces(StackTraceSet& aStackTraces)
+{
+ MOZ_ASSERT(gStateLock->IsLocked());
+ MOZ_ASSERT(Thread::Fetch()->InterceptsAreBlocked());
+
+ aStackTraces.finish();
+ aStackTraces.init(1024);
+
+ for (BlockTable::Range r = gBlockTable->all(); !r.empty(); r.popFront()) {
+ const Block& b = r.front();
+ aStackTraces.put(b.AllocStackTrace());
+ aStackTraces.put(b.ReportStackTrace1());
+ aStackTraces.put(b.ReportStackTrace2());
+ }
+
+ // Any of the stack traces added above may have been null. For the sake of
+ // cleanliness, don't leave the null pointer in the set.
+ aStackTraces.remove(nullptr);
+}
+
+// Delete stack traces that we aren't using, and compact our hashtable.
+static void
+GCStackTraces()
+{
+ MOZ_ASSERT(gStateLock->IsLocked());
+ MOZ_ASSERT(Thread::Fetch()->InterceptsAreBlocked());
+
+ StackTraceSet usedStackTraces;
+ GatherUsedStackTraces(usedStackTraces);
+
+ // Delete all unused stack traces from gStackTraceTable. The Enum destructor
+ // will automatically rehash and compact the table.
+ for (StackTraceTable::Enum e(*gStackTraceTable);
+ !e.empty();
+ e.popFront()) {
+ StackTrace* const& st = e.front();
+
+ if (!usedStackTraces.has(st)) {
+ e.removeFront();
+ InfallibleAllocPolicy::delete_(st);
+ }
+ }
+
+ // Schedule a GC when we have twice as many stack traces as we had right after
+ // this GC finished.
+ gGCStackTraceTableWhenSizeExceeds = 2 * gStackTraceTable->count();
+}
+
//---------------------------------------------------------------------------
// malloc/free callbacks
//---------------------------------------------------------------------------
@@ -1070,6 +1128,10 @@ FreeCallback(void* aPtr, Thread* aT)
AutoBlockIntercepts block(aT);
gBlockTable->remove(aPtr);
+
+ if (gStackTraceTable->count() > gGCStackTraceTableWhenSizeExceeds) {
+ GCStackTraces();
+ }
}
//---------------------------------------------------------------------------
@@ -1886,16 +1948,8 @@ SizeOfInternal(Sizes* aSizes)
return;
}
- js::HashSet<const StackTrace*, js::DefaultHasher<const StackTrace*>,
- InfallibleAllocPolicy> usedStackTraces;
- usedStackTraces.init(1024);
-
- for(BlockTable::Range r = gBlockTable->all(); !r.empty(); r.popFront()) {
- const Block& b = r.front();
- usedStackTraces.put(b.AllocStackTrace());
- usedStackTraces.put(b.ReportStackTrace1());
- usedStackTraces.put(b.ReportStackTrace2());
- }
+ StackTraceSet usedStackTraces;
+ GatherUsedStackTraces(usedStackTraces);
for (StackTraceTable::Range r = gStackTraceTable->all();
!r.empty();

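For readers skimming the patch, here is a minimal standalone sketch of the same idea, using the standard library rather than DMD's js::HashSet and InfallibleAllocPolicy. The names (Trace, gGCWhenCountExceeds, MaybeGCOnFree) are illustrative and not taken from DMD: intern traces in a table, and once the table grows past a threshold, sweep the entries that no live block references and reset the threshold to twice the survivor count.

#include <cstddef>
#include <unordered_set>
#include <vector>

struct Trace {};  // stands in for DMD's StackTrace

static std::unordered_set<Trace*> gTraces;          // interned traces (owned here)
static std::vector<const Trace*> gLiveBlockTraces;  // traces still referenced by live blocks
static size_t gGCWhenCountExceeds = 4 * 1024;       // analogous to gGCStackTraceTableWhenSizeExceeds

// Delete interned traces that no live block references, then schedule the
// next sweep for when the table has doubled again.
static void GCTraces() {
  // "Mark": gather every trace a live block still points at.
  std::unordered_set<const Trace*> used(gLiveBlockTraces.begin(),
                                        gLiveBlockTraces.end());
  used.erase(static_cast<const Trace*>(nullptr));  // mirrors the patch's remove(nullptr)

  // "Sweep": drop everything that isn't marked.
  for (auto it = gTraces.begin(); it != gTraces.end();) {
    if (used.count(*it) == 0) {
      delete *it;
      it = gTraces.erase(it);
    } else {
      ++it;
    }
  }

  gGCWhenCountExceeds = 2 * gTraces.size();
}

// Called from the free() hook after the freed block has been removed from
// the block table, mirroring the check added to FreeCallback above.
static void MaybeGCOnFree() {
  if (gTraces.size() > gGCWhenCountExceeds) {
    GCTraces();
  }
}

int main() {
  Trace* live = new Trace();
  gTraces.insert(live);
  gTraces.insert(new Trace());  // this one becomes garbage
  gLiveBlockTraces.push_back(live);

  gGCWhenCountExceeds = 1;  // force a sweep on the next "free"
  MaybeGCOnFree();          // deletes the unreferenced trace, resets threshold to 2
  return gTraces.size() == 1 ? 0 : 1;
}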