Revert "Revert "Upgrade to dlmalloc 2.8.5.""

This reverts commit 729eebb.
commit 808a7c0e7e39b7ca3c7db1366e6e4089166052bb (1 parent: a78e89c)
Authored by Ian Rogers
vm/Dvm.mk (1 changed line)
@@ -199,6 +199,7 @@ ifeq ($(WITH_COPYING_GC),true)
alloc/Copying.cpp.arm
else
LOCAL_SRC_FILES += \
+ alloc/DlMalloc.cpp \
alloc/HeapSource.cpp \
alloc/MarkSweep.cpp.arm
endif
vm/alloc/Copying.cpp (5 changed lines)
@@ -731,9 +731,8 @@ void dvmHeapSourceGrowForUtilization()
/* do nothing */
}
-void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
- const void *userptr, size_t userlen,
- void *arg),
+void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
+ size_t used_bytes, void* arg),
void *arg)
{
assert(!"implemented");
vm/alloc/DdmHeap.cpp (200 changed lines)
@@ -23,6 +23,7 @@
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
+#include "alloc/DlMalloc.h"
#include "alloc/HeapSource.h"
#define DEFAULT_HEAP_ID 1
@@ -173,6 +174,7 @@ enum HpsgKind {
((u1)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
struct HeapChunkContext {
+ void* startOfNextMemoryChunk;
u1 *buf;
u1 *p;
u1 *pieceLenField;
@@ -205,36 +207,25 @@ static void flush_hpsg_chunk(HeapChunkContext *ctx)
ctx->pieceLenField = NULL;
}
-static void heap_chunk_callback(const void *chunkptr, size_t chunklen,
- const void *userptr, size_t userlen, void *arg)
-{
- HeapChunkContext *ctx = (HeapChunkContext *)arg;
- u1 state;
-
- UNUSED_PARAMETER(userlen);
-
- assert((chunklen & (ALLOCATION_UNIT_SIZE-1)) == 0);
-
+static void append_chunk(HeapChunkContext *ctx, u1 state, void* ptr, size_t length) {
/* Make sure there's enough room left in the buffer.
* We need to use two bytes for every fractional 256
- * allocation units used by the chunk.
+ * allocation units used by the chunk and 17 bytes for
+ * any header.
*/
{
- size_t needed = (((chunklen/ALLOCATION_UNIT_SIZE + 255) / 256) * 2);
+ size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
if (bytesLeft < needed) {
flush_hpsg_chunk(ctx);
}
-
bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
if (bytesLeft < needed) {
- ALOGW("chunk is too big to transmit (chunklen=%zd, %zd bytes)",
- chunklen, needed);
+ ALOGW("chunk is too big to transmit (length=%zd, %zd bytes)",
+ length, needed);
return;
}
}
-
-//TODO: notice when there's a gap and start a new heap, or at least a new range.
if (ctx->needHeader) {
/*
* Start a new HPSx chunk.
@@ -247,7 +238,7 @@ static void heap_chunk_callback(const void *chunkptr, size_t chunklen,
*ctx->p++ = 8;
/* [u4]: virtual address of segment start */
- set4BE(ctx->p, (uintptr_t)chunkptr); ctx->p += 4;
+ set4BE(ctx->p, (uintptr_t)ptr); ctx->p += 4;
/* [u4]: offset of this piece (relative to the virtual address) */
set4BE(ctx->p, 0); ctx->p += 4;
@@ -261,80 +252,123 @@ static void heap_chunk_callback(const void *chunkptr, size_t chunklen,
ctx->needHeader = false;
}
+ /* Write out the chunk description.
+ */
+ length /= ALLOCATION_UNIT_SIZE; // convert to allocation units
+ ctx->totalAllocationUnits += length;
+ while (length > 256) {
+ *ctx->p++ = state | HPSG_PARTIAL;
+ *ctx->p++ = 255; // length - 1
+ length -= 256;
+ }
+ *ctx->p++ = state;
+ *ctx->p++ = length - 1;
+}
+
+/*
+ * Called by dlmalloc_inspect_all. If used_bytes != 0 then start is
+ * the start of a malloc-ed piece of memory of size used_bytes. If
+ * used_bytes is 0 then start is the beginning of a run of free space,
+ * not including dlmalloc's bookkeeping, and end is the start of the
+ * next dlmalloc chunk. Regions containing only bookkeeping do not
+ * trigger a callback.
+ */
+static void heap_chunk_callback(void* start, void* end, size_t used_bytes,
+ void* arg)
+{
+ u1 state;
+ HeapChunkContext *ctx = (HeapChunkContext *)arg;
+ UNUSED_PARAMETER(end);
- /* Determine the type of this chunk.
+ if (used_bytes == 0) {
+ if (start == NULL) {
+ // Reset for start of new heap.
+ ctx->startOfNextMemoryChunk = NULL;
+ flush_hpsg_chunk(ctx);
+ }
+ // Only process in-use memory here, so that the reported free
+ // regions also cover dlmalloc's bookkeeping.
+ return;
+ }
+
+ /* If we're looking at the native heap, we'll just return
+ * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
*/
- if (userptr == NULL) {
- /* It's a free chunk.
- */
- state = HPSG_STATE(SOLIDITY_FREE, 0);
- } else {
- const Object *obj = (const Object *)userptr;
- /* If we're looking at the native heap, we'll just return
- * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
- */
- bool native = ctx->type == CHUNK_TYPE("NHSG");
+ bool native = ctx->type == CHUNK_TYPE("NHSG");
+
+ if (ctx->startOfNextMemoryChunk != NULL) {
+ // Transmit any pending free memory. Free native memory of more
+ // than kMaxFreeLen is likely backed by mmap, so don't report it;
+ // flush and start a new segment instead.
+ bool flush = true;
+ if (start > ctx->startOfNextMemoryChunk) {
+ const size_t kMaxFreeLen = 2 * SYSTEM_PAGE_SIZE;
+ void* freeStart = ctx->startOfNextMemoryChunk;
+ void* freeEnd = start;
+ size_t freeLen = (char*)freeEnd - (char*)freeStart;
+ if (!native || freeLen < kMaxFreeLen) {
+ append_chunk(ctx, HPSG_STATE(SOLIDITY_FREE, 0),
+ freeStart, freeLen);
+ flush = false;
+ }
+ }
+ if (flush) {
+ ctx->startOfNextMemoryChunk = NULL;
+ flush_hpsg_chunk(ctx);
+ }
+ }
+ const Object *obj = (const Object *)start;
- /* It's an allocated chunk. Figure out what it is.
- */
+ /* It's an allocated chunk. Figure out what it is.
+ */
//TODO: if ctx.merge, see if this chunk is different from the last chunk.
// If it's the same, we should combine them.
- if (!native && dvmIsValidObject(obj)) {
- ClassObject *clazz = obj->clazz;
- if (clazz == NULL) {
- /* The object was probably just created
- * but hasn't been initialized yet.
- */
- state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
- } else if (dvmIsTheClassClass(clazz)) {
- state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
- } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
- if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
+ if (!native && dvmIsValidObject(obj)) {
+ ClassObject *clazz = obj->clazz;
+ if (clazz == NULL) {
+ /* The object was probably just created
+ * but hasn't been initialized yet.
+ */
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
+ } else if (dvmIsTheClassClass(clazz)) {
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
+ } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
+ if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
+ } else {
+ switch (clazz->elementClass->primitiveType) {
+ case PRIM_BOOLEAN:
+ case PRIM_BYTE:
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
+ break;
+ case PRIM_CHAR:
+ case PRIM_SHORT:
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
+ break;
+ case PRIM_INT:
+ case PRIM_FLOAT:
state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
- } else {
- switch (clazz->elementClass->primitiveType) {
- case PRIM_BOOLEAN:
- case PRIM_BYTE:
- state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
- break;
- case PRIM_CHAR:
- case PRIM_SHORT:
- state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
- break;
- case PRIM_INT:
- case PRIM_FLOAT:
- state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
- break;
- case PRIM_DOUBLE:
- case PRIM_LONG:
- state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
- break;
- default:
- assert(!"Unknown GC heap object type");
- state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
- break;
- }
+ break;
+ case PRIM_DOUBLE:
+ case PRIM_LONG:
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
+ break;
+ default:
+ assert(!"Unknown GC heap object type");
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
+ break;
}
- } else {
- state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
}
} else {
- obj = NULL; // it's not actually an object
- state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
}
+ } else {
+ obj = NULL; // it's not actually an object
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
-
- /* Write out the chunk description.
- */
- chunklen /= ALLOCATION_UNIT_SIZE; // convert to allocation units
- ctx->totalAllocationUnits += chunklen;
- while (chunklen > 256) {
- *ctx->p++ = state | HPSG_PARTIAL;
- *ctx->p++ = 255; // length - 1
- chunklen -= 256;
- }
- *ctx->p++ = state;
- *ctx->p++ = chunklen - 1;
+ append_chunk(ctx, state, start, used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD);
+ ctx->startOfNextMemoryChunk =
+ (char*)start + used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD;
}
enum HpsgWhen {
@@ -353,8 +387,6 @@ enum HpsgWhat {
*/
#define HPSx_CHUNK_SIZE (16384 - 16)
-extern "C" void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*),void*);
-
static void walkHeap(bool merge, bool native)
{
HeapChunkContext ctx;
@@ -380,7 +412,7 @@ static void walkHeap(bool merge, bool native)
ctx.p = ctx.buf;
ctx.needHeader = true;
if (native) {
- dlmalloc_walk_heap(heap_chunk_callback, (void *)&ctx);
+ dlmalloc_inspect_all(heap_chunk_callback, (void*)&ctx);
} else {
dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
}
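
For reference, the chunk description that append_chunk() writes is a simple run-length encoding: each (state, length) byte pair covers up to 256 allocation units, the length byte stores the count minus one, and every pair except the last carries the HPSG_PARTIAL flag. A standalone sketch (the vector output and the flag value are illustrative):

#include <stdint.h>
#include <stddef.h>
#include <vector>

static const uint8_t kHpsgPartial = 1 << 7;  // assumed value of HPSG_PARTIAL

// Encode one chunk of `units` allocation units with the given state byte,
// mirroring the loop in append_chunk() above.
static void encodeChunk(std::vector<uint8_t>& out, uint8_t state, size_t units) {
    while (units > 256) {
        out.push_back(state | kHpsgPartial);  // more pairs follow for this chunk
        out.push_back(255);                   // 255 encodes a run of 256 units
        units -= 256;
    }
    out.push_back(state);
    out.push_back(static_cast<uint8_t>(units - 1));  // remainder, stored as count - 1
}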
vm/alloc/DlMalloc.cpp (51 changed lines)
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DlMalloc.h"
+
+#include <stdint.h>
+#include "Common.h"
+
+/* Dalvik specific morecore implementation defined in HeapSource.cpp. */
+#define MORECORE(x) dvmHeapSourceMorecore(m, x)
+extern void* dvmHeapSourceMorecore(void* mspace, intptr_t increment);
+
+/* Custom heap error handling. */
+#define PROCEED_ON_ERROR 0
+static void heap_error(const char* msg, const char* function, void* p);
+#define CORRUPTION_ERROR_ACTION(m) \
+ heap_error("HEAP MEMORY CORRUPTION", __FUNCTION__, NULL)
+#define USAGE_ERROR_ACTION(m,p) \
+ heap_error("ARGUMENT IS INVALID HEAP ADDRESS", __FUNCTION__, p)
+
+/*
+ * Ugly inclusion of the C file so that the Dalvik-specific #defines
+ * configure dlmalloc for our mspace use (the regular dlmalloc is
+ * still declared in bionic).
+ */
+#pragma GCC diagnostic ignored "-Wempty-body"
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#include "../../../bionic/libc/upstream-dlmalloc/malloc.c"
+#pragma GCC diagnostic warning "-Wstrict-aliasing"
+#pragma GCC diagnostic warning "-Wempty-body"
+
+
+static void heap_error(const char* msg, const char* function, void* p) {
+ ALOG(LOG_FATAL, LOG_TAG, "@@@ ABORTING: DALVIK: %s IN %s addr=%p", msg,
+ function, p);
+ /* So that we can get a memory dump around p */
+ *((int **) 0xdeadbaad) = (int *) p;
+}
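
DlMalloc.cpp relies on the configure-then-include pattern: the customization macros must be visible before the allocator's .c file is textually included, so the hooks bind in this translation unit only while bionic keeps its own copy. A minimal sketch of the shape (the hook name and include path are illustrative, and the include is left commented out):

#include <stdint.h>

// Hypothetical growth hook, in the spirit of dvmHeapSourceMorecore.
extern void* example_morecore(intptr_t increment);

// Bind the allocator's customization points before including its source.
#define MORECORE(x) example_morecore(x)
#define PROCEED_ON_ERROR 0

// Suppress warnings for the third-party source only.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #include "upstream-dlmalloc/malloc.c"  // illustrative path
#pragma GCC diagnostic pop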
vm/alloc/DlMalloc.h (42 changed lines)
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DALVIK_VM_ALLOC_DLMALLOC_H_
+#define DALVIK_VM_ALLOC_DLMALLOC_H_
+
+/* Configure dlmalloc for mspaces. */
+#define HAVE_MMAP 0
+#define HAVE_MREMAP 0
+#define HAVE_MORECORE 1
+#define MSPACES 1
+#define NO_MALLINFO 1
+#define ONLY_MSPACES 1
+#define USE_DL_PREFIX 1
+#define MALLOC_INSPECT_ALL 1
+
+/* Include the proper definitions. */
+#include "../../../bionic/libc/upstream-dlmalloc/malloc.h"
+
+/*
+ * Declarations of dlmalloc routines from bionic whose header cannot
+ * be included directly, because the include above redefines its symbols.
+ */
+extern "C" void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
+ void* arg);
+extern "C" int dlmalloc_trim(size_t);
+extern "C" void* dlmem2chunk(void* mem);
+
+#endif // DALVIK_VM_ALLOC_DLMALLOC_H_
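
The mspace calls declared above follow the standard dlmalloc API. A rough usage sketch (the mapping size and flags are illustrative; the real code grows the space through dvmHeapSourceMorecore rather than committing a fixed mapping up front):

#include <sys/mman.h>
#include "DlMalloc.h"

void example() {
    const size_t capacity = 1 << 20;  // 1 MiB backing region, for illustration
    void* base = mmap(NULL, capacity, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) return;

    // An unlocked mspace confined to the region we supplied.
    mspace msp = create_mspace_with_base(base, capacity, false /*locked*/);
    void* p = mspace_malloc(msp, 128);  // allocate from this space only
    mspace_free(msp, p);
    destroy_mspace(msp);
    munmap(base, capacity);
}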
vm/alloc/Heap.cpp (12 changed lines)
@@ -179,17 +179,6 @@ static void *tryMalloc(size_t size)
{
void *ptr;
- /* Don't try too hard if there's no way the allocation is
- * going to succeed. We have to collect SoftReferences before
- * throwing an OOME, though.
- */
- if (size >= gDvm.heapGrowthLimit) {
- ALOGW("%zd byte allocation exceeds the %zd byte maximum heap size",
- size, gDvm.heapGrowthLimit);
- ptr = NULL;
- goto collect_soft_refs;
- }
-
//TODO: figure out better heuristics
// There will be a lot of churn if someone allocates a bunch of
// big objects in a row, and we hit the frag case each time.
@@ -251,7 +240,6 @@ static void *tryMalloc(size_t size)
* been collected and cleared before throwing an OOME.
*/
//TODO: wait for the finalizers from the previous GC to finish
-collect_soft_refs:
LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
size);
gcForMalloc(true);
vm/alloc/HeapSource.cpp (202 changed lines)
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include <cutils/mspace.h>
#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>
@@ -22,16 +21,13 @@
#define SIZE_MAX UINT_MAX // TODO: get SIZE_MAX from stdint.h
#include "Dalvik.h"
+#include "alloc/DlMalloc.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"
-// TODO: find a real header file for these.
-extern "C" int dlmalloc_trim(size_t);
-extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);
-
static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);
@@ -97,6 +93,12 @@ struct Heap {
* The highest address of this heap, exclusive.
*/
char *limit;
+
+ /*
+ * If the heap has an mspace, the current high water mark in
+ * allocations requested via dvmHeapSourceMorecore.
+ */
+ char *brk;
};
struct HeapSource {
@@ -201,7 +203,7 @@ static size_t getAllocLimit(const HeapSource *hs)
if (isSoftLimited(hs)) {
return hs->softLimit;
} else {
- return mspace_max_allowed_footprint(hs2heap(hs)->msp);
+ return mspace_footprint_limit(hs2heap(hs)->msp);
}
}
@@ -258,7 +260,7 @@ static void countAllocation(Heap *heap, const void *ptr)
{
assert(heap->bytesAllocated < mspace_footprint(heap->msp));
- heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
+ heap->bytesAllocated += mspace_usable_size(ptr) +
HEAP_SOURCE_CHUNK_OVERHEAD;
heap->objectsAllocated++;
HeapSource* hs = gDvm.gcHeap->heapSource;
@@ -269,7 +271,7 @@ static void countAllocation(Heap *heap, const void *ptr)
static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
- size_t delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
+ size_t delta = mspace_usable_size(ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
assert(delta > 0);
if (delta < heap->bytesAllocated) {
heap->bytesAllocated -= delta;
@@ -286,38 +288,65 @@ static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
static HeapSource *gHs = NULL;
-static mspace createMspace(void *base, size_t startSize, size_t maximumSize)
+static mspace createMspace(void* begin, size_t morecoreStart, size_t startingSize)
{
- /* Create an unlocked dlmalloc mspace to use as
- * a heap source.
- *
- * We start off reserving startSize / 2 bytes but
- * letting the heap grow to startSize. This saves
- * memory in the case where a process uses even less
- * than the starting size.
- */
- LOGV_HEAP("Creating VM heap of size %zu", startSize);
+ // Clear errno to allow strerror on error.
errno = 0;
-
- mspace msp = create_contiguous_mspace_with_base(startSize/2,
- maximumSize, /*locked=*/false, base);
+ // Allow access to the initial pages that will hold the mspace.
+ mprotect(begin, morecoreStart, PROT_READ | PROT_WRITE);
+ // Create an mspace using our backing storage, starting at begin with an
+ // initial footprint of morecoreStart. Don't use an internal dlmalloc lock.
+ // Once the first morecoreStart bytes are exhausted, morecore will be called.
+ mspace msp = create_mspace_with_base(begin, morecoreStart, false /*locked*/);
if (msp != NULL) {
- /* Don't let the heap grow past the starting size without
- * our intervention.
- */
- mspace_set_max_allowed_footprint(msp, startSize);
+ // Do not allow morecore requests to succeed beyond the starting size of the heap.
+ mspace_set_footprint_limit(msp, startingSize);
} else {
- /* There's no guarantee that errno has meaning when the call
- * fails, but it often does.
- */
- LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
- startSize/2, maximumSize, strerror(errno));
+ ALOGE("create_mspace_with_base failed %s", strerror(errno));
}
-
return msp;
}
/*
+ * Service request from DlMalloc to increase heap size.
+ */
+void* dvmHeapSourceMorecore(void* mspace, intptr_t increment)
+{
+ Heap* heap = NULL;
+ for (size_t i = 0; i < gHs->numHeaps; i++) {
+ if (gHs->heaps[i].msp == mspace) {
+ heap = &gHs->heaps[i];
+ break;
+ }
+ }
+ if (heap == NULL) {
+ ALOGE("Failed to find heap for mspace %p", mspace);
+ dvmAbort();
+ }
+ char* original_brk = heap->brk;
+ if (increment != 0) {
+ char* new_brk = original_brk + increment;
+ if (increment > 0) {
+ // Should never be asked to increase the allocation beyond the capacity of the space.
+ // Enforced by mspace_set_footprint_limit.
+ assert(new_brk <= heap->limit);
+ mprotect(original_brk, increment, PROT_READ | PROT_WRITE);
+ } else {
+ // Should never be asked for negative footprint (ie before base).
+ assert(original_brk + increment > heap->base);
+ // Advise we don't need the pages and protect them.
+ size_t size = -increment;
+ madvise(new_brk, size, MADV_DONTNEED);
+ mprotect(new_brk, size, PROT_NONE);
+ }
+ // Update brk.
+ heap->brk = new_brk;
+ }
+ return original_brk;
+}
+
+const size_t kInitialMorecoreStart = SYSTEM_PAGE_SIZE;
+/*
* Add the initial heap. Returns false if the initial heap was
* already added to the heap source.
*/
@@ -332,7 +361,8 @@ static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
hs->heaps[0].maximumSize = maximumSize;
hs->heaps[0].concurrentStartBytes = SIZE_MAX;
hs->heaps[0].base = hs->heapBase;
- hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize;
+ hs->heaps[0].limit = hs->heapBase + maximumSize;
+ hs->heaps[0].brk = hs->heapBase + kInitialMorecoreStart;
hs->numHeaps = 1;
return true;
}
@@ -359,8 +389,7 @@ static bool addNewHeap(HeapSource *hs)
* Heap storage comes from a common virtual memory reservation.
* The new heap will start on the page after the old heap.
*/
- void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
- char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
+ char *base = hs->heaps[0].brk;
size_t overhead = base - hs->heaps[0].base;
assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);
@@ -370,12 +399,13 @@ static bool addNewHeap(HeapSource *hs)
overhead, hs->maximumSize);
return false;
}
-
+ size_t morecoreStart = SYSTEM_PAGE_SIZE;
heap.maximumSize = hs->growthLimit - overhead;
heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
heap.base = base;
heap.limit = heap.base + heap.maximumSize;
- heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
+ heap.brk = heap.base + morecoreStart;
+ heap.msp = createMspace(base, morecoreStart, HEAP_MIN_FREE);
if (heap.msp == NULL) {
return false;
}
@@ -384,8 +414,7 @@ static bool addNewHeap(HeapSource *hs)
*/
hs->heaps[0].maximumSize = overhead;
hs->heaps[0].limit = base;
- mspace msp = hs->heaps[0].msp;
- mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
+ mspace_set_footprint_limit(hs->heaps[0].msp, overhead);
/* Put the new heap in the list, at heaps[0].
* Shift existing heaps down.
@@ -530,7 +559,7 @@ GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
/* Create an unlocked dlmalloc mspace to use as
* a heap source.
*/
- msp = createMspace(base, startSize, maximumSize);
+ msp = createMspace(base, kInitialMorecoreStart, startSize);
if (msp == NULL) {
goto fail;
}
@@ -680,7 +709,7 @@ size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
value = mspace_footprint(heap->msp);
break;
case HS_ALLOWED_FOOTPRINT:
- value = mspace_max_allowed_footprint(heap->msp);
+ value = mspace_footprint_limit(heap->msp);
break;
case HS_BYTES_ALLOCATED:
value = heap->bytesAllocated;
@@ -837,14 +866,14 @@ static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
*/
size_t max = heap->maximumSize;
- mspace_set_max_allowed_footprint(heap->msp, max);
+ mspace_set_footprint_limit(heap->msp, max);
void* ptr = dvmHeapSourceAlloc(n);
/* Shrink back down as small as possible. Our caller may
* readjust max_allowed to a more appropriate value.
*/
- mspace_set_max_allowed_footprint(heap->msp,
- mspace_footprint(heap->msp));
+ mspace_set_footprint_limit(heap->msp,
+ mspace_footprint(heap->msp));
return ptr;
}
@@ -923,41 +952,14 @@ size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
// mspace_free, but on the other heaps we only do some
// accounting.
if (heap == gHs->heaps) {
- // mspace_merge_objects takes two allocated objects, and
- // if the second immediately follows the first, will merge
- // them, returning a larger object occupying the same
- // memory. This is a local operation, and doesn't require
- // dlmalloc to manipulate any freelists. It's pretty
- // inexpensive compared to free().
-
- // ptrs is an array of objects all in memory order, and if
- // client code has been allocating lots of short-lived
- // objects, this is likely to contain runs of objects all
- // now garbage, and thus highly amenable to this optimization.
-
- // Unroll the 0th iteration around the loop below,
- // countFree ptrs[0] and initializing merged.
- assert(ptrs[0] != NULL);
- assert(ptr2heap(gHs, ptrs[0]) == heap);
- countFree(heap, ptrs[0], &numBytes);
- void *merged = ptrs[0];
- for (size_t i = 1; i < numPtrs; i++) {
- assert(merged != NULL);
+ // Count freed objects.
+ for (size_t i = 0; i < numPtrs; i++) {
assert(ptrs[i] != NULL);
- assert((intptr_t)merged < (intptr_t)ptrs[i]);
assert(ptr2heap(gHs, ptrs[i]) == heap);
countFree(heap, ptrs[i], &numBytes);
- // Try to merge. If it works, merged now includes the
- // memory of ptrs[i]. If it doesn't, free merged, and
- // see if ptrs[i] starts a new run of adjacent
- // objects to merge.
- if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
- mspace_free(msp, merged);
- merged = ptrs[i];
- }
}
- assert(merged != NULL);
- mspace_free(msp, merged);
+ // Bulk free ptrs.
+ mspace_bulk_free(msp, ptrs, numPtrs);
} else {
// This is not an 'active heap'. Only do the accounting.
for (size_t i = 0; i < numPtrs; i++) {
@@ -1024,7 +1026,7 @@ size_t dvmHeapSourceChunkSize(const void *ptr)
Heap* heap = ptr2heap(gHs, ptr);
if (heap != NULL) {
- return mspace_usable_size(heap->msp, ptr);
+ return mspace_usable_size(ptr);
}
return 0;
}
@@ -1122,13 +1124,13 @@ static void setSoftLimit(HeapSource *hs, size_t softLimit)
if (softLimit < currentHeapSize) {
/* Don't let the heap grow any more, and impose a soft limit.
*/
- mspace_set_max_allowed_footprint(msp, currentHeapSize);
+ mspace_set_footprint_limit(msp, currentHeapSize);
hs->softLimit = softLimit;
} else {
/* Let the heap grow to the requested max, and remove any
* soft limit, if set.
*/
- mspace_set_max_allowed_footprint(msp, softLimit);
+ mspace_set_footprint_limit(msp, softLimit);
hs->softLimit = SIZE_MAX;
}
}
@@ -1284,17 +1286,22 @@ void dvmHeapSourceGrowForUtilization()
* Return free pages to the system.
* TODO: move this somewhere else, especially the native heap part.
*/
-static void releasePagesInRange(void *start, void *end, void *nbytes)
+static void releasePagesInRange(void* start, void* end, size_t used_bytes,
+ void* releasedBytes)
{
- /* Linux requires that the madvise() start address is page-aligned.
- * We also align the end address.
- */
- start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
- end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
- if (start < end) {
- size_t length = (char *)end - (char *)start;
- madvise(start, length, MADV_DONTNEED);
- *(size_t *)nbytes += length;
+ if (used_bytes == 0) {
+ /*
+ * We have a range of memory we can try to madvise()
+ * back. Linux requires that the madvise() start address is
+ * page-aligned. We also align the end address.
+ */
+ start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
+ end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
+ if (end > start) {
+ size_t length = (char *)end - (char *)start;
+ madvise(start, length, MADV_DONTNEED);
+ *(size_t *)releasedBytes += length;
+ }
}
}
@@ -1310,20 +1317,17 @@ static void trimHeaps()
for (size_t i = 0; i < hs->numHeaps; i++) {
Heap *heap = &hs->heaps[i];
- /* Return the wilderness chunk to the system.
- */
+ /* Return the wilderness chunk to the system. */
mspace_trim(heap->msp, 0);
- /* Return any whole free pages to the system.
- */
- mspace_walk_free_pages(heap->msp, releasePagesInRange, &heapBytes);
+ /* Return any whole free pages to the system. */
+ mspace_inspect_all(heap->msp, releasePagesInRange, &heapBytes);
}
- /* Same for the native heap.
- */
+ /* Same for the native heap. */
dlmalloc_trim(0);
size_t nativeBytes = 0;
- dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);
+ dlmalloc_inspect_all(releasePagesInRange, &nativeBytes);
LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
heapBytes, nativeBytes, heapBytes + nativeBytes);
@@ -1333,9 +1337,8 @@ static void trimHeaps()
* Walks over the heap source and passes every allocated and
* free chunk to the callback.
*/
-void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
- const void *userptr, size_t userlen,
- void *arg),
+void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
+ size_t used_bytes, void* arg),
void *arg)
{
HS_BOILERPLATE();
@@ -1345,7 +1348,8 @@ void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
//TODO: do this in address order
HeapSource *hs = gHs;
for (size_t i = hs->numHeaps; i > 0; --i) {
- mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
+ mspace_inspect_all(hs->heaps[i-1].msp, callback, arg);
+ callback(NULL, NULL, 0, arg); // Indicate end of a heap.
}
}
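
The morecore path above amounts to a reserve/commit/release pattern over one contiguous mapping: reserve address space with PROT_NONE, commit pages with mprotect() as the heap grows, and return them with madvise(MADV_DONTNEED) plus PROT_NONE as it shrinks. A standalone sketch with hypothetical names (assumes page-aligned increments, which dlmalloc's granularity provides in this configuration):

#include <stdint.h>
#include <sys/mman.h>

struct Region {
    char* base;   // start of the reservation
    char* brk;    // high-water mark of committed pages
    char* limit;  // end of the reservation
};

// Reserve address space without committing pages.
static bool regionInit(Region* r, size_t capacity) {
    void* p = mmap(NULL, capacity, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return false;
    r->base = r->brk = static_cast<char*>(p);
    r->limit = r->base + capacity;
    return true;
}

// sbrk-style growth: commit on increase, return pages to the OS on decrease.
static void* regionMorecore(Region* r, intptr_t increment) {
    char* oldBrk = r->brk;
    if (increment > 0) {
        if (oldBrk + increment > r->limit) return (void*)-1;  // out of reserve
        mprotect(oldBrk, increment, PROT_READ | PROT_WRITE);  // commit pages
    } else if (increment < 0) {
        char* newBrk = oldBrk + increment;
        madvise(newBrk, -increment, MADV_DONTNEED);  // discard page contents
        mprotect(newBrk, -increment, PROT_NONE);     // and decommit
    }
    r->brk += increment;
    return oldBrk;
}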
vm/alloc/HeapSource.h (6 changed lines)
@@ -22,7 +22,6 @@
/* dlmalloc uses one size_t per allocated chunk.
*/
#define HEAP_SOURCE_CHUNK_OVERHEAD (1 * sizeof (size_t))
-#define HEAP_SOURCE_WORST_CHUNK_OVERHEAD (32 * sizeof (size_t))
/* The largest number of separate heaps we can handle.
*/
@@ -157,9 +156,8 @@ void dvmHeapSourceGrowForUtilization(void);
* Walks over the heap source and passes every allocated and
* free chunk to the callback.
*/
-void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
- const void *userptr, size_t userlen,
- void *arg),
+void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
+ size_t used_bytes, void* arg),
void *arg);
/*
* Gets the number of heaps available in the heap source.
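
HEAP_SOURCE_CHUNK_OVERHEAD above reflects the one size_t of bookkeeping dlmalloc charges per chunk; countAllocation() and countFree() in HeapSource.cpp therefore account each allocation as its usable size plus that constant. A one-line sketch against the ordinary allocator (malloc_usable_size is the libc analogue of mspace_usable_size):

#include <malloc.h>   // malloc_usable_size (bionic and glibc)
#include <stddef.h>

// Footprint cost of one allocation: usable payload plus dlmalloc's
// per-chunk bookkeeping, mirroring HEAP_SOURCE_CHUNK_OVERHEAD.
static size_t footprintCost(void* ptr) {
    return malloc_usable_size(ptr) + sizeof(size_t);
}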