/*-------------------------------------------------------------------------
 *
 * aset.c
 *	  Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *	This is a new (Feb. 05, 1999) implementation of the allocation set
 *	routines.  AllocSet...() does not use OrderedSet...() any more.
 *	Instead it manages allocations in a block pool by itself, combining
 *	many small allocations in a few bigger blocks.  AllocSetFree() normally
 *	doesn't really free() memory.  It just adds the freed area to a free
 *	list for later reuse by AllocSetAlloc().  All memory blocks are free()'d
 *	at once on AllocSetReset(), which happens when the memory context gets
 *	destroyed.
 *				Jan Wieck
 *
 *	Performance improvement from Tom Lane, 8/99: for extremely large request
 *	sizes, we do want to be able to give the memory back to free() as soon
 *	as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
 *	freelist entries that might never be usable.  This is especially needed
 *	when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *	the previous instances of the block were guaranteed to be wasted until
 *	AllocSetReset() under the old way.
 *
 *	Further improvement 12/00: as the code stood, request sizes in the
 *	midrange between "small" and "large" were handled very inefficiently,
 *	because any sufficiently large free chunk would be used to satisfy a
 *	request, even if it was much larger than necessary.  This led to more
 *	and more wasted space in allocated chunks over time.  To fix, get rid
 *	of the midrange behavior: we now handle only "small" power-of-2-size
 *	chunks as chunks.  Anything "large" is passed off to malloc().  Change
 *	the number of freelists to change the small/large boundary.
 *
 *
 * About CLOBBER_FREED_MEMORY:
 *
 * If this symbol is defined, all freed memory is overwritten with 0x7F's.
 * This is useful for catching places that reference already-freed memory.
 *
 * About MEMORY_CONTEXT_CHECKING:
 *
 * Since we usually round request sizes up to the next power of 2, there
 * is often some unused space immediately after a requested data area.
 * Thus, if someone makes the common error of writing past what they've
 * requested, the problem is likely to go unnoticed ... until the day when
 * there *isn't* any wasted space, perhaps because of different memory
 * alignment on a new platform, or some other effect.  To catch this sort
 * of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
 * the requested space whenever the request is less than the actual chunk
 * size, and verifies that the byte is undamaged when the chunk is freed.
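 *
 * For example (illustrative figures only, assuming an 8-byte minimum chunk
 * size): palloc(20) is served from a 32-byte chunk, so byte 20 is set to
 * 0x7E at allocation time and re-checked at pfree() time:
 *
 *		[chunk header][20 requested bytes][0x7E sentinel][11 unused bytes]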
 *
 *
 * About USE_VALGRIND and Valgrind client requests:
 *
 * Valgrind provides "client request" macros that exchange information with
 * the host Valgrind (if any).  Under !USE_VALGRIND, memdebug.h stubs out
 * currently-used macros.
 *
 * When running under Valgrind, we want a NOACCESS memory region both before
 * and after the allocation.  The chunk header is tempting as the preceding
 * region, but mcxt.c expects to be able to examine the standard chunk header
 * fields.  Therefore, we use, when available, the requested_size field and
 * any subsequent padding.  requested_size is made NOACCESS before returning
 * a chunk pointer to a caller.  However, to reduce client request traffic,
 * it is kept DEFINED in chunks on the free list.
 *
 * The rounded-up capacity of the chunk usually acts as a post-allocation
 * NOACCESS region.  If the request consumes precisely the entire chunk,
 * there is no such region; another chunk header may immediately follow.  In
 * that case, Valgrind will not detect access beyond the end of the chunk.
 *
 * See also the cooperating Valgrind client requests in mcxt.c.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "utils/memdebug.h"
#include "utils/memutils.h"

/* Define this to detail debug alloc information */
/* #define HAVE_ALLOCINFO */

/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks;
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
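
/*
 * Illustrative mapping implied by the constants above (a sketch, not
 * authoritative): with ALLOC_MINBITS = 3 and 11 freelists, freelist k
 * serves chunks of 8 << k bytes, i.e. freelist 0 holds 8-byte chunks,
 * freelist 1 holds 16-byte chunks, ... freelist 10 holds 8192-byte chunks.
 * A palloc(100), for instance, is rounded up to 128 bytes and served from
 * freelist 4; anything above 8192 bytes bypasses the freelists entirely.
 */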

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *--------------------
 */
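
/*
 * For example (illustrative, using the common defaults of an 8K initial
 * and 8M maximum block size): successive block requests are 8K, 16K, 32K,
 * ... 8M, 8M, 8M, and so on, until the context is reset.
 */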

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	MAXALIGN(sizeof(AllocChunkData))

/* Portion of ALLOC_CHUNKHDRSZ examined outside aset.c. */
#define ALLOC_CHUNK_PUBLIC	\
	(offsetof(AllocChunkData, size) + sizeof(Size))

/* Portion of ALLOC_CHUNKHDRSZ excluding trailing padding. */
#ifdef MEMORY_CONTEXT_CHECKING
#define ALLOC_CHUNK_USED	\
	(offsetof(AllocChunkData, requested_size) + sizeof(Size))
#else
#define ALLOC_CHUNK_USED	\
	(offsetof(AllocChunkData, size) + sizeof(Size))
#endif

typedef struct AllocBlockData *AllocBlock;		/* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

/*
 * AllocSetContext is our standard implementation of MemoryContext.
 *
 * Note: header.isReset means there is nothing for AllocSetReset to do.
 * This is different from the aset being physically empty (empty blocks list)
 * because we may still have a keeper block.  It's also different from the set
 * being logically empty, because we don't attempt to detect pfree'ing the
 * last active chunk.
 */
typedef struct AllocSetContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Info about storage allocated in this context: */
	AllocBlock	blocks;			/* head of list of blocks in this set */
	AllocChunk	freelist[ALLOCSET_NUM_FREELISTS];		/* free chunk lists */
	/* Allocation parameters for this context: */
	Size		initBlockSize;	/* initial block size */
	Size		maxBlockSize;	/* maximum block size */
	Size		nextBlockSize;	/* next block size to allocate */
	Size		allocChunkLimit;	/* effective chunk size limit */
	AllocBlock	keeper;			/* if not NULL, keep this block over resets */
} AllocSetContext;

typedef AllocSetContext *AllocSet;

/*
 * AllocBlock
 *		An AllocBlock is the unit of memory that is obtained by aset.c
 *		from malloc().  It contains one or more AllocChunks, which are
 *		the units requested by palloc() and freed by pfree().  AllocChunks
 *		cannot be returned to malloc() individually; instead they are put
 *		on freelists by pfree() and re-used by the next palloc() that has
 *		a matching request size.
 *
 *		AllocBlockData is the header data for a block --- the usable space
 *		within the block begins at the next alignment boundary.
 */
typedef struct AllocBlockData
{
	AllocSet	aset;			/* aset that owns this block */
	AllocBlock	next;			/* next block in aset's blocks list */
	char	   *freeptr;		/* start of free space in this block */
	char	   *endptr;			/* end of space in this block */
} AllocBlockData;

/*
 * AllocChunk
 *		The prefix of each piece of memory in an AllocBlock
 *
 * NB: this MUST match StandardChunkHeader as defined by utils/memutils.h.
 */
typedef struct AllocChunkData
{
	/* aset is the owning aset if allocated, or the freelist link if free */
	void	   *aset;
	/* size is always the size of the usable space in the chunk */
	Size		size;
#ifdef MEMORY_CONTEXT_CHECKING
	/* when debugging memory usage, also store actual requested size */
	/* this is zero in a free chunk */
	Size		requested_size;
#endif
} AllocChunkData;
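
/*
 * An illustrative size note (not authoritative; figures assume a typical
 * 64-bit platform without MEMORY_CONTEXT_CHECKING): aset and size are 8
 * bytes each, so ALLOC_CHUNKHDRSZ is 16 and every pointer handed out by
 * palloc() sits exactly 16 bytes past the start of its AllocChunkData.
 * AllocPointerGetChunk/AllocChunkGetPointer below just apply that offset.
 */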

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

#define AllocPointerGetChunk(ptr)	\
	((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
	((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))

/*
 * These functions implement the MemoryContext API for AllocSet contexts.
 */
static void *AllocSetAlloc(MemoryContext context, Size size);
static void AllocSetFree(MemoryContext context, void *pointer);
static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
static void AllocSetInit(MemoryContext context);
static void AllocSetReset(MemoryContext context);
static void AllocSetDelete(MemoryContext context);
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
static bool AllocSetIsEmpty(MemoryContext context);
static void AllocSetStats(MemoryContext context, int level, bool print,
			  MemoryContextCounters *totals);

#ifdef MEMORY_CONTEXT_CHECKING
static void AllocSetCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for AllocSet contexts.
 */
static MemoryContextMethods AllocSetMethods = {
	AllocSetAlloc,
	AllocSetFree,
	AllocSetRealloc,
	AllocSetInit,
	AllocSetReset,
	AllocSetDelete,
	AllocSetGetChunkSpace,
	AllocSetIsEmpty,
	AllocSetStats
#ifdef MEMORY_CONTEXT_CHECKING
	,AllocSetCheck
#endif
};

/*
 * Table for AllocSetFreeIndex: LogTable256[n] is the number of significant
 * bits in n, i.e. log2(n)+1 for n > 0 (and 0 for n == 0).
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %d\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %d\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif

/* ----------
 * AllocSetFreeIndex -
 *
 *		Depending on the size of an allocation, compute which freechunk
 *		list of the alloc set it belongs to.  Caller must have verified
 *		that size <= ALLOC_CHUNK_LIMIT.
 * ----------
 */
static inline int
AllocSetFreeIndex(Size size)
{
	int			idx;
	unsigned int t,
				tsize;

	if (size > (1 << ALLOC_MINBITS))
	{
		tsize = (size - 1) >> ALLOC_MINBITS;

		/*
		 * At this point we need to obtain log2(tsize)+1, ie, the number of
		 * not-all-zero bits at the right.  We used to do this with a
		 * shift-and-count loop, but this function is enough of a hotspot to
		 * justify micro-optimization effort.  The best approach seems to be
		 * to use a lookup table.  Note that this code assumes that
		 * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
		 * the tsize value.
		 */
		t = tsize >> 8;
		idx = t ? LogTable256[t] + 8 : LogTable256[tsize];

		Assert(idx < ALLOCSET_NUM_FREELISTS);
	}
	else
		idx = 0;

	return idx;
}
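
/*
 * A worked trace of the lookup (illustrative only): AllocSetFreeIndex(100)
 * computes tsize = (100 - 1) >> 3 = 12; the high byte t = 12 >> 8 is 0, so
 * idx = LogTable256[12] = 4, selecting the freelist of 8 << 4 = 128-byte
 * chunks --- the smallest power-of-2 chunk that can hold 100 bytes.
 */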

#ifdef CLOBBER_FREED_MEMORY

/* Wipe freed memory for debugging purposes */
static void
wipe_mem(void *ptr, size_t size)
{
	VALGRIND_MAKE_MEM_UNDEFINED(ptr, size);
	memset(ptr, 0x7F, size);
	VALGRIND_MAKE_MEM_NOACCESS(ptr, size);
}
#endif

#ifdef MEMORY_CONTEXT_CHECKING
static void
set_sentinel(void *base, Size offset)
{
	char	   *ptr = (char *) base + offset;

	VALGRIND_MAKE_MEM_UNDEFINED(ptr, 1);
	*ptr = 0x7E;
	VALGRIND_MAKE_MEM_NOACCESS(ptr, 1);
}

static bool
sentinel_ok(const void *base, Size offset)
{
	const char *ptr = (const char *) base + offset;
	bool		ret;

	VALGRIND_MAKE_MEM_DEFINED(ptr, 1);
	ret = *ptr == 0x7E;
	VALGRIND_MAKE_MEM_NOACCESS(ptr, 1);

	return ret;
}
#endif

#ifdef RANDOMIZE_ALLOCATED_MEMORY

/*
 * Fill a just-allocated piece of memory with "random" data.  It's not really
 * very random, just a repeating sequence with a length that's prime.  What
 * we mainly want out of it is to have a good probability that two palloc's
 * of the same number of bytes start out containing different data.
 *
 * The region may be NOACCESS, so make it UNDEFINED first to avoid errors as
 * we fill it.  Filling the region makes it DEFINED, so make it UNDEFINED
 * again afterward.  Whether to finally make it UNDEFINED or NOACCESS is
 * fairly arbitrary.  UNDEFINED is more convenient for AllocSetRealloc(), and
 * other callers have no preference.
 */
static void
randomize_mem(char *ptr, size_t size)
{
	static int	save_ctr = 1;
	size_t		remaining = size;
	int			ctr;

	ctr = save_ctr;
	VALGRIND_MAKE_MEM_UNDEFINED(ptr, size);
	while (remaining-- > 0)
	{
		*ptr++ = ctr;
		if (++ctr > 251)
			ctr = 1;
	}
	VALGRIND_MAKE_MEM_UNDEFINED(ptr - size, size);
	save_ctr = ctr;
}
#endif   /* RANDOMIZE_ALLOCATED_MEMORY */

/*
 * Public routines
 */


/*
 * AllocSetContextCreate
 *		Create a new AllocSet context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (for debugging --- string will be copied)
 * minContextSize: minimum context size
 * initBlockSize: initial allocation block size
 * maxBlockSize: maximum allocation block size
 */
MemoryContext
AllocSetContextCreate(MemoryContext parent,
					  const char *name,
					  Size minContextSize,
					  Size initBlockSize,
					  Size maxBlockSize)
{
	AllocSet	set;

	/* Do the type-independent part of context creation */
	set = (AllocSet) MemoryContextCreate(T_AllocSetContext,
										 sizeof(AllocSetContext),
										 &AllocSetMethods,
										 parent,
										 name);

	/*
	 * Make sure alloc parameters are reasonable, and save them.
	 *
	 * We somewhat arbitrarily enforce a minimum 1K block size.
	 */
	initBlockSize = MAXALIGN(initBlockSize);
	if (initBlockSize < 1024)
		initBlockSize = 1024;
	maxBlockSize = MAXALIGN(maxBlockSize);
	if (maxBlockSize < initBlockSize)
		maxBlockSize = initBlockSize;
	Assert(AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
	set->initBlockSize = initBlockSize;
	set->maxBlockSize = maxBlockSize;
	set->nextBlockSize = initBlockSize;

	/*
	 * Compute the allocation chunk size limit for this context.  It can't be
	 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
	 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
	 * even a significant fraction of it, should be treated as large chunks
	 * too.  For the typical case of maxBlockSize a power of 2, the chunk
	 * size limit will be at most 1/8th maxBlockSize, so that given a stream
	 * of requests that are all the maximum chunk size we will waste at most
	 * 1/8th of the allocated space.
	 *
	 * We have to have allocChunkLimit a power of two, because the requested
	 * and actually-allocated sizes of any chunk must be on the same side of
	 * the limit, else we get confused about whether the chunk is "big".
	 *
	 * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
	 */
	StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
					 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");

	set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
	while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
		   (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
		set->allocChunkLimit >>= 1;
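
	/*
	 * Illustrative arithmetic (a sketch; header sizes assume a typical
	 * 64-bit build): for maxBlockSize = 8192 with ALLOC_BLOCKHDRSZ = 32 and
	 * ALLOC_CHUNKHDRSZ = 16, the loop above compares against
	 * (8192 - 32) / 4 = 2040 and halves the limit from 8192 down to 1024,
	 * the largest power of 2 whose chunk-plus-header fits under that bound.
	 */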

	/*
	 * Grab always-allocated space, if requested
	 */
	if (minContextSize > ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ)
	{
		Size		blksize = MAXALIGN(minContextSize);
		AllocBlock	block;

		block = (AllocBlock) malloc(blksize);
		if (block == NULL)
		{
			MemoryContextStats(TopMemoryContext);
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory"),
					 errdetail("Failed while creating memory context \"%s\".",
							   name)));
		}
		block->aset = set;
		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
		block->endptr = ((char *) block) + blksize;
		block->next = set->blocks;
		set->blocks = block;
		/* Mark block as not to be released at reset time */
		set->keeper = block;

		/* Mark unallocated space NOACCESS; leave the block header alone. */
		VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
								   blksize - ALLOC_BLOCKHDRSZ);
	}

	return (MemoryContext) set;
}
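
/*
 * Illustrative usage (a sketch, not part of this file): a typical caller
 * creates a context with the stock parameters from memutils.h, allocates
 * in it, and eventually deletes it, returning every block to malloc():
 *
 *		MemoryContext cxt;
 *		char	   *buf;
 *
 *		cxt = AllocSetContextCreate(CurrentMemoryContext,
 *									"example context",
 *									ALLOCSET_DEFAULT_MINSIZE,
 *									ALLOCSET_DEFAULT_INITSIZE,
 *									ALLOCSET_DEFAULT_MAXSIZE);
 *		buf = MemoryContextAlloc(cxt, 100);	-- served from a 128-byte chunk
 *		pfree(buf);							-- chunk goes onto freelist 4
 *		MemoryContextDelete(cxt);
 */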

/*
 * AllocSetInit
 *		Context-type-specific initialization routine.
 *
 * This is called by MemoryContextCreate() after setting up the
 * generic MemoryContext fields and before linking the new context
 * into the context tree.  We must do whatever is needed to make the
 * new context minimally valid for deletion.  We must *not* risk
 * failure --- thus, for example, allocating more memory is not cool.
 * (AllocSetContextCreate can allocate memory when it gets control
 * back, however.)
 */
static void
AllocSetInit(MemoryContext context)
{
	/*
	 * Since MemoryContextCreate already zeroed the context node, we don't
	 * have to do anything here: it's already OK.
	 */
}

/*
 * AllocSetReset
 *		Frees all memory which is allocated in the given set.
 *
 * Actually, this routine has some discretion about what to do.
 * It should mark all allocated chunks freed, but it need not necessarily
 * give back all the resources the set owns.  Our actual implementation is
 * that we hang onto any "keeper" block specified for the set.  In this way,
 * we don't thrash malloc() when a context is repeatedly reset after small
 * allocations, which is typical behavior for per-tuple contexts.
 */
static void
AllocSetReset(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;

	AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Clear chunk freelists */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	block = set->blocks;

	/* New blocks list is either empty or just the keeper block */
	set->blocks = set->keeper;

	while (block != NULL)
	{
		AllocBlock	next = block->next;

		if (block == set->keeper)
		{
			/* Reset the block, but don't return it to malloc */
			char	   *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(datastart, block->freeptr - datastart);
#else
			/* wipe_mem() would have done this */
			VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
			block->freeptr = datastart;
			block->next = NULL;
		}
		else
		{
			/* Normal case, release the block */
#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, block->freeptr - ((char *) block));
#endif
			free(block);
		}
		block = next;
	}

	/* Reset block size allocation sequence, too */
	set->nextBlockSize = set->initBlockSize;
}

/*
 * AllocSetDelete
 *		Frees all memory which is allocated in the given set,
 *		in preparation for deletion of the set.
 *
 * Unlike AllocSetReset, this *must* free all resources of the set.
 * But note we are not responsible for deleting the context node itself.
 */
static void
AllocSetDelete(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block = set->blocks;

	AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Make it look empty, just in case... */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));
	set->blocks = NULL;
	set->keeper = NULL;

	while (block != NULL)
	{
		AllocBlock	next = block->next;

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif
		free(block);
		block = next;
	}
}

/*
 * AllocSetAlloc
 *		Returns pointer to allocated memory of given size or NULL if
 *		request could not be completed; memory is added to the set.
 *
 * No request may exceed:
 *		MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
 * All callers use a much-lower limit.
 */
static void *
AllocSetAlloc(MemoryContext context, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	AllocChunk	chunk;
	int			fidx;
	Size		chunk_size;
	Size		blksize;

	AssertArg(AllocSetIsValid(set));

	/*
	 * If requested size exceeds maximum for chunks, allocate an entire block
	 * for this request.
	 */
	if (size > set->allocChunkLimit)
	{
		chunk_size = MAXALIGN(size);
		blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) malloc(blksize);
		if (block == NULL)
			return NULL;
		block->aset = set;
		block->freeptr = block->endptr = ((char *) block) + blksize;

		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		chunk->aset = set;
		chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
		/* Valgrind: Will be made NOACCESS below. */
		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		if (size < chunk_size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		/*
		 * Stick the new block underneath the active allocation block, so
		 * that we don't lose the use of the space remaining therein.
		 */
		if (set->blocks != NULL)
		{
			block->next = set->blocks->next;
			set->blocks->next = block;
		}
		else
		{
			block->next = NULL;
			set->blocks = block;
		}

		AllocAllocInfo(set, chunk);

		/*
		 * Chunk header public fields remain DEFINED.  The requested
		 * allocation itself can be NOACCESS or UNDEFINED; our caller will
		 * soon make it UNDEFINED.  Make extra space at the end of the
		 * chunk, if any, NOACCESS.
		 */
		VALGRIND_MAKE_MEM_NOACCESS((char *) chunk + ALLOC_CHUNK_PUBLIC,
						 chunk_size + ALLOC_CHUNKHDRSZ - ALLOC_CHUNK_PUBLIC);

		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Request is small enough to be treated as a chunk.  Look in the
	 * corresponding free list to see if there is a free chunk we could
	 * reuse.  If one is found, remove it from the free list, make it again
	 * a member of the alloc set and return its data address.
	 */
	fidx = AllocSetFreeIndex(size);
	chunk = set->freelist[fidx];
	if (chunk != NULL)
	{
		Assert(chunk->size >= size);

		set->freelist[fidx] = (AllocChunk) chunk->aset;

		chunk->aset = (void *) set;

#ifdef MEMORY_CONTEXT_CHECKING
		/* Valgrind: Free list requested_size should be DEFINED. */
		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));
		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		AllocAllocInfo(set, chunk);
		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Choose the actual chunk size to allocate.
	 */
	chunk_size = (1 << ALLOC_MINBITS) << fidx;
	Assert(chunk_size >= size);

	/*
	 * If there is enough room in the active allocation block, we will put
	 * the chunk into that block.  Else must start a new one.
	 */
	if ((block = set->blocks) != NULL)
	{
		Size		availspace = block->endptr - block->freeptr;

		if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
		{
			/*
			 * The existing active (top) block does not have enough room for
			 * the requested allocation, but it might still have a useful
			 * amount of space in it.  Once we push it down in the block
			 * list, we'll never try to allocate more space from it.  So,
			 * before we do that, carve up its free space into chunks that
			 * we can put on the set's freelists.
			 *
			 * Because we can only get here when there's less than
			 * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
			 * more than ALLOCSET_NUM_FREELISTS-1 times.
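			 *
			 * For example (illustrative): 300 leftover bytes, with 16-byte
			 * chunk headers, would be carved into one 256-byte chunk and
			 * one 8-byte chunk, abandoning the final 4 bytes.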
			 */
			while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
			{
				Size		availchunk = availspace - ALLOC_CHUNKHDRSZ;
				int			a_fidx = AllocSetFreeIndex(availchunk);

				/*
				 * In most cases, we'll get back the index of the next larger
				 * freelist than the one we need to put this chunk on.  The
				 * exception is when availchunk is exactly a power of 2.
				 */
				if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
				{
					a_fidx--;
					Assert(a_fidx >= 0);
					availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
				}

				chunk = (AllocChunk) (block->freeptr);

				/* Prepare to initialize the chunk header. */
				VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNK_USED);

				block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
				availspace -= (availchunk + ALLOC_CHUNKHDRSZ);

				chunk->size = availchunk;
#ifdef MEMORY_CONTEXT_CHECKING
				chunk->requested_size = 0;		/* mark it free */
#endif
				chunk->aset = (void *) set->freelist[a_fidx];
				set->freelist[a_fidx] = chunk;
			}

			/* Mark that we need to create a new block */
			block = NULL;
		}
	}

	/*
	 * Time to create a new regular (multi-chunk) block?
	 */
	if (block == NULL)
	{
		Size		required_size;

		/*
		 * The first such block has size initBlockSize, and we double the
		 * space in each succeeding block, but not more than maxBlockSize.
		 */
		blksize = set->nextBlockSize;
		set->nextBlockSize <<= 1;
		if (set->nextBlockSize > set->maxBlockSize)
			set->nextBlockSize = set->maxBlockSize;

		/*
		 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need
		 * more space... but try to keep it a power of 2.
		 */
		required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		while (blksize < required_size)
			blksize <<= 1;

		/* Try to allocate it */
		block = (AllocBlock) malloc(blksize);

		/*
		 * We could be asking for pretty big blocks here, so cope if malloc
		 * fails.  But give up if there's less than a meg or so available...
		 */
		while (block == NULL && blksize > 1024 * 1024)
		{
			blksize >>= 1;
			if (blksize < required_size)
				break;
			block = (AllocBlock) malloc(blksize);
		}

		if (block == NULL)
			return NULL;

		block->aset = set;
		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
		block->endptr = ((char *) block) + blksize;

		/*
		 * If this is the first block of the set, make it the "keeper"
		 * block.  Formerly, a keeper block could only be created during
		 * context creation, but allowing it to happen here lets us have
		 * fast reset cycling even for contexts created with
		 * minContextSize = 0; that way we don't have to force space to be
		 * allocated in contexts that might never need any space.  Don't
		 * mark an oversize block as a keeper, however.
		 */
		if (set->keeper == NULL && blksize == set->initBlockSize)
			set->keeper = block;

		/* Mark unallocated space NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
								   blksize - ALLOC_BLOCKHDRSZ);

		block->next = set->blocks;
		set->blocks = block;
	}

	/*
	 * OK, do the allocation
	 */
	chunk = (AllocChunk) (block->freeptr);

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNK_USED);

	block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
	Assert(block->freeptr <= block->endptr);

	chunk->aset = (void *) set;
	chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
							   sizeof(chunk->requested_size));
	/* set mark to catch clobber of "unused" space */
	if (size < chunk->size)
		set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

	AllocAllocInfo(set, chunk);
	return AllocChunkGetPointer(chunk);
}

/*
 * AllocSetFree
 *		Frees allocated memory; memory is removed from the set.
 */
static void
AllocSetFree(MemoryContext context, void *pointer)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);

	AllocFreeInfo(set, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
	VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
							  sizeof(chunk->requested_size));
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < chunk->size)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	if (chunk->size > set->allocChunkLimit)
	{
		/*
		 * Big chunks are certain to have been allocated as single-chunk
		 * blocks.  Find the containing block and return it to malloc().
		 */
		AllocBlock	block = set->blocks;
		AllocBlock	prevblock = NULL;

		while (block != NULL)
		{
			if (chunk == (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ))
				break;
			prevblock = block;
			block = block->next;
		}
		if (block == NULL)
			elog(ERROR, "could not find block containing chunk %p", chunk);
		/* let's just make sure chunk is the only one in the block */
		Assert(block->freeptr == ((char *) block) +
			   (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ));

		/* OK, remove block from aset's list and free it */
		if (prevblock == NULL)
			set->blocks = block->next;
		else
			prevblock->next = block->next;
#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif
		free(block);
	}
	else
	{
		/* Normal case, put the chunk into appropriate freelist */
		int			fidx = AllocSetFreeIndex(chunk->size);

		chunk->aset = (void *) set->freelist[fidx];

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(pointer, chunk->size);
#endif

#ifdef MEMORY_CONTEXT_CHECKING
		/* Reset requested_size to 0 in chunks that are on freelist */
		chunk->requested_size = 0;
#endif
		set->freelist[fidx] = chunk;
	}
}

/*
 * AllocSetRealloc
 *		Returns new pointer to allocated memory of given size or NULL if
 *		request could not be completed; this memory is added to the set.
 *		Memory associated with given pointer is copied into the new memory,
 *		and the old memory is freed.
 *
 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size.  This
 * makes our Valgrind client requests less-precise, hazarding false negatives.
 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
 * request size.)
 */
static void *
AllocSetRealloc(MemoryContext context, void *pointer, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);
	Size		oldsize = chunk->size;

#ifdef MEMORY_CONTEXT_CHECKING
	VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
							  sizeof(chunk->requested_size));
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < oldsize)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	/*
	 * Chunk sizes are aligned to a power of 2 in AllocSetAlloc().  Maybe the
	 * allocated area already is >= the new size.  (In particular, we always
	 * fall out here if the requested size is a decrease.)
	 */
	if (oldsize >= size)
	{
#ifdef MEMORY_CONTEXT_CHECKING
		Size		oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > oldrequest)
			randomize_mem((char *) pointer + oldrequest,
						  size - oldrequest);
#endif

		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));

		/*
		 * If this is an increase, mark any newly-available part UNDEFINED.
		 * Otherwise, mark the obsolete part NOACCESS.
		 */
		if (size > oldrequest)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
										size - oldrequest);
		else
			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
									   oldsize - size);

		/* set mark to catch clobber of "unused" space */
		if (size < oldsize)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't have the information to determine whether we're growing
		 * the old request or shrinking it, so we conservatively mark the
		 * entire new allocation DEFINED.
		 */
		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

		return pointer;
	}

	if (oldsize > set->allocChunkLimit)
	{
		/*
		 * The chunk must have been allocated as a single-chunk block.  Find
		 * the containing block and use realloc() to make it bigger with
		 * minimum space wastage.
		 */
		AllocBlock	block = set->blocks;
		AllocBlock	prevblock = NULL;
		Size		chksize;
		Size		blksize;

		while (block != NULL)
		{
			if (chunk == (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ))
				break;
			prevblock = block;
			block = block->next;
		}
		if (block == NULL)
			elog(ERROR, "could not find block containing chunk %p", chunk);
		/* let's just make sure chunk is the only one in the block */
		Assert(block->freeptr == ((char *) block) +
			   (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ));

		/* Do the realloc */
		chksize = MAXALIGN(size);
		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) realloc(block, blksize);
		if (block == NULL)
			return NULL;
		block->freeptr = block->endptr = ((char *) block) + blksize;

		/* Update pointers since block has likely been moved */
		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		pointer = AllocChunkGetPointer(chunk);
		if (prevblock == NULL)
			set->blocks = block;
		else
			prevblock->next = block;
		chunk->size = chksize;

#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		randomize_mem((char *) pointer + chunk->requested_size,
					  size - chunk->requested_size);
#endif

		/*
		 * realloc() (or randomize_mem()) will have left the newly-allocated
		 * part UNDEFINED, but we may need to adjust trailing bytes from the
		 * old allocation.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
									oldsize - chunk->requested_size);

		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));

		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't know how much of the old chunk size was the actual
		 * allocation; it could have been as small as one byte.  We have to
		 * be conservative and just mark the entire old portion DEFINED.
		 */
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* Make any trailing alignment padding NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);

		return AllocChunkGetPointer(chunk);
	}
	else
	{
		/*
		 * Small-chunk case.  We just do this by brute force, ie, allocate a
		 * new chunk and copy the data.  Since we know the existing data
		 * isn't huge, this won't involve any great memcpy expense, so it's
		 * not worth being smarter.  (At one time we tried to avoid memcpy
		 * when it was possible to enlarge the chunk in-place, but that
		 * turns out to misbehave unpleasantly for repeated cycles of
		 * palloc/repalloc/pfree: the eventually freed chunks go into the
		 * wrong freelist for the next initial palloc request, and so we
		 * leak memory indefinitely.  See pgsql-hackers archives for
		 * 2007-08-11.)
		 */
		AllocPointer newPointer;

		/* allocate new chunk */
		newPointer = AllocSetAlloc((MemoryContext) set, size);

		/* leave immediately if request was not completed */
		if (newPointer == NULL)
			return NULL;

		/*
		 * AllocSetAlloc() just made the region NOACCESS.  Change it to
		 * UNDEFINED for the moment; memcpy() will then transfer definedness
		 * from the old allocation to the new.  If we know the old
		 * allocation, copy just that much.  Otherwise, make the entire old
		 * chunk defined to avoid errors as we copy the currently-NOACCESS
		 * trailing bytes.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
		oldsize = chunk->requested_size;
#else
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* transfer existing data (certain to fit) */
		memcpy(newPointer, pointer, oldsize);

		/* free old chunk */
		AllocSetFree((MemoryContext) set, pointer);

		return newPointer;
	}
}

/*
 * AllocSetGetChunkSpace
 *		Given a currently-allocated chunk, determine the total space
 *		it occupies (including all memory-allocation overhead).
 */
static Size
AllocSetGetChunkSpace(MemoryContext context, void *pointer)
{
	AllocChunk	chunk = AllocPointerGetChunk(pointer);

	return chunk->size + ALLOC_CHUNKHDRSZ;
}
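
/*
 * For instance (illustrative figures, assuming a 16-byte chunk header as on
 * a typical 64-bit build without MEMORY_CONTEXT_CHECKING): a palloc(100)
 * occupies a 128-byte chunk, so AllocSetGetChunkSpace reports 144 bytes.
 */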

/*
 * AllocSetIsEmpty
 *		Is an allocset empty of any allocated space?
 */
static bool
AllocSetIsEmpty(MemoryContext context)
{
	/*
	 * For now, we say "empty" only if the context is new or just reset.  We
	 * could examine the freelists to determine if all space has been freed,
	 * but it's not really worth the trouble for present uses of this
	 * functionality.
	 */
	if (context->isReset)
		return true;
	return false;
}

/*
 * AllocSetStats
 *		Compute stats about memory consumption of an allocset.
 *
 * level: recursion level (0 at top level); used for print indentation.
 * print: true to print stats to stderr.
 * totals: if not NULL, add stats about this allocset into *totals.
 */
static void
AllocSetStats(MemoryContext context, int level, bool print,
			  MemoryContextCounters *totals)
{
	AllocSet	set = (AllocSet) context;
	Size		nblocks = 0;
	Size		freechunks = 0;
	Size		totalspace = 0;
	Size		freespace = 0;
	AllocBlock	block;
	int			fidx;

	for (block = set->blocks; block != NULL; block = block->next)
	{
		nblocks++;
		totalspace += block->endptr - ((char *) block);
		freespace += block->endptr - block->freeptr;
	}
	for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
	{
		AllocChunk	chunk;

		for (chunk = set->freelist[fidx]; chunk != NULL;
			 chunk = (AllocChunk) chunk->aset)
		{
			freechunks++;
			freespace += chunk->size + ALLOC_CHUNKHDRSZ;
		}
	}

	if (print)
	{
		int			i;

		for (i = 0; i < level; i++)
			fprintf(stderr, "  ");
		fprintf(stderr,
			"%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n",
				set->header.name, totalspace, nblocks, freespace, freechunks,
				totalspace - freespace);
	}

	if (totals)
	{
		totals->nblocks += nblocks;
		totals->freechunks += freechunks;
		totals->totalspace += totalspace;
		totals->freespace += freespace;
	}
}

#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
AllocSetCheck(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	char	   *name = set->header.name;
	AllocBlock	block;

	for (block = set->blocks; block != NULL; block = block->next)
	{
		char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
		long		blk_used = block->freeptr - bpoz;
		long		blk_data = 0;
		long		nchunks = 0;

		/*
		 * Empty block --- only the keeper block may be empty
		 */
		if (!blk_used)
		{
			if (set->keeper != block)
				elog(WARNING, "problem in alloc set %s: empty block %p",
					 name, block);
		}

		/*
		 * Chunk walker
		 */
		while (bpoz < block->freeptr)
		{
			AllocChunk	chunk = (AllocChunk) bpoz;
			Size		chsize,
						dsize;

			chsize = chunk->size;		/* aligned chunk size */
			VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
									  sizeof(chunk->requested_size));
			dsize = chunk->requested_size;		/* real data */
			if (dsize > 0)		/* not on a free list */
				VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
										   sizeof(chunk->requested_size));

			/*
			 * Check chunk size
			 */
			if (dsize > chsize)
				elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
					 name, chunk, block);
			if (chsize < (1 << ALLOC_MINBITS))
				elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
					 name, chsize, chunk, block);

			/* single-chunk block? */
			if (chsize > set->allocChunkLimit &&
				chsize + ALLOC_CHUNKHDRSZ != blk_used)
				elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
					 name, chunk, block);

			/*
			 * If chunk is allocated, check for correct aset pointer.  (If
			 * it's free, the aset is the freelist pointer, which we can't
			 * check as easily...)
			 */
			if (dsize > 0 && chunk->aset != (void *) set)
				elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
					 name, block, chunk);

			/*
			 * Check for overwrite of "unallocated" space in chunk
			 */
			if (dsize > 0 && dsize < chsize &&
				!sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
				elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
					 name, block, chunk);

			blk_data += chsize;
			nchunks++;

			bpoz += ALLOC_CHUNKHDRSZ + chsize;
		}

		if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
			elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
				 name, block);
	}
}

#endif   /* MEMORY_CONTEXT_CHECKING */