Move a few ResourceOwnerEnlarge() calls for safety and clarity.
These are functions where a lot happens between the
ResourceOwnerEnlarge and ResourceOwnerRemember calls. It's important
that there are no unrelated ResourceOwnerRemember calls in the code in
between; otherwise, the reserved entry might be used up by the
intervening call and no longer be available at the intended
ResourceOwnerRemember call. I don't see any bugs here, but the longer
the code path between the two calls, the harder it is to verify.
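
For illustration (this sketch is not part of the commit), the
reserve-then-remember pattern looks roughly like the code below;
remember_pinned_buffer is a made-up placeholder name, while the
ResourceOwner calls are the real API:

    #include "postgres.h"
    #include "storage/buf.h"
    #include "utils/resowner_private.h"

    static void
    remember_pinned_buffer(Buffer buf)
    {
        /*
         * Reserve the array slot first, while failing is still harmless:
         * ResourceOwnerEnlargeBuffers() may allocate memory, so it can
         * fail with an error.
         */
        ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

        /*
         * ... the code that actually acquires the pin runs here.  Nothing
         * in this window may make an unrelated
         * ResourceOwnerRememberBuffer() call, or the slot reserved above
         * could already be used up when it is needed below.
         */

        /* Cannot fail, because a slot was reserved above. */
        ResourceOwnerRememberBuffer(CurrentResourceOwner, buf);
    }

An unrelated Remember call in the middle would not fail immediately; it
would just leave no reserved slot for the final call, which is the
hazard described above.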

In bufmgr.c, there is a function similar to ResourceOwnerEnlarge,
ReservePrivateRefCountEntry(), which ensures that the private refcount
array has enough space. The ReservePrivateRefCountEntry() calls were
made in different places from the ResourceOwnerEnlargeBuffers() calls.
Move the ResourceOwnerEnlargeBuffers() and ReservePrivateRefCountEntry()
calls next to each other for consistency.
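
Again for illustration (not part of the commit), the arrangement bufmgr.c
settles on looks roughly like the fragment below, as it would sit inside
one of the flush loops; bufHdr and buf_state come from the surrounding
code, and the BM_DIRTY test stands in for the real per-caller checks:

    /*
     * Make both reservations together, before the buffer-header spinlock
     * is taken: both calls may allocate memory and report errors, which
     * is not allowed while a spinlock is held.
     */
    ReservePrivateRefCountEntry();
    ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

    buf_state = LockBufHdr(bufHdr);
    if (buf_state & BM_DIRTY)
        PinBuffer_Locked(bufHdr);       /* consumes both reservations */
    else
        UnlockBufHdr(bufHdr, buf_state);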

Reviewed-by: Aleksander Alekseev, Michael Paquier, Julien Rouhaud
Reviewed-by: Kyotaro Horiguchi, Hayato Kuroda, Álvaro Herrera, Zhihong Yu
Reviewed-by: Peter Eisentraut, Andres Freund
Discussion: https://www.postgresql.org/message-id/cbfabeb0-cd3c-e951-a572-19b365ed314d%40iki.fi
hlinnaka committed Nov 8, 2023
1 parent e9f075f commit b70c214
Showing 3 changed files with 27 additions and 29 deletions.
49 changes: 22 additions & 27 deletions src/backend/storage/buffer/bufmgr.c
@@ -1023,9 +1023,6 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
forkNum, strategy, flags);
}

- /* Make sure we will have room to remember the buffer pin */
- ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
@@ -1230,6 +1227,10 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
BufferDesc *victim_buf_hdr;
uint32 victim_buf_state;

+ /* Make sure we will have room to remember the buffer pin */
+ ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
+ ReservePrivateRefCountEntry();
+
/* create a tag so we can lookup the buffer */
InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);

@@ -1591,7 +1592,7 @@ GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)

/*
* Ensure, while the spinlock's not yet held, that there's a free refcount
- * entry.
+ * entry, and a resource owner slot for the pin.
*/
ReservePrivateRefCountEntry();
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
@@ -1859,9 +1860,6 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
MemSet((char *) buf_block, 0, BLCKSZ);
}

- /* in case we need to pin an existing buffer below */
- ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
/*
* Lock relation against concurrent extensions, unless requested not to.
*
@@ -1947,6 +1945,10 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
LWLock *partition_lock;
int existing_id;

+ /* in case we need to pin an existing buffer below */
+ ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
+ ReservePrivateRefCountEntry();
+
InitBufferTag(&tag, &bmr.smgr->smgr_rlocator.locator, fork, first_block + i);
hash = BufTableHashCode(&tag);
partition_lock = BufMappingPartitionLock(hash);
@@ -2281,7 +2283,8 @@ ReleaseAndReadBuffer(Buffer buffer,
* taking the buffer header lock; instead update the state variable in loop of
* CAS operations. Hopefully it's just a single CAS.
*
- * Note that ResourceOwnerEnlargeBuffers must have been done already.
+ * Note that ResourceOwnerEnlargeBuffers and ReservePrivateRefCountEntry()
+ * must have been done already.
*
* Returns true if buffer is BM_VALID, else false. This provision allows
* some callers to avoid an extra spinlock cycle.
@@ -2294,6 +2297,7 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
PrivateRefCountEntry *ref;

Assert(!BufferIsLocal(b));
+ Assert(ReservedRefCountEntry != NULL);

ref = GetPrivateRefCountEntry(b, true);

@@ -2302,7 +2306,6 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
uint32 buf_state;
uint32 old_buf_state;

- ReservePrivateRefCountEntry();
ref = NewPrivateRefCountEntry(b);

old_buf_state = pg_atomic_read_u32(&buf->state);
@@ -2375,7 +2378,8 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
* The spinlock is released before return.
*
* As this function is called with the spinlock held, the caller has to
- * previously call ReservePrivateRefCountEntry().
+ * previously call ReservePrivateRefCountEntry() and
+ * ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
*
* Currently, no callers of this function want to modify the buffer's
* usage_count at all, so there's no need for a strategy parameter.
@@ -2550,9 +2554,6 @@ BufferSync(int flags)
int mask = BM_DIRTY;
WritebackContext wb_context;

- /* Make sure we can handle the pin inside SyncOneBuffer */
- ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
/*
* Unless this is a shutdown checkpoint or we have been explicitly told,
* we write only permanent, dirty buffers. But at shutdown or end of
@@ -3029,9 +3030,6 @@ BgBufferSync(WritebackContext *wb_context)
* requirements, or hit the bgwriter_lru_maxpages limit.
*/

- /* Make sure we can handle the pin inside SyncOneBuffer */
- ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
num_to_scan = bufs_to_lap;
num_written = 0;
reusable_buffers = reusable_buffers_est;
@@ -3113,8 +3111,6 @@
*
* (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
* after locking it, but we don't care all that much.)
- *
- * Note: caller must have done ResourceOwnerEnlargeBuffers.
*/
static int
SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
@@ -3124,7 +3120,9 @@ SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
uint32 buf_state;
BufferTag tag;

+ /* Make sure we can handle the pin */
ReservePrivateRefCountEntry();
+ ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

/*
* Check whether buffer needs writing.
@@ -4169,9 +4167,6 @@ FlushRelationBuffers(Relation rel)
return;
}

- /* Make sure we can handle the pin inside the loop */
- ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
for (i = 0; i < NBuffers; i++)
{
uint32 buf_state;
@@ -4185,7 +4180,9 @@ FlushRelationBuffers(Relation rel)
if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
continue;

+ /* Make sure we can handle the pin */
ReservePrivateRefCountEntry();
+ ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

buf_state = LockBufHdr(bufHdr);
if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
@@ -4242,9 +4239,6 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
if (use_bsearch)
pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);

- /* Make sure we can handle the pin inside the loop */
- ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
for (i = 0; i < NBuffers; i++)
{
SMgrSortArray *srelent = NULL;
Expand Down Expand Up @@ -4283,7 +4277,9 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
if (srelent == NULL)
continue;

+ /* Make sure we can handle the pin */
ReservePrivateRefCountEntry();
+ ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

buf_state = LockBufHdr(bufHdr);
if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
@@ -4478,9 +4474,6 @@ FlushDatabaseBuffers(Oid dbid)
int i;
BufferDesc *bufHdr;

- /* Make sure we can handle the pin inside the loop */
- ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
for (i = 0; i < NBuffers; i++)
{
uint32 buf_state;
@@ -4494,7 +4487,9 @@ FlushDatabaseBuffers(Oid dbid)
if (bufHdr->tag.dbOid != dbid)
continue;

+ /* Make sure we can handle the pin */
ReservePrivateRefCountEntry();
+ ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

buf_state = LockBufHdr(bufHdr);
if (bufHdr->tag.dbOid == dbid &&
2 changes: 2 additions & 0 deletions src/backend/storage/buffer/localbuf.c
@@ -130,6 +130,8 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
if (LocalBufHash == NULL)
InitLocalBuffers();

+ ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
+
/* See if the desired buffer already exists */
hresult = (LocalBufferLookupEnt *)
hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);
5 changes: 3 additions & 2 deletions src/backend/utils/cache/catcache.c
@@ -1605,8 +1605,6 @@ SearchCatCacheList(CatCache *cache,
* block to ensure we can undo those refcounts if we get an error before
* we finish constructing the CatCList.
*/
- ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
-
ctlist = NIL;

PG_TRY();
@@ -1694,6 +1692,9 @@ SearchCatCacheList(CatCache *cache,

table_close(relation, AccessShareLock);

+ /* Make sure the resource owner has room to remember this entry. */
+ ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
+
/* Now we can build the CatCList entry. */
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
nmembers = list_length(ctlist);
