Optimize method PoolChunk.allocate(...) to reduce overhead (#13528)
Motivation:

When entering method `PoolChunk.allocate(...)`, we need to check whether
`head.next == head`. This condition was already checked in method
`PoolArena.tcacheAllocateSmall(...)`, but `head.next` may have changed
since then, so it needs to be checked again.

Re-checking it in `PoolChunk.allocate(...)` lowers the chance of calling
`PoolChunk.allocateSubpage(...)` and `PoolChunk.allocateRun(...)`, and so
reduces overhead.

Modification:

Check whether `head.next == head` again, under the head lock, in method
`PoolChunk.allocate(...)`; if a subpage is available, allocate from it
directly instead of calling `allocateSubpage(...)`.

Result:

Lower the chance of calling `PoolChunk.allocateSubpage(...)` and
`PoolChunk.allocateRun(...)`, reducing overhead. A simplified sketch of the
re-check pattern follows below.
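
The following is an editorial sketch of that re-check pattern, not Netty code: `DoubleCheckedPool`, its `Deque`-based pool, and `allocateExpensively()` are made-up stand-ins for the subpage list, `head.lock()`, and `allocateSubpage(...)`/`allocateRun(...)`.

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Editorial sketch of the double-check pattern this commit applies; not Netty code.
 * The Deque stands in for the per-size subpage list, the ReentrantLock for head.lock(),
 * and allocateExpensively() for allocateSubpage(...)/allocateRun(...).
 */
final class DoubleCheckedPool {
    private final Deque<Long> pooled = new ArrayDeque<>();
    private final ReentrantLock lock = new ReentrantLock();

    long allocate() {
        // First attempt, analogous to the check in PoolArena.tcacheAllocateSmall(...).
        lock.lock();
        try {
            Long handle = pooled.pollFirst();
            if (handle != null) {
                return handle;                // fast path: reuse a pooled handle
            }
        } finally {
            lock.unlock();
        }

        // The lock was released above, so another thread may have refilled the pool.
        // Re-check under the lock, as the new code in PoolChunk.allocate(...) does,
        // before paying for the expensive fallback.
        lock.lock();
        try {
            Long handle = pooled.pollFirst();
            if (handle != null) {
                return handle;                // the expensive path was avoided
            }
            return allocateExpensively();     // analogous to allocateSubpage/allocateRun
        } finally {
            lock.unlock();
        }
    }

    void release(long handle) {
        // Another thread returning capacity to the pool between the two checks
        // is exactly the situation the re-check exploits.
        lock.lock();
        try {
            pooled.push(handle);
        } finally {
            lock.unlock();
        }
    }

    private long allocateExpensively() {
        return -1;                            // placeholder for carving a new run
    }
}
```

The second critical section is the part this commit adds to `PoolChunk.allocate(...)`: only if the pool is still empty after re-taking the lock does the expensive path run.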

Co-authored-by: laosijikaichele <laosijikaichele>
laosijikaichele committed Aug 18, 2023
1 parent 5cb4974 · commit ec9875d
Showing 1 changed file with 40 additions and 28 deletions.
buffer/src/main/java/io/netty/buffer/PoolChunk.java (40 additions, 28 deletions)
```diff
@@ -314,12 +314,31 @@ private int usage(int freeBytes) {
     boolean allocate(PooledByteBuf<T> buf, int reqCapacity, int sizeIdx, PoolThreadCache cache) {
         final long handle;
         if (sizeIdx <= arena.smallMaxSizeIdx) {
+            final PoolSubpage<T> nextSub;
             // small
-            handle = allocateSubpage(sizeIdx);
-            if (handle < 0) {
-                return false;
+            // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
+            // This is need as we may add it back and so alter the linked-list structure.
+            PoolSubpage<T> head = arena.findSubpagePoolHead(sizeIdx);
+            head.lock();
+            try {
+                nextSub = head.next;
+                if (nextSub != head) {
+                    assert nextSub.doNotDestroy && nextSub.elemSize == arena.sizeIdx2size(sizeIdx) : "doNotDestroy=" +
+                            nextSub.doNotDestroy + ", elemSize=" + nextSub.elemSize + ", sizeIdx=" + sizeIdx;
+                    handle = nextSub.allocate();
+                    assert handle >= 0;
+                    assert isSubpage(handle);
+                    nextSub.chunk.initBufWithSubpage(buf, null, handle, reqCapacity, cache);
+                    return true;
+                }
+                handle = allocateSubpage(sizeIdx, head);
+                if (handle < 0) {
+                    return false;
+                }
+                assert isSubpage(handle);
+            } finally {
+                head.unlock();
             }
-            assert isSubpage(handle);
         } else {
             // normal
             // runSize must be multiple of pageSize
```
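
As an editorial aside between the two hunks: `allocateSubpage(...)` now relies on its caller already holding the head lock, since the locking moved into `allocate(...)` above. A minimal, hypothetical way to express such a precondition is sketched below; the real code relies on the calling convention shown in the second hunk rather than an explicit guard.

```java
import java.util.concurrent.locks.ReentrantLock;

/**
 * Editorial sketch (not from the commit) of the "caller holds the lock" contract
 * that the second hunk introduces for allocateSubpage(sizeIdx, head): the callee
 * no longer locks, so one way to document and enforce the precondition is an
 * assertion against the lock itself.
 */
final class CallerHoldsLockSketch {
    private final ReentrantLock headLock = new ReentrantLock();   // stand-in for head.lock()

    long allocate() {
        headLock.lock();
        try {
            // ... fast path elided ...
            return allocateSlowPath();        // called with the lock already held
        } finally {
            headLock.unlock();
        }
    }

    private long allocateSlowPath() {
        // Hypothetical guard; the real allocateSubpage(...) relies on the caller by convention.
        assert headLock.isHeldByCurrentThread() : "caller must hold the head lock";
        return -1;                            // placeholder for the real work
    }
}
```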
```diff
@@ -432,38 +451,31 @@ private long splitLargeRun(long handle, int needPages) {
 
     /**
      * Create / initialize a new PoolSubpage of normCapacity. Any PoolSubpage created / initialized here is added to
-     * subpage pool in the PoolArena that owns this PoolChunk
+     * subpage pool in the PoolArena that owns this PoolChunk.
      *
      * @param sizeIdx sizeIdx of normalized size
+     * @param head head of subpages
      *
      * @return index in memoryMap
      */
-    private long allocateSubpage(int sizeIdx) {
-        // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
-        // This is need as we may add it back and so alter the linked-list structure.
-        PoolSubpage<T> head = arena.findSubpagePoolHead(sizeIdx);
-        head.lock();
-        try {
-            //allocate a new run
-            int runSize = calculateRunSize(sizeIdx);
-            //runSize must be multiples of pageSize
-            long runHandle = allocateRun(runSize);
-            if (runHandle < 0) {
-                return -1;
-            }
+    private long allocateSubpage(int sizeIdx, PoolSubpage<T> head) {
+        //allocate a new run
+        int runSize = calculateRunSize(sizeIdx);
+        //runSize must be multiples of pageSize
+        long runHandle = allocateRun(runSize);
+        if (runHandle < 0) {
+            return -1;
+        }
 
-            int runOffset = runOffset(runHandle);
-            assert subpages[runOffset] == null;
-            int elemSize = arena.sizeIdx2size(sizeIdx);
+        int runOffset = runOffset(runHandle);
+        assert subpages[runOffset] == null;
+        int elemSize = arena.sizeIdx2size(sizeIdx);
 
-            PoolSubpage<T> subpage = new PoolSubpage<T>(head, this, pageShifts, runOffset,
-                    runSize(pageShifts, runHandle), elemSize);
+        PoolSubpage<T> subpage = new PoolSubpage<T>(head, this, pageShifts, runOffset,
+                runSize(pageShifts, runHandle), elemSize);
 
-            subpages[runOffset] = subpage;
-            return subpage.allocate();
-        } finally {
-            head.unlock();
-        }
+        subpages[runOffset] = subpage;
+        return subpage.allocate();
     }
 
     /**
```
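
As background for the `head.next == head` test above (an editorial illustration, not part of the commit): the per-size subpage pool is kept as an intrusive doubly-linked list behind a sentinel head, so the pool is empty exactly when the sentinel points at itself. The sketch below uses a made-up `SentinelList`/`Entry` pair rather than the real `PoolSubpage` fields.

```java
import java.util.NoSuchElementException;

/**
 * Hypothetical sketch of an intrusive doubly-linked list with a sentinel head,
 * the shape behind the "head.next == head" test in the diff above.
 * Class and field names are made up; this is not the real PoolSubpage.
 */
final class SentinelList<T> {
    static final class Entry<E> {
        E value;
        Entry<E> prev;
        Entry<E> next;
    }

    private final Entry<T> head = new Entry<>();   // sentinel, carries no value

    SentinelList() {
        head.prev = head;
        head.next = head;                          // empty pool: the sentinel points at itself
    }

    boolean isEmpty() {
        return head.next == head;                  // the condition re-checked in PoolChunk.allocate(...)
    }

    void addAfterHead(T value) {                   // e.g. a subpage that regained free elements
        Entry<T> e = new Entry<>();
        e.value = value;
        e.prev = head;
        e.next = head.next;
        head.next.prev = e;
        head.next = e;
    }

    T first() {
        if (isEmpty()) {
            throw new NoSuchElementException("pool is empty");
        }
        return head.next.value;                    // head.next != head: a pooled entry exists
    }
}
```

With this shape, `head.next != head` is a constant-time "something is pooled" check, which is why re-testing it before falling back to `allocateSubpage(...)` is cheap.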
