HBASE-26142 NullPointerException when set 'hbase.hregion.memstore.mslab.indexchunksize.percent' to zero (#3531)

Signed-off-by: Duo Zhang <zhangduo@apache.org>
comnetwork committed Aug 5, 2021
1 parent da950b9 commit d4aed4d59e966341456cb275f2d426a95003b570
Showing 6 changed files with 255 additions and 51 deletions.
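With 'hbase.hregion.memstore.mslab.indexchunksize.percent' set to zero the index chunk pool is never created, so the change tags every Chunk with an explicit ChunkType, records the computed index chunk size on the ChunkCreator even when that pool is absent, and rejects an INDEX_CHUNK request outright when the configured index chunk size is not positive. A minimal standalone sketch of that guard (simplified names, not the actual HBase classes; the 2 MB chunk size is illustrative):

```java
// Standalone mirror of the new INDEX_CHUNK branch in ChunkCreator#getChunk
// (simplified; the real method also consults the index chunk pool when present).
public class IndexChunkGuardSketch {

  static final int CHUNK_SIZE = 2 * 1024 * 1024;       // illustrative data chunk size
  static final float INDEX_CHUNK_PERCENT = 0.0f;       // the setting that triggers the bug
  static final int INDEX_CHUNK_SIZE = (int) (INDEX_CHUNK_PERCENT * CHUNK_SIZE); // -> 0

  /** Resolve the size of an index chunk; poolChunkSize is null when no index pool exists. */
  static int resolveIndexChunkSize(Integer poolChunkSize) {
    if (poolChunkSize == null) {
      if (INDEX_CHUNK_SIZE <= 0) {
        // New behaviour: fail fast with a clear message instead of an NPE further down.
        throw new IllegalArgumentException(
            "chunkType is INDEX_CHUNK but indexChunkSize is:[" + INDEX_CHUNK_SIZE + "]");
      }
      return INDEX_CHUNK_SIZE;
    }
    return poolChunkSize;
  }

  public static void main(String[] args) {
    try {
      resolveIndexChunkSize(null);                      // no index pool, size 0 -> rejected
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
```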
@@ -19,6 +19,8 @@

import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

@@ -49,6 +51,8 @@ public abstract class Chunk {
// The unique id associated with the chunk.
private final int id;

private final ChunkType chunkType;

// indicates if the chunk is formed by ChunkCreator#MemstorePool
private final boolean fromPool;

@@ -58,8 +62,8 @@ public abstract class Chunk {
* @param size in bytes
* @param id the chunk id
*/
public Chunk(int size, int id) {
this(size, id, false);
public Chunk(int size, int id, ChunkType chunkType) {
this(size, id, chunkType, false);
}

/**
@@ -69,26 +73,35 @@ public Chunk(int size, int id) {
* @param id the chunk id
* @param fromPool if the chunk is formed by pool
*/
public Chunk(int size, int id, boolean fromPool) {
public Chunk(int size, int id, ChunkType chunkType, boolean fromPool) {
this.size = size;
this.id = id;
this.chunkType = chunkType;
this.fromPool = fromPool;
}

int getId() {
return this.id;
}

ChunkType getChunkType() {
return this.chunkType;
}

boolean isFromPool() {
return this.fromPool;
}

boolean isJumbo() {
return size > ChunkCreator.getInstance().getChunkSize();
return chunkType == ChunkCreator.ChunkType.JUMBO_CHUNK;
}

boolean isIndexChunk() {
return size == ChunkCreator.getInstance().getChunkSize(ChunkCreator.ChunkType.INDEX_CHUNK);
return chunkType == ChunkCreator.ChunkType.INDEX_CHUNK;
}

boolean isDataChunk() {
return chunkType == ChunkCreator.ChunkType.DATA_CHUNK;
}

/**
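The Chunk class now carries its ChunkType from construction, so isJumbo(), isIndexChunk() and isDataChunk() become plain enum comparisons instead of size lookups routed through ChunkCreator. A small standalone sketch of the classification (simplified, not the HBase classes):

```java
// Chunks are classified by an explicit tag rather than by comparing their size
// against whatever the (possibly absent) pools were configured with.
class TaggedChunkSketch {
  enum ChunkType { INDEX_CHUNK, DATA_CHUNK, JUMBO_CHUNK }

  private final int size;
  private final ChunkType chunkType;

  TaggedChunkSketch(int size, ChunkType chunkType) {
    this.size = size;
    this.chunkType = chunkType;
  }

  boolean isJumbo()      { return chunkType == ChunkType.JUMBO_CHUNK; }
  boolean isIndexChunk() { return chunkType == ChunkType.INDEX_CHUNK; }
  boolean isDataChunk()  { return chunkType == ChunkType.DATA_CHUNK; }

  int getSize() { return size; }
}
```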
@@ -77,6 +77,7 @@ public enum ChunkType {
static boolean chunkPoolDisabled = false;
private MemStoreChunkPool dataChunksPool;
private final int chunkSize;
private int indexChunkSize;
private MemStoreChunkPool indexChunksPool;

ChunkCreator(int chunkSize, boolean offheap, long globalMemStoreSize, float poolSizePercentage,
@@ -94,13 +95,14 @@ private void initializePools(int chunkSize, long globalMemStoreSize,
HeapMemoryManager heapMemoryManager) {
this.dataChunksPool = initializePool("data", globalMemStoreSize,
(1 - indexChunkSizePercentage) * poolSizePercentage,
initialCountPercentage, chunkSize, heapMemoryManager);
initialCountPercentage, chunkSize, ChunkType.DATA_CHUNK, heapMemoryManager);
// The index chunks pool is needed only when the index type is CCM.
// Since the pools are not created at all when the index type isn't CCM,
// we don't need to check it here.
this.indexChunkSize = (int) (indexChunkSizePercentage * chunkSize);
this.indexChunksPool = initializePool("index", globalMemStoreSize,
indexChunkSizePercentage * poolSizePercentage,
initialCountPercentage, (int) (indexChunkSizePercentage * chunkSize),
initialCountPercentage, this.indexChunkSize, ChunkType.INDEX_CHUNK,
heapMemoryManager);
}

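initializePools now caches indexChunkSize on the creator itself, so the value is available even when a zero percentage means initializePool skips creating the index pool (it logs that the pool will not be used for a non-positive percentage). The arithmetic, with illustrative numbers:

```java
// Illustrative arithmetic for the cached index chunk size.
public class IndexChunkSizeMath {
  public static void main(String[] args) {
    int chunkSize = 2 * 1024 * 1024;        // example data chunk size in bytes
    float indexChunkSizePercentage = 0.0f;  // hbase.hregion.memstore.mslab.indexchunksize.percent = 0
    float poolSizePercentage = 0.5f;        // example overall MSLAB pool fraction

    int indexChunkSize = (int) (indexChunkSizePercentage * chunkSize);           // 0 bytes
    float indexPoolPercentage = indexChunkSizePercentage * poolSizePercentage;   // 0.0 -> no pool

    // With a zero index pool percentage no index pool is built, but the
    // creator still remembers indexChunkSize (here 0) for later validation.
    System.out.println("indexChunkSize=" + indexChunkSize
        + " indexPoolPercentage=" + indexPoolPercentage);
  }
}
```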
@@ -163,14 +165,20 @@ Chunk getChunk(CompactingMemStore.IndexType chunkIndexType) {
Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, ChunkType chunkType) {
switch (chunkType) {
case INDEX_CHUNK:
if (indexChunksPool != null) {
return getChunk(chunkIndexType, indexChunksPool.getChunkSize());
if (indexChunksPool == null) {
if (indexChunkSize <= 0) {
throw new IllegalArgumentException(
"chunkType is INDEX_CHUNK but indexChunkSize is:[" + this.indexChunkSize + "]");
}
return getChunk(chunkIndexType, chunkType, indexChunkSize);
} else {
return getChunk(chunkIndexType, chunkType, indexChunksPool.getChunkSize());
}
case DATA_CHUNK:
if (dataChunksPool == null) {
return getChunk(chunkIndexType, chunkSize);
return getChunk(chunkIndexType, chunkType, chunkSize);
} else {
return getChunk(chunkIndexType, dataChunksPool.getChunkSize());
return getChunk(chunkIndexType, chunkType, dataChunksPool.getChunkSize());
}
default:
throw new IllegalArgumentException(
@@ -184,14 +192,14 @@ Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, ChunkType chunkType)
* @param chunkIndexType whether the requested chunk is going to be used with CellChunkMap index
* @param size the size of the chunk to be allocated, in bytes
*/
Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, int size) {
Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, ChunkType chunkType, int size) {
Chunk chunk = null;
MemStoreChunkPool pool = null;

// if the size is suitable for one of the pools
if (dataChunksPool != null && size == dataChunksPool.getChunkSize()) {
// if it is one of the pools
if (dataChunksPool != null && chunkType == ChunkType.DATA_CHUNK) {
pool = dataChunksPool;
} else if (indexChunksPool != null && size == indexChunksPool.getChunkSize()) {
} else if (indexChunksPool != null && chunkType == ChunkType.INDEX_CHUNK) {
pool = indexChunksPool;
}

@@ -211,7 +219,7 @@ Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, int size) {
if (chunk == null) {
// the second parameter explains whether CellChunkMap index is requested,
// in that case, put allocated on demand chunk mapping into chunkIdMap
chunk = createChunk(false, chunkIndexType, size);
chunk = createChunk(false, chunkIndexType, chunkType, size);
}

// now we need to actually do the expensive memory allocation step in case of a new chunk,
@@ -228,14 +236,15 @@ Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, int size) {
*/
Chunk getJumboChunk(int jumboSize) {
int allocSize = jumboSize + SIZEOF_CHUNK_HEADER;
if (allocSize <= dataChunksPool.getChunkSize()) {

if (allocSize <= this.getChunkSize(ChunkType.DATA_CHUNK)) {
LOG.warn("Jumbo chunk size " + jumboSize + " must be more than regular chunk size "
+ dataChunksPool.getChunkSize() + ". Converting to regular chunk.");
+ this.getChunkSize(ChunkType.DATA_CHUNK) + ". Converting to regular chunk.");
return getChunk(CompactingMemStore.IndexType.CHUNK_MAP);
}
// the new chunk is going to hold the jumbo cell data and needs to be referenced by
// a strong map. Therefore the CCM index type
return getChunk(CompactingMemStore.IndexType.CHUNK_MAP, allocSize);
return getChunk(CompactingMemStore.IndexType.CHUNK_MAP, ChunkType.JUMBO_CHUNK, allocSize);
}

/**
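getJumboChunk now reads the regular chunk size through getChunkSize(ChunkType.DATA_CHUNK) instead of dereferencing dataChunksPool directly, and tags the allocation as ChunkType.JUMBO_CHUNK. The sizing rule it applies, as a small standalone sketch (header size and chunk size constants are illustrative):

```java
// A jumbo allocation only stays "jumbo" if, including the chunk header,
// it is larger than a regular data chunk; otherwise a regular chunk is used.
public class JumboSizingSketch {
  static final int SIZEOF_CHUNK_HEADER = 4;            // illustrative header size in bytes
  static final int DATA_CHUNK_SIZE = 2 * 1024 * 1024;  // illustrative regular chunk size

  static boolean needsJumboChunk(int jumboSize) {
    int allocSize = jumboSize + SIZEOF_CHUNK_HEADER;
    return allocSize > DATA_CHUNK_SIZE;
  }

  public static void main(String[] args) {
    System.out.println(needsJumboChunk(1024));             // false -> downgraded to regular chunk
    System.out.println(needsJumboChunk(3 * 1024 * 1024));  // true  -> allocated as JUMBO_CHUNK
  }
}
```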
@@ -245,15 +254,16 @@ Chunk getJumboChunk(int jumboSize) {
* @param size the size of the chunk to be allocated, in bytes
* @return the chunk
*/
private Chunk createChunk(boolean pool, CompactingMemStore.IndexType chunkIndexType, int size) {
private Chunk createChunk(boolean pool, CompactingMemStore.IndexType chunkIndexType,
ChunkType chunkType, int size) {
Chunk chunk = null;
int id = chunkID.getAndIncrement();
assert id > 0;
// do not create offheap chunk on demand
if (pool && this.offheap) {
chunk = new OffheapChunk(size, id, pool);
chunk = new OffheapChunk(size, id, chunkType, pool);
} else {
chunk = new OnheapChunk(size, id, pool);
chunk = new OnheapChunk(size, id, chunkType, pool);
}
if (pool || (chunkIndexType == CompactingMemStore.IndexType.CHUNK_MAP)) {
// put the pool chunk into the chunkIdMap so it is not GC-ed
@@ -264,12 +274,13 @@ private Chunk createChunk(boolean pool, CompactingMemStore.IndexType chunkIndexT

// Chunks from pool are created covered with strong references anyway
// TODO: change to CHUNK_MAP if it is generally defined
private Chunk createChunkForPool(CompactingMemStore.IndexType chunkIndexType, int chunkSize) {
private Chunk createChunkForPool(CompactingMemStore.IndexType chunkIndexType, ChunkType chunkType,
int chunkSize) {
if (chunkSize != dataChunksPool.getChunkSize() &&
chunkSize != indexChunksPool.getChunkSize()) {
return null;
}
return createChunk(true, chunkIndexType, chunkSize);
return createChunk(true, chunkIndexType, chunkType, chunkSize);
}

// Used to translate the ChunkID into a chunk ref
@@ -309,6 +320,7 @@ void clearChunkIds() {
*/
private class MemStoreChunkPool implements HeapMemoryTuneObserver {
private final int chunkSize;
private final ChunkType chunkType;
private int maxCount;

// A queue of reclaimed chunks
@@ -323,15 +335,18 @@ private class MemStoreChunkPool implements HeapMemoryTuneObserver {
private final LongAdder reusedChunkCount = new LongAdder();
private final String label;

MemStoreChunkPool(String label, int chunkSize, int maxCount, int initialCount,
MemStoreChunkPool(String label, int chunkSize, ChunkType chunkType, int maxCount,
int initialCount,
float poolSizePercentage) {
this.label = label;
this.chunkSize = chunkSize;
this.chunkType = chunkType;
this.maxCount = maxCount;
this.poolSizePercentage = poolSizePercentage;
this.reclaimedChunks = new LinkedBlockingQueue<>();
for (int i = 0; i < initialCount; i++) {
Chunk chunk = createChunk(true, CompactingMemStore.IndexType.ARRAY_MAP, chunkSize);
Chunk chunk =
createChunk(true, CompactingMemStore.IndexType.ARRAY_MAP, chunkType, chunkSize);
chunk.init();
reclaimedChunks.add(chunk);
}
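Each MemStoreChunkPool now remembers its ChunkType and passes it to createChunk when pre-filling the pool, so pooled chunks carry the right tag from the moment they are created. A simplified sketch of that pre-allocation (plain Java, not the HBase class):

```java
import java.util.concurrent.LinkedBlockingQueue;

// Simplified pool that pre-allocates chunks already tagged with its ChunkType.
class PoolPreallocationSketch {
  enum ChunkType { INDEX_CHUNK, DATA_CHUNK }

  static class Chunk {
    final int size;
    final ChunkType chunkType;
    Chunk(int size, ChunkType chunkType) {
      this.size = size;
      this.chunkType = chunkType;
    }
  }

  private final LinkedBlockingQueue<Chunk> reclaimedChunks = new LinkedBlockingQueue<>();

  PoolPreallocationSketch(int chunkSize, ChunkType chunkType, int initialCount) {
    for (int i = 0; i < initialCount; i++) {
      // Every pre-created chunk carries the pool's ChunkType.
      reclaimedChunks.add(new Chunk(chunkSize, chunkType));
    }
  }

  int pooledCount() { return reclaimedChunks.size(); }
}
```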
@@ -367,7 +382,7 @@ Chunk getChunk(CompactingMemStore.IndexType chunkIndexType) {
long created = this.chunkCount.get();
if (created < this.maxCount) {
if (this.chunkCount.compareAndSet(created, created + 1)) {
chunk = createChunkForPool(chunkIndexType, chunkSize);
chunk = createChunkForPool(chunkIndexType, chunkType, chunkSize);
break;
}
} else {
@@ -465,7 +480,7 @@ static void clearDisableFlag() {
}

private MemStoreChunkPool initializePool(String label, long globalMemStoreSize,
float poolSizePercentage, float initialCountPercentage, int chunkSize,
float poolSizePercentage, float initialCountPercentage, int chunkSize, ChunkType chunkType,
HeapMemoryManager heapMemoryManager) {
if (poolSizePercentage <= 0) {
LOG.info("{} poolSizePercentage is less than 0. So not using pool", label);
@@ -486,8 +501,8 @@ private MemStoreChunkPool initializePool(String label, long globalMemStoreSize,
int initialCount = (int) (initialCountPercentage * maxCount);
LOG.info("Allocating {} MemStoreChunkPool with chunk size {}, max count {}, initial count {}",
label, StringUtils.byteDesc(chunkSize), maxCount, initialCount);
MemStoreChunkPool memStoreChunkPool = new MemStoreChunkPool(label, chunkSize, maxCount,
initialCount, poolSizePercentage);
MemStoreChunkPool memStoreChunkPool = new MemStoreChunkPool(label, chunkSize, chunkType,
maxCount, initialCount, poolSizePercentage);
if (heapMemoryManager != null && memStoreChunkPool != null) {
// Register with Heap Memory manager
heapMemoryManager.registerTuneObserver(memStoreChunkPool);
@@ -578,6 +593,8 @@ int getChunkSize(ChunkType chunkType) {
case INDEX_CHUNK:
if (indexChunksPool != null) {
return indexChunksPool.getChunkSize();
} else {
return indexChunkSize;
}
case DATA_CHUNK:
if (dataChunksPool != null) {
@@ -606,7 +623,7 @@ synchronized void putbackChunks(Set<Integer> chunks) {
if (chunk != null) {
if (chunk.isFromPool() && chunk.isIndexChunk()) {
indexChunksPool.putbackChunks(chunk);
} else if (chunk.isFromPool() && chunk.size == dataChunksPool.getChunkSize()) {
} else if (chunk.isFromPool() && chunk.isDataChunk()) {
dataChunksPool.putbackChunks(chunk);
} else {
// chunks which are not from one of the pools
@@ -621,5 +638,13 @@ synchronized void putbackChunks(Set<Integer> chunks) {
return;
}

MemStoreChunkPool getIndexChunksPool() {
return this.indexChunksPool;
}

MemStoreChunkPool getDataChunksPool() {
return this.dataChunksPool;
}

}

@@ -19,6 +19,7 @@

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType;
import org.apache.yetus.audience.InterfaceAudience;

/**
@@ -27,13 +28,13 @@
@InterfaceAudience.Private
public class OffheapChunk extends Chunk {

OffheapChunk(int size, int id) {
OffheapChunk(int size, int id, ChunkType chunkType) {
// better if this is always created fromPool. This should not be called
super(size, id);
super(size, id, chunkType);
}

OffheapChunk(int size, int id, boolean fromPool) {
super(size, id, fromPool);
OffheapChunk(int size, int id, ChunkType chunkType, boolean fromPool) {
super(size, id, chunkType, fromPool);
assert fromPool == true;
}

@@ -19,6 +19,7 @@

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType;
import org.apache.yetus.audience.InterfaceAudience;

/**
@@ -27,12 +28,12 @@
@InterfaceAudience.Private
public class OnheapChunk extends Chunk {

OnheapChunk(int size, int id) {
super(size, id);
OnheapChunk(int size, int id, ChunkType chunkType) {
super(size, id, chunkType);
}

OnheapChunk(int size, int id, boolean fromPool) {
super(size, id, fromPool);
OnheapChunk(int size, int id, ChunkType chunkType, boolean fromPool) {
super(size, id, chunkType, fromPool);
}

@Override
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
@@ -330,7 +331,9 @@ private CellChunkMap setUpJumboCellChunkMap(boolean asc) {
// allocate new chunks and use the data JUMBO chunk to hold the full data of the cells
// and the normal index chunk to hold the cell-representations
Chunk dataJumboChunk =
chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP, smallChunkSize);
chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP, ChunkType.JUMBO_CHUNK,
smallChunkSize);
assertTrue(dataJumboChunk.isJumbo());
Chunk idxChunk = chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP);
// the array of index chunks to be used as a basis for CellChunkMap
Chunk[] chunkArray = new Chunk[8]; // according to test currently written 8 is way enough
@@ -364,7 +367,10 @@ private CellChunkMap setUpJumboCellChunkMap(boolean asc) {

// Jumbo chunks are working only with one cell per chunk, thus always allocate a new jumbo
// data chunk for next cell
dataJumboChunk = chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP,smallChunkSize);
dataJumboChunk =
chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP, ChunkType.JUMBO_CHUNK,
smallChunkSize);
assertTrue(dataJumboChunk.isJumbo());
dataBuffer = dataJumboChunk.getData();
dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER;
}
