HBASE-24659 Calculate FIXED_OVERHEAD automatically
niuyulin committed Jul 5, 2020
1 parent a7a0e1a commit 330f7f9
Showing 7 changed files with 28 additions and 95 deletions.
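Every file in this change applies the same substitution: a hand-maintained ClassSize.align(...) sum, which had to be updated whenever fields were added or removed, is replaced by a single call to ClassSize.estimateBase(Class, boolean), which derives the fixed overhead from the class definition itself. In HFileContext and HFileBlock the constant's type also changes from int to long to match estimateBase's return type. A minimal before/after sketch, condensed from the HFileContext hunk below (the boolean argument appears to toggle debug output; the removed heap-size test code re-ran estimateBase with true when a mismatch was found):

// Before: each field's contribution to the fixed overhead was counted by hand.
public static final int FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT +
    5 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
    4 * Bytes.SIZEOF_BOOLEAN + Bytes.SIZEOF_LONG +
    2 * ClassSize.ARRAY + 2 * ClassSize.REFERENCE);

// After: the overhead is estimated from the class layout at runtime.
public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HFileContext.class, false);

Because FIXED_OVERHEAD is now defined directly as estimateBase(...), the heap-size test assertions that compared each constant against estimateBase become tautological, which is why they are deleted at the end of this diff.
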
HFileContext.java
@@ -37,13 +37,7 @@
*/
@InterfaceAudience.Private
public class HFileContext implements HeapSize, Cloneable {
-public static final int FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT +
-// Algorithm, checksumType, encoding, Encryption.Context, hfileName reference,
-5 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
-// usesHBaseChecksum, includesMvcc, includesTags and compressTags
-4 * Bytes.SIZEOF_BOOLEAN + Bytes.SIZEOF_LONG +
-//byte[] headers for column family and table name
-2 * ClassSize.ARRAY + 2 * ClassSize.REFERENCE);
+public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HFileContext.class, false);

private static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024;

BlockCacheKey.java
@@ -19,7 +19,6 @@

import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;

/**
@@ -42,7 +41,8 @@ public BlockCacheKey(String hfileName, long offset) {
this(hfileName, offset, true, BlockType.DATA);
}

-public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica, BlockType blockType) {
+public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica,
+BlockType blockType) {
this.isPrimaryReplicaBlock = isPrimaryReplica;
this.hfileName = hfileName;
this.offset = offset;
@@ -71,12 +71,7 @@ public String toString() {
return this.hfileName + '_' + this.offset;
}

-public static final long FIXED_OVERHEAD = ClassSize.align(
-ClassSize.OBJECT +
-Bytes.SIZEOF_BOOLEAN +
-ClassSize.REFERENCE + // this.hfileName
-ClassSize.REFERENCE + // this.blockType
-Bytes.SIZEOF_LONG); // this.offset
+public static final long FIXED_OVERHEAD = ClassSize.estimateBase(BlockCacheKey.class, false);

/**
* Strings have two bytes per character due to default Java Unicode encoding

HFileBlock.java
@@ -113,14 +113,7 @@
@InterfaceAudience.Private
public class HFileBlock implements Cacheable {
private static final Logger LOG = LoggerFactory.getLogger(HFileBlock.class);
-public static final int FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT +
-// BlockType, ByteBuff, MemoryType, HFileContext, ByteBuffAllocator
-5 * ClassSize.REFERENCE +
-// On-disk size, uncompressed size, and next block's on-disk size
-// bytePerChecksum and onDiskDataSize
-4 * Bytes.SIZEOF_INT +
-// This and previous block offset
-2 * Bytes.SIZEOF_LONG);
+public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HFileBlock.class, false);

// Block Header fields.

LruBlockCache.java
@@ -37,7 +37,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
@@ -440,7 +439,7 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
map.put(cacheKey, cb);
long val = elements.incrementAndGet();
if (buf.getBlockType().isData()) {
-dataBlockElements.increment();
+dataBlockElements.increment();
}
if (LOG.isTraceEnabled()) {
long size = map.size();
@@ -497,7 +496,7 @@ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
heapsize *= -1;
}
if (bt != null && bt.isData()) {
-dataBlockSize.add(heapsize);
+dataBlockSize.add(heapsize);
}
return size.addAndGet(heapsize);
}
@@ -583,8 +582,9 @@ public int evictBlocksByHfileName(String hfileName) {
int numEvicted = 0;
for (BlockCacheKey key : map.keySet()) {
if (key.getHfileName().equals(hfileName)) {
-if (evictBlock(key))
+if (evictBlock(key)) {
++numEvicted;
+}
}
}
if (victimHandler != null) {
@@ -657,7 +657,9 @@ long getOverhead() {
void evict() {

// Ensure only one eviction at a time
-if(!evictionLock.tryLock()) return;
+if (!evictionLock.tryLock()) {
+return;
+}

try {
evictionInProgress = true;
@@ -670,7 +672,9 @@ void evict() {
StringUtils.byteDesc(currentSize));
}

-if (bytesToFree <= 0) return;
+if (bytesToFree <= 0) {
+return;
+}

// Instantiate priority buckets
BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize());
@@ -945,7 +949,9 @@ public void run() {
}
}
LruBlockCache cache = this.cache.get();
-if (cache == null) break;
+if (cache == null) {
+break;
+}
cache.evict();
}
}
@@ -1022,10 +1028,8 @@ public CacheStats getStats() {
return this.stats;
}

-public final static long CACHE_FIXED_OVERHEAD = ClassSize.align(
-(4 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) +
-(6 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN)
-+ ClassSize.OBJECT);
+public final static long CACHE_FIXED_OVERHEAD =
+ClassSize.estimateBase(LruBlockCache.class, false);

@Override
public long heapSize() {
@@ -1093,9 +1097,13 @@ public String getFilename() {
@Override
public int compareTo(CachedBlock other) {
int diff = this.getFilename().compareTo(other.getFilename());
-if (diff != 0) return diff;
+if (diff != 0) {
+return diff;
+}
diff = Long.compare(this.getOffset(), other.getOffset());
-if (diff != 0) return diff;
+if (diff != 0) {
+return diff;
+}
if (other.getCachedTime() < 0 || this.getCachedTime() < 0) {
throw new IllegalStateException(this.getCachedTime() + ", " + other.getCachedTime());
}

HRegion.java
@@ -8405,12 +8405,7 @@ private static List<Cell> sort(List<Cell> cells, final CellComparator comparator
return cells;
}

-public static final long FIXED_OVERHEAD = ClassSize.align(
-ClassSize.OBJECT +
-56 * ClassSize.REFERENCE +
-3 * Bytes.SIZEOF_INT +
-14 * Bytes.SIZEOF_LONG +
-3 * Bytes.SIZEOF_BOOLEAN);
+public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HRegion.class, false);

// woefully out of date - currently missing:
// 1 x HashMap - coprocessorServiceHandlers

HStore.java
@@ -2566,9 +2566,7 @@ public CacheConfig getCacheConfig() {
return this.cacheConf;
}

-public static final long FIXED_OVERHEAD =
-ClassSize.align(ClassSize.OBJECT + (29 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG)
-+ (6 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN));
+public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false);

public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD
+ ClassSize.OBJECT + ClassSize.REENTRANT_LOCK

TestHeapSize.java
@@ -42,11 +42,7 @@
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.ExclusiveMemHFileBlock;
-import org.apache.hadoop.hbase.io.hfile.HFileBlock;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.io.hfile.LruCachedBlock;
import org.apache.hadoop.hbase.io.hfile.SharedMemHFileBlock;
import org.apache.hadoop.hbase.regionserver.CSLMImmutableSegment;
@@ -56,8 +52,6 @@
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.regionserver.CompactionPipeline;
import org.apache.hadoop.hbase.regionserver.DefaultMemStore;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.ImmutableSegment;
import org.apache.hadoop.hbase.regionserver.MemStoreCompactor;
import org.apache.hadoop.hbase.regionserver.MutableSegment;
@@ -309,15 +303,6 @@ public void testSizes() throws IOException {
assertEquals(expected, actual);
}

-//LruBlockCache Overhead
-cl = LruBlockCache.class;
-actual = LruBlockCache.CACHE_FIXED_OVERHEAD;
-expected = ClassSize.estimateBase(cl, false);
-if(expected != actual) {
-ClassSize.estimateBase(cl, true);
-assertEquals(expected, actual);
-}
-
// CachedBlock Fixed Overhead
// We really need "deep" sizing but ClassSize does not do this.
// Perhaps we should do all these more in this style....
@@ -475,23 +460,6 @@ public void testSizes() throws IOException {
assertEquals(expected, actual);
}

-// Store Overhead
-cl = HStore.class;
-actual = HStore.FIXED_OVERHEAD;
-expected = ClassSize.estimateBase(cl, false);
-if(expected != actual) {
-ClassSize.estimateBase(cl, true);
-assertEquals(expected, actual);
-}
-
-// Region Overhead
-cl = HRegion.class;
-actual = HRegion.FIXED_OVERHEAD;
-expected = ClassSize.estimateBase(cl, false);
-if (expected != actual) {
-ClassSize.estimateBase(cl, true);
-assertEquals(expected, actual);
-}

cl = StoreHotnessProtector.class;
actual = StoreHotnessProtector.FIXED_SIZE;
@@ -501,16 +469,6 @@ public void testSizes() throws IOException {
assertEquals(expected, actual);
}

-// Block cache key overhead. Only tests fixed overhead as estimating heap
-// size of strings is hard.
-cl = BlockCacheKey.class;
-actual = BlockCacheKey.FIXED_OVERHEAD;
-expected = ClassSize.estimateBase(cl, false);
-if (expected != actual) {
-ClassSize.estimateBase(cl, true);
-assertEquals(expected, actual);
-}
-
// Currently NOT testing Deep Overheads of many of these classes.
// Deep overheads cover a vast majority of stuff, but will not be 100%
// accurate because it's unclear when we're referencing stuff that's already
@@ -524,14 +482,6 @@ public void testHFileBlockSize() throws IOException {
long expected;
long actual;

-actual = HFileContext.FIXED_OVERHEAD;
-expected = ClassSize.estimateBase(HFileContext.class, false);
-assertEquals(expected, actual);
-
-actual = HFileBlock.FIXED_OVERHEAD;
-expected = ClassSize.estimateBase(HFileBlock.class, false);
-assertEquals(expected, actual);
-
actual = ExclusiveMemHFileBlock.FIXED_OVERHEAD;
expected = ClassSize.estimateBase(ExclusiveMemHFileBlock.class, false);
assertEquals(expected, actual);