HBASE-24205 Create metric to know the number of reads that happens from memstore (#1552)

* HBASE-24205 Create metric to know the number of reads that happens from memstore

* Fix checkstyle and whitespace issues

* Checkstyle, whitespace and javadoc fixes

* Address review comments

* Fix unused imports

* Rebase with latest commit

* Add the table vs store metric by consolidating them

* Combine get and scan metrics and make all relevant changes

* Track the full row, then increment either the memstore or the file read
metric

* TestMetricsStore test fix

* Only increment the memstore metric if all cells are from the memstore; if
not, treat it as a mixed read

* Remove MetricsStore and aggregate at the region level

* Address review comments - metric name updated everywhere

* Metric name change

* Review comment changes

Co-authored-by: Ramkrishna <ramkrishna@apache.org>
Signed-off-by: Anoop Sam John <anoopsamjohn@gmail.com>
Signed-off-by: Viraj Jasani <virajjasani@apache.org>
ramkrish86 committed Jun 16, 2020
1 parent 3558ee0 commit 510aad3
Showing 16 changed files with 444 additions and 168 deletions.
MetricsRegionSource.java
@@ -53,6 +53,10 @@ public interface MetricsRegionSource extends Comparable<MetricsRegionSource> {
String COPROCESSOR_EXECUTION_STATISTICS_DESC = "Statistics for coprocessor execution times";
String REPLICA_ID = "replicaid";
String REPLICA_ID_DESC = "The replica ID of a region. 0 is primary, otherwise is secondary";
String ROW_READS_ONLY_ON_MEMSTORE = "memstoreOnlyRowReadsCount";
String ROW_READS_ONLY_ON_MEMSTORE_DESC = "Row reads happening completely out of memstore";
String MIXED_ROW_READS = "mixedRowReadsCount";
String MIXED_ROW_READS_ON_STORE_DESC = "Row reads happening out of files and memstore on store";

/**
* Close the region's metrics as this region is closing.
MetricsRegionSourceImpl.java
@@ -18,6 +18,8 @@

package org.apache.hadoop.hbase.regionserver;

import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.hbase.metrics.Interns;
@@ -33,6 +35,8 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {

private static final Logger LOG = LoggerFactory.getLogger(MetricsRegionSourceImpl.class);

private static final String _STORE = "_store_";

private AtomicBoolean closed = new AtomicBoolean(false);

// Non-final so that we can null out the wrapper
@@ -45,6 +49,8 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
private final DynamicMetricsRegistry registry;

private final String regionNamePrefix;
private final String regionNamePrefix1;
private final String regionNamePrefix2;
private final String regionPutKey;
private final String regionDeleteKey;
private final String regionGetKey;
@@ -77,10 +83,10 @@ public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper,

registry = agg.getMetricsRegistry();

regionNamePrefix = "Namespace_" + regionWrapper.getNamespace() +
"_table_" + regionWrapper.getTableName() +
"_region_" + regionWrapper.getRegionName() +
"_metric_";
regionNamePrefix1 = "Namespace_" + regionWrapper.getNamespace() + "_table_"
+ regionWrapper.getTableName() + "_region_" + regionWrapper.getRegionName();
regionNamePrefix2 = "_metric_";
regionNamePrefix = regionNamePrefix1 + regionNamePrefix2;

String suffix = "Count";

@@ -302,6 +308,24 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
regionNamePrefix + MetricsRegionSource.MAX_FLUSH_QUEUE_SIZE,
MetricsRegionSource.MAX_FLUSH_QUEUE_DESC),
this.regionWrapper.getMaxFlushQueueSize());
addCounter(mrb, this.regionWrapper.getMemstoreOnlyRowReadsCount(),
MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE,
MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC);
addCounter(mrb, this.regionWrapper.getMixedRowReadsCount(),
MetricsRegionSource.MIXED_ROW_READS,
MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC);
}
}

private void addCounter(MetricsRecordBuilder mrb, Map<String, Long> metricMap, String metricName,
String metricDesc) {
if (metricMap != null) {
for (Entry<String, Long> entry : metricMap.entrySet()) {
// append 'store' and its name to the metric
mrb.addCounter(Interns.info(
this.regionNamePrefix1 + _STORE + entry.getKey() + this.regionNamePrefix2 + metricName,
metricDesc), entry.getValue());
}
}
}

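To make the naming scheme above concrete: addCounter() splices the store name between the region prefix and the "_metric_" segment. Below is a minimal standalone sketch of that composition; the namespace, table, region, and store values are invented samples, not taken from the commit.

public class StoreMetricNameDemo {
  public static void main(String[] args) {
    // Mirrors regionNamePrefix1 + _STORE + storeName + regionNamePrefix2 + metricName
    // from MetricsRegionSourceImpl.addCounter(). All values below are samples.
    String regionNamePrefix1 = "Namespace_default_table_usertable_region_1588230740";
    String storeName = "cf";
    String metricName = "memstoreOnlyRowReadsCount";
    String fullName = regionNamePrefix1 + "_store_" + storeName + "_metric_" + metricName;
    // Prints:
    // Namespace_default_table_usertable_region_1588230740_store_cf_metric_memstoreOnlyRowReadsCount
    System.out.println(fullName);
  }
}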
MetricsRegionWrapper.java
@@ -18,6 +18,8 @@

package org.apache.hadoop.hbase.regionserver;

import java.util.Map;

import org.apache.yetus.audience.InterfaceAudience;

/**
@@ -170,4 +172,15 @@ public interface MetricsRegionWrapper {
* all compacted store files that belong to this region
*/
long getMaxCompactedStoreFileRefCount();

/**
* @return the number of row reads completely on memstore per store
*/
Map<String, Long> getMemstoreOnlyRowReadsCount();

/**
* @return the number of row reads on memstore and file per store
*/
Map<String, Long> getMixedRowReadsCount();

}
MetricsTableAggregateSourceImpl.java
@@ -92,7 +92,6 @@ public MetricsTableSource getOrCreateTableSource(String table,
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
MetricsRecordBuilder mrb = collector.addRecord(metricsName);

if (tableSources != null) {
for (MetricsTableSource tableMetricSource : tableSources.values()) {
if (tableMetricSource instanceof MetricsTableSourceImpl) {
MetricsTableSourceImpl.java
@@ -61,7 +61,10 @@
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_SUCCESS_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_SUCCESS_KEY;

import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Interns;
import org.apache.hadoop.metrics2.MetricHistogram;
@@ -75,6 +78,8 @@
@InterfaceAudience.Private
public class MetricsTableSourceImpl implements MetricsTableSource {

private static final String _COLUMNFAMILY = "_columnfamily_";

private static final Logger LOG = LoggerFactory.getLogger(MetricsTableSourceImpl.class);

private AtomicBoolean closed = new AtomicBoolean(false);
@@ -87,6 +92,8 @@ public class MetricsTableSourceImpl implements MetricsTableSource {
private final MetricsTableAggregateSourceImpl agg;
private final DynamicMetricsRegistry registry;
private final String tableNamePrefix;
private final String tableNamePrefixPart1;
private final String tableNamePrefixPart2;
private final TableName tableName;
private final int hashCode;

@@ -127,8 +134,11 @@ public MetricsTableSourceImpl(String tblName,

this.tableWrapperAgg = tblWrapperAgg;
this.registry = agg.getMetricsRegistry();
this.tableNamePrefix = "Namespace_" + this.tableName.getNamespaceAsString() +
"_table_" + this.tableName.getQualifierAsString() + "_metric_";
this.tableNamePrefixPart1 = "Namespace_" + this.tableName.getNamespaceAsString() +
"_table_" + this.tableName.getQualifierAsString();
this.tableNamePrefixPart2 = "_metric_";
this.tableNamePrefix = tableNamePrefixPart1 +
tableNamePrefixPart2;
this.hashCode = this.tableName.hashCode();
}

@@ -311,6 +321,25 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES,
MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC),
tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString()));
addGauge(mrb, tableWrapperAgg.getMemstoreOnlyRowReadsCount(tableName.getNameAsString()),
MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE,
MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC);
addGauge(mrb, tableWrapperAgg.getMixedRowReadsCount(tableName.getNameAsString()),
MetricsRegionSource.MIXED_ROW_READS,
MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC);
}
}
}

private void addGauge(MetricsRecordBuilder mrb, Map<String, Long> metricMap, String metricName,
String metricDesc) {
if (metricMap != null) {
for (Entry<String, Long> entry : metricMap.entrySet()) {
// append 'store' and its name to the metric
mrb.addGauge(Interns.info(this.tableNamePrefixPart1 + _COLUMNFAMILY
+ entry.getKey().split(MetricsTableWrapperAggregate.UNDERSCORE)[1]
+ this.tableNamePrefixPart2 + metricName,
metricDesc), entry.getValue());
}
}
}
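The table-level counterpart above derives the column family from the map key: entry.getKey().split(UNDERSCORE)[1] presumes keys of the form "<table>_<columnfamily>" with exactly one underscore. A short sketch of the resulting gauge name, again with invented sample values:

public class TableMetricNameDemo {
  public static void main(String[] args) {
    // Mirrors the composition in MetricsTableSourceImpl.addGauge(); values are samples.
    String tableNamePrefixPart1 = "Namespace_default_table_usertable";
    String mapKey = "usertable_cf"; // assumed "<table>_<columnfamily>" key layout
    String columnFamily = mapKey.split("_")[1];
    String fullName = tableNamePrefixPart1 + "_columnfamily_" + columnFamily
        + "_metric_" + "mixedRowReadsCount";
    // Prints: Namespace_default_table_usertable_columnfamily_cf_metric_mixedRowReadsCount
    System.out.println(fullName);
  }
}

Note that split(UNDERSCORE)[1] only yields the full column family name while neither key part contains an extra underscore.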
MetricsTableWrapperAggregate.java
@@ -18,6 +18,8 @@

package org.apache.hadoop.hbase.regionserver;

import java.util.Map;

import org.apache.yetus.audience.InterfaceAudience;

/**
@@ -26,7 +28,7 @@
*/
@InterfaceAudience.Private
public interface MetricsTableWrapperAggregate {

public String UNDERSCORE = "_";
/**
* Get the number of read requests that have been issued against this table
*/
@@ -107,6 +109,13 @@ public interface MetricsTableWrapperAggregate {
*/
long getNumReferenceFiles(String table);

/**
* @return number of row reads completely from memstore per store for this table
*/
Map<String, Long> getMemstoreOnlyRowReadsCount(String table);


/**
* @return number of row reads from file and memstore per store for this table
*/
Map<String, Long> getMixedRowReadsCount(String table);
}
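An implementation of these two getters would typically sum the per-store counts across every region of the table into one map per metric. The sketch below shows one plausible shape under that assumption; StoreStub and all names in it are invented for illustration and do not reflect the actual implementation of this interface.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Stand-in for the real per-store read counters; illustrative only.
class StoreStub {
  final String columnFamilyName;
  final long memstoreOnlyRowReadsCount;
  StoreStub(String columnFamilyName, long memstoreOnlyRowReadsCount) {
    this.columnFamilyName = columnFamilyName;
    this.memstoreOnlyRowReadsCount = memstoreOnlyRowReadsCount;
  }
}

public class TableWrapperSketch {
  // Sums memstore-only row reads per column family across a table's stores,
  // producing keys in the "<table>_<columnfamily>" layout that addGauge() expects.
  static Map<String, Long> memstoreOnlyRowReadsCount(String table, List<StoreStub> stores) {
    Map<String, Long> perStore = new HashMap<>();
    for (StoreStub store : stores) {
      perStore.merge(table + "_" + store.columnFamilyName,
          store.memstoreOnlyRowReadsCount, Long::sum);
    }
    return perStore;
  }
}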
MetricsTableWrapperStub.java
@@ -18,6 +18,9 @@

package org.apache.hadoop.hbase.regionserver;

import java.util.HashMap;
import java.util.Map;

public class MetricsTableWrapperStub implements MetricsTableWrapperAggregate {

private String tableName;
@@ -109,4 +112,18 @@ public long getAvgRegionSize(String table) {
public long getCpRequestsCount(String table) {
return 99;
}

@Override
public Map<String, Long> getMemstoreOnlyRowReadsCount(String table) {
Map<String, Long> map = new HashMap<String, Long>();
map.put("table_info", 3L);
return map;
}

@Override
public Map<String, Long> getMixedRowReadsCount(String table) {
Map<String, Long> map = new HashMap<String, Long>();
map.put("table_info", 3L);
return map;
}
}
TestMetricsRegionSourceImpl.java
@@ -20,6 +20,9 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MetricsTests;
@@ -216,5 +219,19 @@ public long getMaxFlushQueueSize() {
public long getTotalRequestCount() {
return 0;
}

@Override
public Map<String, Long> getMemstoreOnlyRowReadsCount() {
Map<String, Long> map = new HashMap<String, Long>();
map.put("info", 0L);
return map;
}

@Override
public Map<String, Long> getMixedRowReadsCount() {
Map<String, Long> map = new HashMap<String, Long>();
map.put("info", 0L);
return map;
}
}
}
HStore.java
@@ -46,12 +46,14 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;
import java.util.function.ToLongFunction;
import java.util.stream.Collectors;
import java.util.stream.LongStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -94,6 +96,8 @@
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
@@ -114,9 +118,6 @@
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;

/**
* A Store holds a column family in a Region. It's a memstore and a set of zero
* or more StoreFiles, which stretch backwards over time.
@@ -162,6 +163,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
volatile boolean forceMajor = false;
private AtomicLong storeSize = new AtomicLong();
private AtomicLong totalUncompressedBytes = new AtomicLong();
private LongAdder memstoreOnlyRowReadsCount = new LongAdder();
// rows that have cells from both memstore and files (or only files)
private LongAdder mixedRowReadsCount = new LongAdder();

private boolean cacheOnWriteLogged;

@@ -331,7 +335,8 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
confPrintThreshold = 10;
}
this.parallelPutCountPrintThreshold = confPrintThreshold;
LOG.info("{} created, memstore type={}, storagePolicy={}, verifyBulkLoads={}, "

LOG.info("Store={}, memstore type={}, storagePolicy={}, verifyBulkLoads={}, "
+ "parallelPutCountPrintThreshold={}, encoding={}, compression={}",
this, memstore.getClass().getSimpleName(), policyName, verifyBulkLoads,
parallelPutCountPrintThreshold, family.getDataBlockEncoding(),
@@ -2546,7 +2551,7 @@ public CacheConfig getCacheConfig() {
}

public static final long FIXED_OVERHEAD =
ClassSize.align(ClassSize.OBJECT + (27 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG)
ClassSize.align(ClassSize.OBJECT + (29 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG)
+ (6 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN));

public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD
@@ -2886,8 +2891,7 @@ public int getStoreRefCount() {
}

/**
* @return get maximum ref count of storeFile among all compacted HStore Files
* for the HStore
* @return get maximum ref count of storeFile among all compacted HStore Files for the HStore
*/
public int getMaxCompactedStoreFileRefCount() {
OptionalInt maxCompactedStoreFileRefCount = this.storeEngine.getStoreFileManager()
@@ -2901,4 +2905,21 @@ public int getMaxCompactedStoreFileRefCount() {
? maxCompactedStoreFileRefCount.getAsInt() : 0;
}

@Override
public long getMemstoreOnlyRowReadsCount() {
return memstoreOnlyRowReadsCount.sum();
}

@Override
public long getMixedRowReadsCount() {
return mixedRowReadsCount.sum();
}

void updateMetricsStore(boolean memstoreRead) {
if (memstoreRead) {
memstoreOnlyRowReadsCount.increment();
} else {
mixedRowReadsCount.increment();
}
}
}
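A note on the choice of LongAdder for the two new counters: updateMetricsStore() runs once per row on the read path, so many handler threads can increment concurrently. LongAdder stripes its count across internal cells to avoid the compare-and-swap contention a shared AtomicLong would suffer, at the cost that sum() is only weakly consistent, which is acceptable for a metric read at snapshot time. A self-contained sketch of the same pattern (the class and the thread setup are illustrative, not HStore itself):

import java.util.concurrent.atomic.LongAdder;

public class ReadCounterDemo {
  private final LongAdder memstoreOnlyRowReadsCount = new LongAdder();
  private final LongAdder mixedRowReadsCount = new LongAdder();

  // Same shape as HStore.updateMetricsStore(boolean).
  void updateMetricsStore(boolean memstoreRead) {
    if (memstoreRead) {
      memstoreOnlyRowReadsCount.increment();
    } else {
      mixedRowReadsCount.increment();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ReadCounterDemo demo = new ReadCounterDemo();
    Runnable reader = () -> {
      for (int i = 0; i < 100_000; i++) {
        demo.updateMetricsStore(i % 3 == 0); // pretend a third of the rows are memstore-only
      }
    };
    Thread t1 = new Thread(reader);
    Thread t2 = new Thread(reader);
    t1.start(); t2.start();
    t1.join(); t2.join();
    System.out.println("memstoreOnly=" + demo.memstoreOnlyRowReadsCount.sum()
        + " mixed=" + demo.mixedRowReadsCount.sum());
  }
}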
KeyValueHeap.java
@@ -112,6 +112,10 @@ public Cell peek() {
return this.current.peek();
}

boolean isLatestCellFromMemstore() {
return !this.current.isFileScanner();
}

@Override
public Cell next() throws IOException {
if(this.current == null) {
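isLatestCellFromMemstore() gives the caller a per-cell signal, and per the commit message a row is counted as memstore-only just when every cell of the row came from the memstore, otherwise as mixed. The sketch below shows that per-row bookkeeping in isolation; RowReadTracker and its method names are invented, since the real accounting is wired through the store scanner into HStore.updateMetricsStore():

import java.util.concurrent.atomic.LongAdder;

public class RowReadTracker {
  private final LongAdder memstoreOnlyRowReads = new LongAdder();
  private final LongAdder mixedRowReads = new LongAdder();
  private boolean allCellsFromMemstore = true;

  // Call once per cell the scanner returns for the current row.
  void onCell(boolean latestCellFromMemstore) {
    if (!latestCellFromMemstore) {
      allCellsFromMemstore = false; // one file-backed cell makes the whole row "mixed"
    }
  }

  // Call at each row boundary: classify the finished row and reset for the next one.
  void onRowDone() {
    if (allCellsFromMemstore) {
      memstoreOnlyRowReads.increment();
    } else {
      mixedRowReads.increment();
    }
    allCellsFromMemstore = true;
  }

  public static void main(String[] args) {
    RowReadTracker tracker = new RowReadTracker();
    tracker.onCell(true);  tracker.onCell(true);  tracker.onRowDone(); // memstore-only row
    tracker.onCell(true);  tracker.onCell(false); tracker.onRowDone(); // mixed row
    System.out.println("memstoreOnly=" + tracker.memstoreOnlyRowReads.sum()
        + " mixed=" + tracker.mixedRowReads.sum());
  }
}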
(diff truncated; the remaining changed files are not shown)
