HBASE-12891 Parallel execution for Hbck checkRegionConsistency
Signed-off-by: Enis Soztutar <enis@apache.org>
Dave Latham authored and enis committed Apr 2, 2015
1 parent 1e6ea5a commit 61f4ce6
Showing 1 changed file with 77 additions and 13 deletions.
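
The patch turns each per-region consistency check into a Callable work item and runs the whole batch on hbck's thread pool, instead of checking regions one at a time. As a minimal, self-contained sketch of that general pattern: the checkOne helper and the fixed thread pool below are illustrative stand-ins, not part of the patch, which reuses hbck's existing executor.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelChecks {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    List<Callable<Void>> items = new ArrayList<Callable<Void>>();
    for (final String region : new String[] {"r1", "r2", "r3"}) {
      items.add(new Callable<Void>() {
        @Override
        public Void call() {
          checkOne(region); // hypothetical per-region check
          return null;
        }
      });
    }
    // invokeAll blocks until every submitted task has finished (or failed)
    for (Future<Void> f : pool.invokeAll(items)) {
      f.get(); // surfaces any task failure as ExecutionException
    }
    pool.shutdown();
  }

  static void checkOne(String region) {
    System.out.println("checked " + region);
  }
}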
@@ -56,8 +56,10 @@
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
+import com.google.common.collect.Ordering;
 import com.google.common.collect.TreeMultimap;
 import com.google.protobuf.ServiceException;
 
@@ -1767,27 +1769,80 @@ private void checkAndFixConsistency()
   throws IOException, KeeperException, InterruptedException {
     // Divide the checks in two phases. One for default/primary replicas and another
     // for the non-primary ones. Keeps code cleaner this way.
+
+    List<CheckRegionConsistencyWorkItem> workItems =
+        new ArrayList<CheckRegionConsistencyWorkItem>(regionInfoMap.size());
     for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
       if (e.getValue().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
-        checkRegionConsistency(e.getKey(), e.getValue());
+        workItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue()));
       }
     }
+    checkRegionConsistencyConcurrently(workItems);
+
     boolean prevHdfsCheck = shouldCheckHdfs();
     setCheckHdfs(false); // replicas don't have any hdfs data
     // Run a pass over the replicas and fix any assignment issues that exist on the currently
     // deployed/undeployed replicas.
+    List<CheckRegionConsistencyWorkItem> replicaWorkItems =
+        new ArrayList<CheckRegionConsistencyWorkItem>(regionInfoMap.size());
     for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
       if (e.getValue().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
-        checkRegionConsistency(e.getKey(), e.getValue());
+        replicaWorkItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue()));
       }
     }
+    checkRegionConsistencyConcurrently(replicaWorkItems);
     setCheckHdfs(prevHdfsCheck);
 
     if (shouldCheckHdfs()) {
       checkAndFixTableStates();
     }
   }
 
+  /**
+   * Check consistency of all regions using multiple threads concurrently.
+   */
+  private void checkRegionConsistencyConcurrently(
+      final List<CheckRegionConsistencyWorkItem> workItems)
+  throws IOException, KeeperException, InterruptedException {
+    if (workItems.isEmpty()) {
+      return; // nothing to check
+    }
+
+    List<Future<Void>> workFutures = executor.invokeAll(workItems);
+    for (Future<Void> f : workFutures) {
+      try {
+        f.get();
+      } catch (ExecutionException e1) {
+        LOG.warn("Could not check region consistency ", e1.getCause());
+        if (e1.getCause() instanceof IOException) {
+          throw (IOException) e1.getCause();
+        } else if (e1.getCause() instanceof KeeperException) {
+          throw (KeeperException) e1.getCause();
+        } else if (e1.getCause() instanceof InterruptedException) {
+          throw (InterruptedException) e1.getCause();
+        } else {
+          throw new IOException(e1.getCause());
+        }
+      }
+    }
+  }
+
+  class CheckRegionConsistencyWorkItem implements Callable<Void> {
+    private final String key;
+    private final HbckInfo hbi;
+
+    CheckRegionConsistencyWorkItem(String key, HbckInfo hbi) {
+      this.key = key;
+      this.hbi = hbi;
+    }
+
+    @Override
+    public synchronized Void call() throws Exception {
+      checkRegionConsistency(key, hbi);
+      return null;
+    }
+  }
+
   /**
    * Check and fix table states, assumes full info available:
    * - tableInfos
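
The new checkRegionConsistencyConcurrently waits on every Future and unwraps ExecutionException back into the checked exceptions the method already declares, so callers see the original failure rather than the executor's wrapper. A hedged, standalone sketch of that unwrapping idiom (the class and method names here are illustrative, not from the patch):

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

final class FutureUnwrap {
  // Rethrow each task's real failure instead of the wrapping ExecutionException.
  static void rethrowAll(List<Future<Void>> futures)
      throws IOException, InterruptedException {
    for (Future<Void> f : futures) {
      try {
        f.get();
      } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
          throw (IOException) cause;
        } else if (cause instanceof InterruptedException) {
          throw (InterruptedException) cause;
        }
        throw new IOException(cause); // wrap anything unexpected
      }
    }
  }
}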
@@ -2128,16 +2183,8 @@ else if (!inMeta && !inHdfs && !isDeployed) {
 
       HRegionInfo hri = hbi.getHdfsHRI();
       TableInfo tableInfo = tablesInfo.get(hri.getTable());
-      if (tableInfo.regionsFromMeta.isEmpty()) {
-        for (HbckInfo h : regionInfoMap.values()) {
-          if (hri.getTable().equals(h.getTableName())) {
-            if (h.metaEntry != null) tableInfo.regionsFromMeta
-                .add((HRegionInfo) h.metaEntry);
-          }
-        }
-        Collections.sort(tableInfo.regionsFromMeta);
-      }
-      for (HRegionInfo region : tableInfo.regionsFromMeta) {
+
+      for (HRegionInfo region : tableInfo.getRegionsFromMeta()) {
         if (Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0
             && (region.getEndKey().length == 0 || Bytes.compareTo(region.getEndKey(),
               hri.getEndKey()) >= 0)
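
This deletion pairs with the concurrency change above: now that consistency checks run on multiple threads, the old inline population of tableInfo.regionsFromMeta could race, since several work items might find the unsynchronized list empty and fill it simultaneously. The patch moves the load behind the synchronized, lazily initialized getRegionsFromMeta() shown further down. A minimal sketch of that single-load idiom, with an illustrative String element type standing in for HRegionInfo:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class LazyRegions {
  private List<String> regions = null; // built at most once

  // synchronized so concurrent callers cannot both observe null and double-load
  synchronized List<String> getRegions() {
    if (regions == null) {
      List<String> loaded = new ArrayList<String>();
      loaded.add("region-b"); // stand-in for reading meta entries
      loaded.add("region-a");
      Collections.sort(loaded);
      regions = Collections.unmodifiableList(loaded);
    }
    return regions;
  }
}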
@@ -2493,7 +2540,7 @@ public class TableInfo {
         TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, cmp);
 
     // list of regions derived from meta entries.
-    final List<HRegionInfo> regionsFromMeta = new ArrayList<HRegionInfo>();
+    private ImmutableList<HRegionInfo> regionsFromMeta = null;
 
     TableInfo(TableName name) {
       this.tableName = name;
@@ -2550,6 +2597,23 @@ public int getNumRegions() {
       return sc.getStarts().size() + backwards.size();
     }
 
+    public synchronized ImmutableList<HRegionInfo> getRegionsFromMeta() {
+      // lazy loaded, synchronized to ensure a single load
+      if (regionsFromMeta == null) {
+        List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
+        for (HbckInfo h : HBaseFsck.this.regionInfoMap.values()) {
+          if (tableName.equals(h.getTableName())) {
+            if (h.metaEntry != null) {
+              regions.add((HRegionInfo) h.metaEntry);
+            }
+          }
+        }
+        regionsFromMeta = Ordering.natural().immutableSortedCopy(regions);
+      }
+
+      return regionsFromMeta;
+    }
+
     private class IntegrityFixSuggester extends TableIntegrityErrorHandlerImpl {
       ErrorReporter errors;
 
