HBASE-24482 [hbase-operator-tools] build of hbck2 fails with HBase branch-2.3, due to missing dependencies

Make it so we compile and tests pass against hbase-2.3.x as
well as 2.1.x.

Remove use of @Nullable -- nice-to-have, but of dodgy provenance.

Use CommonFSUtils instead of FSUtils, as the latter no longer subclasses
CommonFSUtils, whose utility methods we make use of in our import of hbck1
functionality (TODO: undo this reliance on IA.Private).
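
For reference, the substitution applied across the files below amounts to the
following pattern (a minimal sketch, not part of the patch; the class name is
invented, but CommonFSUtils.getRootDir/getTableDir are the calls the diff swaps
in for the old FSUtils ones):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.util.CommonFSUtils;

  public class CommonFSUtilsExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Was: FSUtils.getRootDir(conf) -- only resolves while FSUtils extends CommonFSUtils.
      Path rootDir = CommonFSUtils.getRootDir(conf);
      FileSystem fs = rootDir.getFileSystem(conf);
      // Was: FSUtils.getTableDir(rootDir, tableName).
      Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.valueOf("example"));
      System.out.println(tableDir + " exists=" + fs.exists(tableDir));
    }
  }

Calling CommonFSUtils directly compiles against both 2.1.x and 2.3.x, but it is
still an IA.Private class; hence the TODO above.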

hbase-hbck2/src/main/java/org/apache/hbase/FileSystemFsck.java
hbase-hbck2/src/main/java/org/apache/hbase/FsRegionsMetaRecoverer.java
hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsckRepair.java
hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HFileCorruptionChecker.java
hbase-hbck2/src/main/java/org/apache/hbase/hbck1/OfflineMetaRepair.java
hbase-hbck2/src/test/java/org/apache/hbase/TestHBCK2.java
 s/FSUtils/CommonFSUtils/g

hbase-hbck2/src/main/java/org/apache/hbase/HBCKMetaTableAccessor.java
 Purge @Nullable usage.
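
 Call sites are unaffected; the nullability contract stays documented in the
 javadoc ("@return null if not found"), so callers keep null-checking. A small
 sketch (the helper method and class name here are invented for illustration):

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.TableState;
  import org.apache.hbase.HBCKMetaTableAccessor;

  public class TableStateCheckExample {
    // getTableState may return null (per its javadoc), even without @Nullable.
    static boolean isDisabled(Connection conn, TableName tableName) throws IOException {
      TableState state = HBCKMetaTableAccessor.getTableState(conn, tableName);
      return state != null && state.getState() == TableState.State.DISABLED;
    }
  }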

hbase-hbck2/src/test/java/org/apache/hbase/TestSchedulingRecoveries.java
 Remove test that behaves differently between hbase-2.1.x and hbase-2.3.x.

hbase-tools/src/test/java/org/apache/hbase/TestRegionsMerger.java
 Amend test where merging works differently between hbase-2.3.x and
 hbase-2.1.x (2.3 does more protective checks).
saintstack committed Jun 17, 2020
1 parent 88adebf commit 8d716173b6f713d3b375f9eef18189cf2f1a3745
Showing 10 changed files with 51 additions and 84 deletions.
@@ -26,6 +26,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hbase.hbck1.HBaseFsck;
import org.apache.hbase.hbck1.HFileCorruptionChecker;
@@ -46,7 +47,7 @@ public class FileSystemFsck implements Closeable {

FileSystemFsck(Configuration conf) throws IOException {
this.configuration = conf;
this.rootDir = FSUtils.getRootDir(this.configuration);
this.rootDir = CommonFSUtils.getRootDir(this.configuration);
this.fs = rootDir.getFileSystem(this.configuration);

}
@@ -82,7 +83,7 @@ int fsck(String[] args) throws IOException {
Collection<String> tables = commandLine.getArgList();
Collection<Path> tableDirs = tables.isEmpty()?
FSUtils.getTableDirs(this.fs, this.rootDir):
tables.stream().map(t -> FSUtils.getTableDir(this.rootDir, TableName.valueOf(t))).
tables.stream().map(t -> CommonFSUtils.getTableDir(this.rootDir, TableName.valueOf(t))).
collect(Collectors.toList());
hfcc.checkTables(tableDirs);
hfcc.report(hbaseFsck.getErrors());
@@ -77,7 +77,7 @@ public FsRegionsMetaRecoverer(Configuration configuration) throws IOException {

private List<Path> getTableRegionsDirs(String table) throws IOException {
String hbaseRoot = this.config.get(HConstants.HBASE_DIR);
Path tableDir = FSUtils.getTableDir(new Path(hbaseRoot), TableName.valueOf(table));
Path tableDir = CommonFSUtils.getTableDir(new Path(hbaseRoot), TableName.valueOf(table));
return FSUtils.getRegionDirs(fs, tableDir);
}

@@ -22,8 +22,6 @@
import static org.apache.hadoop.hbase.HConstants.TABLE_FAMILY;
import static org.apache.hadoop.hbase.HConstants.TABLE_STATE_QUALIFIER;

import edu.umd.cs.findbugs.annotations.Nullable;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -65,6 +63,7 @@
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;

import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -391,7 +390,6 @@ public static int getRegionCount(final Connection connection, final TableName ta
* (Copied from MetaTableAccessor)
* @return null if not found
*/
@Nullable
public static TableState getTableState(Result r) throws IOException {
Cell cell = r.getColumnLatestCell(TABLE_FAMILY, TABLE_STATE_QUALIFIER);
if (cell == null) {
@@ -411,7 +409,6 @@ public static TableState getTableState(Result r) throws IOException {
* @param conn connection to use
* @param tableName table to fetch state for
*/
@Nullable
public static TableState getTableState(Connection conn, TableName tableName)
throws IOException {
if (tableName.equals(TableName.META_TABLE_NAME)) {
@@ -489,7 +486,6 @@ public static void updateTableState(Connection conn, TableName tableName,
* @param qualifier Column family qualifier
* @return An RegionInfo instance or null.
*/
@Nullable
public static RegionInfo getRegionInfo(final Result r, byte [] qualifier) {
Cell cell = r.getColumnLatestCell(CATALOG_FAMILY, qualifier);
if (cell == null) {
@@ -538,7 +534,6 @@ private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
* @param r Result to pull from
* @return A ServerName instance or null if necessary fields not found or empty.
*/
@Nullable
@InterfaceAudience.Private // for use by HMaster#getTableRegionRow which is used for testing only
public static ServerName getServerName(final Result r, final int replicaId) {
byte[] serverColumn = getServerColumn(replicaId);
@@ -568,7 +563,6 @@ public static ServerName getServerName(final Result r, final int replicaId) {
* @return an HRegionLocationList containing all locations for the region range or null if
* we can't deserialize the result.
*/
@Nullable
public static RegionLocations getRegionLocations(final Result r) {
if (r == null) {
return null;
@@ -126,19 +126,8 @@
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.*;
import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.KeyRange;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.RegionSplitCalculator;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
@@ -432,7 +421,7 @@ private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration
*/
@VisibleForTesting
public static Path getTmpDir(Configuration conf) throws IOException {
return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);
return new Path(CommonFSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);
}

/**
@@ -464,8 +453,8 @@ Path getHbckLockPath() {
@Override
public FSDataOutputStream call() throws IOException {
try {
FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);
FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,
FileSystem fs = CommonFSUtils.getCurrentFileSystem(this.conf);
FsPermission defaultPerms = CommonFSUtils.getFilePermissions(fs, this.conf,
HConstants.DATA_FILE_UMASK_KEY);
Path tmpDir = getTmpDir(conf);
this.hbckLockPath = new Path(tmpDir, this.lockFileName);
@@ -493,7 +482,7 @@ private FSDataOutputStream createFileWithRetries(final FileSystem fs,
IOException exception = null;
do {
try {
return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);
return CommonFSUtils.create(fs, hbckLockFilePath, defaultPerms, false);
} catch (IOException ioe) {
LOG.info("Failed to create lock file " + hbckLockFilePath.getName()
+ ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "
@@ -558,7 +547,7 @@ private void unlockHbck() {
do {
try {
IOUtils.closeQuietly(hbckOutFd);
FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), hbckLockPath, true);
CommonFSUtils.delete(CommonFSUtils.getCurrentFileSystem(getConf()), hbckLockPath, true);
return;
} catch (IOException ioe) {
LOG.info("Failed to delete " + hbckLockPath + ", try="
@@ -943,9 +932,9 @@ public void checkRegionBoundaries() {
List<RegionInfo> regions = HBCKMetaTableAccessor.getAllRegions(connection);
final RegionBoundariesInformation currentRegionBoundariesInformation =
new RegionBoundariesInformation();
Path hbaseRoot = FSUtils.getRootDir(getConf());
Path hbaseRoot = CommonFSUtils.getRootDir(getConf());
for (RegionInfo regionInfo : regions) {
Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
Path tableDir = CommonFSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
// For each region, get the start and stop key from the META and compare them to the
// same information from the Stores.
@@ -1072,7 +1061,6 @@ private void adoptHdfsOrphan(HbckInfo hi) throws IOException {
try {
hf = HFile.createReader(fs, hfile.getPath(), CacheConfig.DISABLED,
true, getConf());
hf.loadFileInfo();
Optional<Cell> startKv = hf.getFirstKey();
start = CellUtil.cloneRow(startKv.get());
Optional<Cell> endKv = hf.getLastKey();
@@ -1187,7 +1175,7 @@ private int restoreHdfsIntegrity() throws IOException, InterruptedException {
private void offlineReferenceFileRepair() throws IOException, InterruptedException {
clearState();
Configuration conf = getConf();
Path hbaseRoot = FSUtils.getRootDir(conf);
Path hbaseRoot = CommonFSUtils.getRootDir(conf);
FileSystem fs = hbaseRoot.getFileSystem(conf);
Map<String, Path> allFiles =
getTableStoreFilePathMap(fs, hbaseRoot, new FSUtils.ReferenceFileFilter(fs), executor);
@@ -1265,7 +1253,7 @@ private static Map<String, Path> getTableStoreFilePathMap(final FileSystem fs,

// only include the directory paths to tables
for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
getTableStoreFilePathMap(map, fs, hbaseRootDir, FSUtils.getTableName(tableDir),
getTableStoreFilePathMap(map, fs, hbaseRootDir, CommonFSUtils.getTableName(tableDir),
sfFilter, executor);
}
return map;
@@ -1306,7 +1294,7 @@ private static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> resu
resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap;

// only include the directory paths to tables
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
// Inside a table, there are compaction.dir directories to skip. Otherwise, all else
// should be regions.
final FSUtils.FamilyDirFilter familyFilter = new FSUtils.FamilyDirFilter(fs);
@@ -1412,7 +1400,7 @@ public void run() {
*/
private void offlineHLinkFileRepair() throws IOException, InterruptedException {
Configuration conf = getConf();
Path hbaseRoot = FSUtils.getRootDir(conf);
Path hbaseRoot = CommonFSUtils.getRootDir(conf);
FileSystem fs = hbaseRoot.getFileSystem(conf);
Map<String, Path> allFiles = getTableStoreFilePathMap(fs, hbaseRoot,
new FSUtils.HFileLinkFilter(), executor);
@@ -1586,7 +1574,7 @@ private SortedMap<TableName, TableInfo> loadHdfsRegionInfos()
}
}

Path hbaseRoot = FSUtils.getRootDir(getConf());
Path hbaseRoot = CommonFSUtils.getRootDir(getConf());
FileSystem fs = hbaseRoot.getFileSystem(getConf());
// serialized table info gathering.
for (HbckInfo hbi: hbckInfos) {
@@ -1927,7 +1915,7 @@ public boolean rebuildMeta() throws IOException, InterruptedException {
HBaseTestingUtility.closeRegionAndWAL(meta);
// Clean out the WAL we created and used here.
LOG.info("Deleting {}, result={}", waldir,
FSUtils.delete(FileSystem.get(getConf()), waldir, true));
CommonFSUtils.delete(FileSystem.get(getConf()), waldir, true));
}
LOG.info("Success! hbase:meta table rebuilt. Old hbase:meta moved into " + backupDir);
return true;
@@ -1939,7 +1927,7 @@ public boolean rebuildMeta() throws IOException, InterruptedException {
* @return an open hbase:meta HRegion
*/
private HRegion createNewMeta() throws IOException {
Path rootdir = FSUtils.getRootDir(getConf());
Path rootdir = CommonFSUtils.getRootDir(getConf());
RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
TableDescriptor td = new FSTableDescriptors(getConf()).get(TableName.META_TABLE_NAME);
return HBaseTestingUtility.createRegionAndWAL(ri, rootdir, getConf(), td);
@@ -1979,7 +1967,7 @@ private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles,

private Path getSidelineDir() throws IOException {
if (sidelineDir == null) {
Path hbaseDir = FSUtils.getRootDir(getConf());
Path hbaseDir = CommonFSUtils.getRootDir(getConf());
Path hbckDir = new Path(hbaseDir, HConstants.HBCK_SIDELINEDIR_NAME);
sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-"
+ startMillis);
@@ -2016,7 +2004,7 @@ Path sidelineRegionDir(FileSystem fs,
if (parentDir != null) {
rootDir = new Path(rootDir, parentDir);
}
Path sidelineTableDir= FSUtils.getTableDir(rootDir, tableName);
Path sidelineTableDir= CommonFSUtils.getTableDir(rootDir, tableName);
Path sidelineRegionDir = new Path(sidelineTableDir, regionDir.getName());
fs.mkdirs(sidelineRegionDir);
boolean success = false;
@@ -2077,9 +2065,9 @@ Path sidelineRegionDir(FileSystem fs,
*/
void sidelineTable(FileSystem fs, TableName tableName, Path hbaseDir,
Path backupHbaseDir) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseDir, tableName);
Path tableDir = CommonFSUtils.getTableDir(hbaseDir, tableName);
if (fs.exists(tableDir)) {
Path backupTableDir= FSUtils.getTableDir(backupHbaseDir, tableName);
Path backupTableDir= CommonFSUtils.getTableDir(backupHbaseDir, tableName);
fs.mkdirs(backupTableDir.getParent());
boolean success = fs.rename(tableDir, backupTableDir);
if (!success) {
@@ -2096,7 +2084,7 @@ void sidelineTable(FileSystem fs, TableName tableName, Path hbaseDir,
*/
Path sidelineOldMeta() throws IOException {
// put current hbase:meta aside.
Path hbaseDir = FSUtils.getRootDir(getConf());
Path hbaseDir = CommonFSUtils.getRootDir(getConf());
FileSystem fs = hbaseDir.getFileSystem(getConf());
Path backupDir = getSidelineDir();
fs.mkdirs(backupDir);
@@ -2157,15 +2145,15 @@ public static void versionFileCreate(Configuration configuration, FileSystem fs,
* regionInfoMap
*/
public void loadHdfsRegionDirs() throws IOException, InterruptedException {
Path rootDir = FSUtils.getRootDir(getConf());
Path rootDir = CommonFSUtils.getRootDir(getConf());
FileSystem fs = rootDir.getFileSystem(getConf());

// List all tables from HDFS
List<FileStatus> tableDirs = Lists.newArrayList();

List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
for (Path path : paths) {
TableName tableName = FSUtils.getTableName(path);
TableName tableName = CommonFSUtils.getTableName(path);
if ((!checkMetaOnly && isTableIncluded(tableName)) ||
tableName.equals(TableName.META_TABLE_NAME)) {
tableDirs.add(fs.getFileStatus(path));
@@ -2455,7 +2443,7 @@ private void preCheckPermission() throws IOException {
return;
}

Path hbaseDir = FSUtils.getRootDir(getConf());
Path hbaseDir = CommonFSUtils.getRootDir(getConf());
FileSystem fs = hbaseDir.getFileSystem(getConf());
UserProvider userProvider = UserProvider.instantiate(getConf());
UserGroupInformation ugi = userProvider.getCurrent().getUGI();
@@ -4270,7 +4258,7 @@ public TableName getTableName() {
// we are only guaranteed to have a path and not an HRI for hdfsEntry,
// so we get the name from the Path
Path tableDir = this.hdfsEntry.hdfsRegionDir.getParent();
return FSUtils.getTableName(tableDir);
return CommonFSUtils.getTableName(tableDir);
} else {
// return the info from the first online/deployed hri
for (OnlineEntry e : deployedEntries) {
@@ -5435,13 +5423,13 @@ public HBaseFsck exec(ExecutorService exec, String[] args)
setHFileCorruptionChecker(hfcc); // so we can get result
Collection<TableName> tables = getIncludedTables();
Collection<Path> tableDirs = new ArrayList<>();
Path rootdir = FSUtils.getRootDir(getConf());
Path rootdir = CommonFSUtils.getRootDir(getConf());
if (tables.size() > 0) {
for (TableName t : tables) {
tableDirs.add(FSUtils.getTableDir(rootdir, t));
tableDirs.add(CommonFSUtils.getTableDir(rootdir, t));
}
} else {
tableDirs = FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(getConf()), rootdir);
tableDirs = FSUtils.getTableDirs(CommonFSUtils.getCurrentFileSystem(getConf()), rootdir);
}
hfcc.checkTables(tableDirs);
hfcc.report(errors);
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hbase.HBCKMetaTableAccessor;
import org.apache.zookeeper.KeeperException;
@@ -187,7 +188,7 @@ public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf,
public static HRegion createHDFSRegionDir(Configuration conf,
RegionInfo hri, TableDescriptor htd) throws IOException {
// Create HRegion
Path root = FSUtils.getRootDir(conf);
Path root = CommonFSUtils.getRootDir(conf);
HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);

// Close the new region to flush to disk. Close log file too.
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.FSUtils.FamilyDirFilter;
import org.apache.hadoop.hbase.util.FSUtils.HFileFilter;
@@ -144,7 +145,7 @@ Path createQuarantinePath(Path hFile) throws IOException {
Path tableDir = regionDir.getParent();

// build up the corrupted dirs structure
Path corruptBaseDir = new Path(FSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
Path corruptBaseDir = new Path(CommonFSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
if (conf.get("hbase.hfile.quarantine.dir") != null) {
LOG.warn("hbase.hfile.quarantine.dir is deprecated. Default to " + corruptBaseDir);
}
@@ -418,7 +419,7 @@ public Void call() throws IOException {
* @return An instance of MobRegionDirChecker.
*/
private MobRegionDirChecker createMobRegionDirChecker(Path tableDir) {
TableName tableName = FSUtils.getTableName(tableDir);
TableName tableName = CommonFSUtils.getTableName(tableDir);
Path mobDir = MobUtils.getMobRegionPath(conf, tableName);
return new MobRegionDirChecker(mobDir);
}
@@ -22,6 +22,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.MultipleIOException;

@@ -70,7 +71,7 @@ public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
// Cover both bases, the old way of setting default fs and the new.
// We're supposed to run on 0.20 and 0.21 anyways.
FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
HBaseFsck fsck = new HBaseFsck(conf);

// Process command-line args.
@@ -85,8 +86,8 @@ public static void main(String[] args) throws Exception {
}
// update hbase root dir to user-specified base
i++;
FSUtils.setRootDir(conf, new Path(args[i]));
FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
CommonFSUtils.setRootDir(conf, new Path(args[i]));
CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
} else if (cmd.equals("-sidelineDir")) {
if (i == args.length - 1) {
System.err.println("OfflineMetaRepair: -sidelineDir needs an HDFS path.");
