HBASE-22386 HBOSS: Limit depth that listing locks check for other locks
Signed-off-by: Josh Elser <elserj@apache.org>
mackrorysd authored and joshelser committed May 14, 2019
1 parent 1bf5e84 commit 074c852bd590eec1646ee24ff39fcbc9ce4167ea
Showing 6 changed files with 233 additions and 67 deletions.
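
The change threads a new Depth argument from the FileSystem wrapper's listing methods down to the ZooKeeper-backed lock checks, so a plain (non-recursive) listing only looks for conflicting locks one level below the listing root instead of walking the entire subtree. The class below is a minimal standalone sketch of that level/maxLevel recursion, not HBOSS code: the Node type and its fields are invented for illustration, while the real implementation walks znodes through Curator (see the hunks that follow).

import java.util.ArrayList;
import java.util.List;

/**
 * Standalone illustration of the depth-limited "is there a lock below me?"
 * scan added in this commit. Node is a hypothetical in-memory stand-in for
 * a znode with an attached write lock.
 */
public class DepthLimitedScan {

  enum Depth { DIRECTORY, RECURSIVE }

  static class Node {
    final String name;
    boolean writeLocked;
    final List<Node> children = new ArrayList<>();
    Node(String name) { this.name = name; }
  }

  /** DIRECTORY inspects only immediate children; RECURSIVE walks the whole subtree. */
  static boolean writeLockBelow(Node root, Depth depth) {
    int maxLevel = (depth == Depth.DIRECTORY) ? 1 : Integer.MAX_VALUE;
    return writeLockBelow(root, 0, maxLevel);
  }

  /** Level 0 is the root itself (skipped); recursion stops once level reaches maxLevel. */
  private static boolean writeLockBelow(Node node, int level, int maxLevel) {
    if (level > 0 && node.writeLocked) {
      return true;
    }
    if (level < maxLevel) {
      for (Node child : node.children) {
        if (writeLockBelow(child, level + 1, maxLevel)) {
          return true;
        }
      }
    }
    return false;
  }

  public static void main(String[] args) {
    Node root = new Node("/data");
    Node dir = new Node("/data/table");
    Node deep = new Node("/data/table/region");
    deep.writeLocked = true;            // lock held two levels below the listing root
    dir.children.add(deep);
    root.children.add(dir);

    // A plain listing no longer sees the deep lock...
    System.out.println(writeLockBelow(root, Depth.DIRECTORY));  // false
    // ...while a recursive listing still does.
    System.out.println(writeLockBelow(root, Depth.RECURSIVE));  // true
  }
}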
@@ -53,6 +53,7 @@
 import org.apache.hadoop.hbase.oss.sync.AutoLock.LockedFSDataOutputStream;
 import org.apache.hadoop.hbase.oss.sync.AutoLock.LockedRemoteIterator;
 import org.apache.hadoop.hbase.oss.sync.TreeLockManager;
+import org.apache.hadoop.hbase.oss.sync.TreeLockManager.Depth;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
@@ -533,7 +534,7 @@ public ContentSummary getContentSummary(Path f) throws IOException {
 
   public FileStatus[] listStatus(Path f) throws FileNotFoundException,
       IOException {
-    try (AutoLock l = sync.lockListing(f)) {
+    try (AutoLock l = sync.lockListing(f, Depth.DIRECTORY)) {
       return fs.listStatus(f);
     }
   }
@@ -547,21 +548,21 @@ public RemoteIterator<Path> listCorruptFileBlocks(Path path)
 
   public FileStatus[] listStatus(Path f, PathFilter filter)
       throws FileNotFoundException, IOException {
-    try (AutoLock l = sync.lockListing(f)) {
+    try (AutoLock l = sync.lockListing(f, Depth.DIRECTORY)) {
       return fs.listStatus(f, filter);
     }
   }
 
   public FileStatus[] listStatus(Path[] files)
       throws FileNotFoundException, IOException {
-    try (AutoLock l = sync.lockListings(files)) {
+    try (AutoLock l = sync.lockListings(files, Depth.DIRECTORY)) {
      return fs.listStatus(files);
     }
   }
 
   public FileStatus[] listStatus(Path[] files, PathFilter filter)
       throws FileNotFoundException, IOException {
-    try (AutoLock l = sync.lockListings(files)) {
+    try (AutoLock l = sync.lockListings(files, Depth.DIRECTORY)) {
       return fs.listStatus(files, filter);
     }
   }
@@ -579,7 +580,7 @@ public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
 
   public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
       throws FileNotFoundException, IOException {
-    AutoLock lock = sync.lockListing(f);
+    AutoLock lock = sync.lockListing(f, Depth.DIRECTORY);
     try {
       RemoteIterator<LocatedFileStatus> iterator = fs.listLocatedStatus(f);
       return new LockedRemoteIterator<LocatedFileStatus>(iterator, lock);
@@ -591,7 +592,7 @@ public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
 
   public RemoteIterator<FileStatus> listStatusIterator(final Path p)
       throws FileNotFoundException, IOException {
-    AutoLock lock = sync.lockListing(p);
+    AutoLock lock = sync.lockListing(p, Depth.DIRECTORY);
     try {
       RemoteIterator<FileStatus> iterator = fs.listStatusIterator(p);
       return new LockedRemoteIterator<FileStatus>(iterator, lock);
@@ -604,7 +605,8 @@ public RemoteIterator<FileStatus> listStatusIterator(final Path p)
   public RemoteIterator<LocatedFileStatus> listFiles(
       final Path f, final boolean recursive)
       throws FileNotFoundException, IOException {
-    AutoLock lock = sync.lockListing(f);
+    Depth depth = recursive ? Depth.RECURSIVE : Depth.DIRECTORY;
+    AutoLock lock = sync.lockListing(f, depth);
     try {
       RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(f, recursive);
       return new LockedRemoteIterator<LocatedFileStatus>(iterator, lock);
@@ -847,7 +849,7 @@ public void setTimes(Path p, long mtime, long atime)
 
   public Path createSnapshot(Path path, String snapshotName)
       throws IOException {
-    try (AutoLock l = sync.lockListing(path)) {
+    try (AutoLock l = sync.lockListing(path, Depth.RECURSIVE)) {
       return fs.createSnapshot(path, snapshotName);
     }
   }
@@ -30,6 +30,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.oss.Constants;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
@@ -46,6 +47,10 @@ public abstract class TreeLockManager {
   private static final Logger LOG =
       LoggerFactory.getLogger(TreeLockManager.class);
 
+  public static enum Depth {
+    DIRECTORY, RECURSIVE
+  }
+
   public static synchronized TreeLockManager get(FileSystem fs)
       throws IOException {
     Configuration conf = fs.getConf();
@@ -78,7 +83,7 @@ public static synchronized TreeLockManager get(FileSystem fs)
    * and filesystems, and assumes the URI scheme mapping is consistent
    * everywhere.
    */
-  private Path norm(Path path) {
+  public Path norm(Path path) {
     URI uri = fs.makeQualified(path).toUri();
     String uriScheme = uri.getScheme();
     String uriHost = uri.getHost();
@@ -96,6 +101,7 @@ private Path norm(Path path) {
    * Convenience function for calling norm on an array. Returned copy of the
    * array will also be sorted for deadlock avoidance.
    */
+  @VisibleForTesting
   private Path[] norm(Path[] paths) {
     Path[] newPaths = new Path[paths.length];
     for (int i = 0; i < paths.length; i++) {
@@ -177,7 +183,8 @@ public void close() throws IOException {
    * @param p Path to check
    * @return True if a lock is found, false otherwise
    */
-  protected abstract boolean writeLockBelow(Path p) throws IOException;
+  @VisibleForTesting
+  public abstract boolean writeLockBelow(Path p, Depth depth) throws IOException;
 
   /**
    * Checks for the presence of a write lock on all child directories of the
@@ -186,7 +193,8 @@ public void close() throws IOException {
    * @param p Path to check
    * @return True if a lock is found, false otherwise
    */
-  protected abstract boolean readLockBelow(Path p) throws IOException;
+  @VisibleForTesting
+  public abstract boolean readLockBelow(Path p, Depth depth) throws IOException;
 
   /**
   * Recursively cleans up locks that won't be used again.
@@ -224,13 +232,13 @@ private boolean retryBackoff(int retries) throws IOException {
    *
    * @param path Path to lock
    */
-  protected void treeWriteLock(Path p) throws IOException {
+  protected void treeWriteLock(Path p, Depth depth) throws IOException {
     int outerRetries = 0;
     do {
       int innerRetries = 0;
       do {
         // If there's already a write-lock above or below us in the tree, wait for it to leave
-        if (writeLockAbove(p) || writeLockBelow(p)) {
+        if (writeLockAbove(p) || writeLockBelow(p, depth)) {
           LOG.warn("Blocked on some parent write lock, waiting: {}", p);
           continue;
         }
@@ -239,7 +247,7 @@ protected void treeWriteLock(Path p) throws IOException {
         // Try obtain the write lock just for our node
         writeLock(p);
         // If there's now a write-lock above or below us in the tree, release and retry
-        if (writeLockAbove(p) || writeLockBelow(p)) {
+        if (writeLockAbove(p) || writeLockBelow(p, depth)) {
           LOG.warn("Blocked on some other write lock, retrying: {}", p);
           writeUnlock(p);
           continue;
@@ -250,7 +258,7 @@ protected void treeWriteLock(Path p) throws IOException {
       // Once we know we're the only write-lock in our path, drain all read-locks below
       int drainReadLocksRetries = 0;
       do {
-        if (readLockBelow(p)) {
+        if (readLockBelow(p, depth)) {
           LOG.warn("Blocked on some child read lock, writing: {}", p);
           continue;
         }
@@ -303,7 +311,9 @@ protected void treeReadLock(Path p) throws IOException {
   public AutoLock lockWrite(Path rawPath) throws IOException {
     Path path = norm(rawPath);
     LOG.debug("About to lock for create / write: {}", rawPath);
-    treeWriteLock(path);
+    // Depth should not matter here, as this is only called on files, not
+    // directories.
+    treeWriteLock(path, Depth.RECURSIVE);
     return new AutoLock() {
       public void close() throws IOException {
         LOG.debug("About to unlock after create / write: {}", path);
@@ -323,7 +333,7 @@ public void close() throws IOException {
   public AutoLock lockDelete(Path rawPath) throws IOException {
     Path path = norm(rawPath);
     LOG.debug("About to lock for delete: {}", path);
-    treeWriteLock(path);
+    treeWriteLock(path, Depth.RECURSIVE);
     return new AutoLock() {
       public void close() throws IOException {
         LOG.debug("About to recursively delete locks: {}", path);
@@ -343,10 +353,10 @@ public void close() throws IOException {
    * @param path Root of the listing operation
    * @return AutoCloseable to release this path
    */
-  public AutoLock lockListing(Path rawPath) throws IOException {
+  public AutoLock lockListing(Path rawPath, Depth depth) throws IOException {
     Path path = norm(rawPath);
     LOG.debug("About to lock for listing: {}", path);
-    treeWriteLock(path);
+    treeWriteLock(path, depth);
     return new AutoLock() {
       public void close() throws IOException {
         LOG.debug("About to unlock after listing: {}", path);
@@ -362,11 +372,11 @@ public void close() throws IOException {
    * @param paths
    * @return AutoCloseable that encapsulate all paths
    */
-  public AutoLock lockListings(Path[] rawPaths) throws IOException {
+  public AutoLock lockListings(Path[] rawPaths, Depth depth) throws IOException {
     Path[] paths = norm(rawPaths);
     for (int i = 0; i < paths.length; i++) {
       LOG.debug("About to lock for listings: {}", paths[i]);
-      treeWriteLock(paths[i]);
+      treeWriteLock(paths[i], depth);
     }
     return new AutoLock() {
       public void close() throws IOException {
@@ -406,11 +416,11 @@ public AutoLock lockRename(Path rawSrc, Path rawDst) throws IOException {
     Path dst = norm(rawDst);
     LOG.debug("About to lock for rename: from {} to {}", src, dst);
     if (src.compareTo(dst) < 0) {
-      treeWriteLock(src);
-      treeWriteLock(dst);
+      treeWriteLock(src, Depth.RECURSIVE);
+      treeWriteLock(dst, Depth.RECURSIVE);
     } else {
-      treeWriteLock(dst);
-      treeWriteLock(src);
+      treeWriteLock(dst, Depth.RECURSIVE);
+      treeWriteLock(src, Depth.RECURSIVE);
     }
     return new AutoLock() {
       public void close() throws IOException {
@@ -39,6 +39,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.oss.Constants;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.apache.zookeeper.KeeperException;
@@ -167,14 +168,18 @@ protected boolean writeLockAbove(Path p) throws IOException {
   }
 
   @Override
-  protected boolean writeLockBelow(Path p) throws IOException {
-    boolean b = writeLockBelow(p, true);
+  @VisibleForTesting
+  public boolean writeLockBelow(Path p, Depth depth) throws IOException {
+    int maxLevel = (depth == Depth.DIRECTORY) ? 1 : Integer.MAX_VALUE;
+    boolean b = writeLockBelow(p, 0, maxLevel);
     return b;
   }
 
   @Override
-  protected boolean readLockBelow(Path p) throws IOException {
-    boolean b = readLockBelow(p, true);
+  @VisibleForTesting
+  public boolean readLockBelow(Path p, Depth depth) throws IOException {
+    int maxLevel = (depth == Depth.DIRECTORY) ? 1 : Integer.MAX_VALUE;
+    boolean b = readLockBelow(p, 0, maxLevel);
     return b;
   }
 
@@ -214,19 +219,21 @@ private boolean isBeneath(Path parent, Path other) {
     return 0 == other.toString().indexOf(parent.toString());
   }
 
-  private boolean writeLockBelow(Path p, boolean firstLevel) throws IOException {
+  private boolean writeLockBelow(Path p, int level, int maxLevel) throws IOException {
     try {
-      if (!firstLevel && isLocked(get(p).writeLock())) {
+      if (level > 0 && isLocked(get(p).writeLock())) {
         return true;
       }
-      List<String> children = curator.getChildren().forPath(p.toString());
-      for (String child : children) {
-        if (child.equals(lockSubZnode)) {
-          continue;
-        }
-        if (writeLockBelow(new Path(p, child), false)) {
-          LOG.warn("Parent write lock currently held: {}", p);
-          return true;
+      if (level < maxLevel) {
+        List<String> children = curator.getChildren().forPath(p.toString());
+        for (String child : children) {
+          if (child.equals(lockSubZnode)) {
+            continue;
+          }
+          if (writeLockBelow(new Path(p, child), level+1, maxLevel)) {
+            LOG.warn("Parent write lock currently held: {}", p);
+            return true;
+          }
         }
       }
     } catch (KeeperException.NoNodeException e) {
@@ -237,19 +244,21 @@ private boolean writeLockBelow(Path p, boolean firstLevel) throws IOException {
     return false;
   }
 
-  private boolean readLockBelow(Path p, boolean firstLevel) throws IOException {
+  private boolean readLockBelow(Path p, int level, int maxLevel) throws IOException {
     try {
-      if (!firstLevel && isLocked(get(p).readLock())) {
+      if (level > 0 && isLocked(get(p).readLock())) {
        return true;
       }
-      List<String> children = curator.getChildren().forPath(p.toString());
-      for (String child : children) {
-        if (child.equals(lockSubZnode)) {
-          continue;
-        }
-        if (readLockBelow(new Path(p, child), false)) {
-          LOG.warn("Child read lock currently held: {}", p);
-          return true;
+      if (level < maxLevel) {
+        List<String> children = curator.getChildren().forPath(p.toString());
+        for (String child : children) {
+          if (child.equals(lockSubZnode)) {
+            continue;
+          }
+          if (readLockBelow(new Path(p, child), level+1, maxLevel)) {
+            LOG.warn("Child read lock currently held: {}", p);
+            return true;
+          }
         }
       }
     } catch (KeeperException.NoNodeException e) {

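For reference, a hedged usage sketch of the new lock-manager API (not part of the commit). It assumes a FileSystem whose Configuration already names an HBOSS sync implementation (for example the ZooKeeper one changed above) and an example directory path; in practice the HBOSS FileSystem wrapper shown in the first hunks takes these locks internally.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.oss.sync.AutoLock;
import org.apache.hadoop.hbase.oss.sync.TreeLockManager;
import org.apache.hadoop.hbase.oss.sync.TreeLockManager.Depth;

public class ListingLockExample {
  // Lists a directory while holding a listing lock. With Depth.DIRECTORY,
  // only locks directly under the listing root are checked for conflicts;
  // Depth.RECURSIVE would scan the whole subtree, as all listings did
  // before this commit.
  public static void listWithLock(FileSystem fs, Path dir) throws Exception {
    TreeLockManager sync = TreeLockManager.get(fs);
    try (AutoLock l = sync.lockListing(dir, Depth.DIRECTORY)) {
      for (FileStatus status : fs.listStatus(dir)) {
        System.out.println(status.getPath());
      }
    }
  }
}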