Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

HDFS-15817. Rename snapshots while marking them deleted. #2677

Merged
merged 5 commits into from Feb 9, 2021
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
Expand Up @@ -61,6 +61,10 @@ public static String generateDefaultSnapshotName() {
return new SimpleDateFormat(DEFAULT_SNAPSHOT_NAME_PATTERN).format(new Date());
}

/**
 * Builds the name a snapshot is renamed to once it has been marked as
 * deleted: the original snapshot name suffixed with {@code "#"} and the
 * snapshot id, which keeps deleted-snapshot names unique per directory.
 */
public static String generateDeletedSnapshotName(Snapshot s) {
  final StringBuilder deletedName = new StringBuilder(getSnapshotName(s));
  deletedName.append('#').append(s.getId());
  return deletedName.toString();
}

public static String getSnapshotPath(String snapshottableDir,
String snapshotRelativePath) {
final StringBuilder b = new StringBuilder(snapshottableDir);
Expand Down
Expand Up @@ -66,16 +66,17 @@
import org.apache.hadoop.metrics2.util.MBeans;

import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Manage snapshottable directories and their snapshots.
*
*
* This class includes operations that create, access, modify snapshots and/or
* snapshot-related data. In general, the locking structure of snapshot
* operations is: <br>
*
*
* 1. Lock the {@link FSNamesystem} lock in {@link FSNamesystem} before calling
* into {@link SnapshotManager} methods.<br>
* 2. Lock the {@link FSDirectory} lock for the {@link SnapshotManager} methods
Expand Down Expand Up @@ -133,7 +134,7 @@ public void initThreadLocals() {
private int snapshotCounter = 0;
private final int maxSnapshotLimit;
private final int maxSnapshotFSLimit;

/** All snapshottable directories in the namesystem. */
private final Map<Long, INodeDirectory> snapshottables =
new ConcurrentHashMap<>();
Expand Down Expand Up @@ -258,7 +259,7 @@ public void setSnapshottable(final String path, boolean checkNestedSnapshottable
}
addSnapshottable(d);
}

/** Add the given snapshottable directory to {@link #snapshottables}. */
public void addSnapshottable(INodeDirectory dir) {
Preconditions.checkArgument(dir.isSnapshottable());
Expand All @@ -269,7 +270,7 @@ public void addSnapshottable(INodeDirectory dir) {
/**
 * Removes a single snapshottable directory from {@link #snapshottables},
 * keyed by its inode id.
 */
private void removeSnapshottable(INodeDirectory dir) {
  final long inodeId = dir.getId();
  snapshottables.remove(inodeId);
}

/** Remove snapshottable directories from {@link #snapshottables} */
public void removeSnapshottable(List<INodeDirectory> toRemove) {
if (toRemove != null) {
Expand All @@ -281,7 +282,7 @@ public void removeSnapshottable(List<INodeDirectory> toRemove) {

/**
* Set the given snapshottable directory to non-snapshottable.
*
*
* @throws SnapshotException if there are snapshots in the directory.
*/
public void resetSnapshottable(final String path) throws IOException {
Expand Down Expand Up @@ -461,7 +462,7 @@ public String createSnapshot(final LeaseManager leaseManager,
int n = numSnapshots.get();
checkFileSystemSnapshotLimit(n);
srcRoot.addSnapshot(this, snapshotName, leaseManager, mtime);

//create success, update id
snapshotCounter++;
numSnapshots.getAndIncrement();
Expand Down Expand Up @@ -533,6 +534,8 @@ public void deleteSnapshot(final INodesInPath iip, final String snapshotName,
INodesInPath.append(iip, snapshot.getRoot(),
DFSUtil.string2Bytes(snapshotName)), xattrs,
EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
renameSnapshot(iip, srcRoot.getFullPathName(), snapshotName,
Snapshot.generateDeletedSnapshotName(snapshot), Time.now());
return;
}

Expand Down
Expand Up @@ -92,18 +92,25 @@ public void testOrderedSnapshotDeletion() throws Exception {
assertXAttrSet("s2", hdfs, null);
hdfs.deleteSnapshot(snapshottableDir, "s0");
assertXAttrSet("s2", hdfs, null);
hdfs.deleteSnapshot(snapshottableDir, "s1");
hdfs.deleteSnapshot(snapshottableDir, "s2");
hdfs.deleteSnapshot(snapshottableDir,
getDeletedSnapshotName(hdfs, snapshottableDir, "s1"));
hdfs.deleteSnapshot(snapshottableDir,
getDeletedSnapshotName(hdfs, snapshottableDir, "s2"));
}

static void assertMarkedAsDeleted(Path snapshotRoot, MiniDFSCluster cluster)
throws IOException {
static void assertMarkedAsDeleted(Path snapshotRoot, Path snapshottableDir,
MiniDFSCluster cluster) throws IOException {
final String snapName =
getDeletedSnapshotName(cluster.getFileSystem(), snapshottableDir,
snapshotRoot.getName());
final Path snapPathNew =
SnapshotTestHelper.getSnapshotRoot(snapshottableDir, snapName);
// Check if the path exists
Assert.assertNotNull(cluster.getFileSystem().getFileStatus(snapshotRoot));
Assert.assertNotNull(cluster.getFileSystem().getFileStatus(snapPathNew));

// Check xAttr for snapshotRoot
final INode inode = cluster.getNamesystem().getFSDirectory()
.getINode(snapshotRoot.toString());
.getINode(snapPathNew.toString());
final XAttrFeature f = inode.getXAttrFeature();
final XAttr xAttr = f.getXAttr(XATTR_SNAPSHOT_DELETED);
Assert.assertNotNull(xAttr);
Expand All @@ -114,7 +121,7 @@ static void assertMarkedAsDeleted(Path snapshotRoot, MiniDFSCluster cluster)

// Check inode
Assert.assertTrue(inode instanceof Snapshot.Root);
Assert.assertTrue(((Snapshot.Root)inode).isMarkedAsDeleted());
Assert.assertTrue(((Snapshot.Root) inode).isMarkedAsDeleted());
}

static void assertNotMarkedAsDeleted(Path snapshotRoot,
Expand All @@ -139,12 +146,16 @@ static void assertNotMarkedAsDeleted(Path snapshotRoot,
void assertXAttrSet(String snapshot,
DistributedFileSystem hdfs, XAttr newXattr)
throws IOException {
hdfs.deleteSnapshot(snapshottableDir, snapshot);
String snapName = getDeletedSnapshotName(hdfs, snapshottableDir, snapshot);
hdfs.deleteSnapshot(snapshottableDir, snapName);
// Check xAttr for parent directory
Path snapshotRoot = SnapshotTestHelper.getSnapshotRoot(snapshottableDir,
snapshot);
assertMarkedAsDeleted(snapshotRoot, cluster);

Path snapshotRoot =
SnapshotTestHelper.getSnapshotRoot(snapshottableDir, snapshot);
assertMarkedAsDeleted(snapshotRoot, snapshottableDir, cluster);
// Check xAttr for parent directory
snapName = getDeletedSnapshotName(hdfs, snapshottableDir, snapshot);
snapshotRoot =
SnapshotTestHelper.getSnapshotRoot(snapshottableDir, snapName);
// Make sure its not user visible
if (cluster.getNameNode().getConf().getBoolean(DFSConfigKeys.
DFS_NAMENODE_XATTRS_ENABLED_KEY,
Expand Down Expand Up @@ -261,4 +272,11 @@ public void testSnapshotXAttrWithPreExistingXattrs() throws Exception {
hdfs.createSnapshot(snapshottableDir, "s1");
assertXAttrSet("s1", hdfs, newXAttr);
}

/**
 * Resolves the current name of a snapshot in the given snapshottable
 * directory. When a snapshot has been marked as deleted it is renamed to
 * {@code "<name>#<id>"}, so the lookup accepts either the original name or
 * the renamed form.
 *
 * @param hdfs the file system to query for the snapshot listing
 * @param snapshottableDir the snapshottable directory to search
 * @param snapshot the original snapshot name
 * @return the snapshot's current name, possibly suffixed with {@code "#<id>"}
 * @throws IOException if the listing fails or no matching snapshot exists
 */
public static String getDeletedSnapshotName(DistributedFileSystem hdfs,
    Path snapshottableDir, String snapshot) throws IOException {
  return Arrays.stream(hdfs.getSnapshotListing(snapshottableDir))
      .map(p -> p.getFullPath().getName())
      // Match the exact name or "name#<id>" so that e.g. "s1" cannot
      // accidentally match "s10" or "s10#7" via a bare prefix test.
      .filter(n -> n.equals(snapshot) || n.startsWith(snapshot + "#"))
      .findFirst()
      // Fail with a descriptive message instead of Optional.get()'s bare
      // NoSuchElementException when the snapshot is missing.
      .orElseThrow(() -> new IOException(
          "Snapshot " + snapshot + " not found under " + snapshottableDir));
}
}
Expand Up @@ -50,6 +50,7 @@
import static org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED_GC_PERIOD_MS;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.TestOrderedSnapshotDeletion.assertMarkedAsDeleted;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.TestOrderedSnapshotDeletion.assertNotMarkedAsDeleted;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.TestOrderedSnapshotDeletion.getDeletedSnapshotName;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

Expand Down Expand Up @@ -112,26 +113,35 @@ public void testSingleDir() throws Exception {
hdfs.deleteSnapshot(snapshottableDir, "s2");
assertNotMarkedAsDeleted(s0path, cluster);
assertNotMarkedAsDeleted(s1path, cluster);
assertMarkedAsDeleted(s2path, cluster);
assertMarkedAsDeleted(s2path, snapshottableDir, cluster);
final Path s2pathNew = new Path(s2path.getParent(),
getDeletedSnapshotName(hdfs, snapshottableDir, s2path.getName()));
Assert.assertFalse(exist(s2path, hdfs));
Assert.assertTrue(exist(s2pathNew, hdfs));
Assert.assertFalse(s2path.equals(s2pathNew));

hdfs.deleteSnapshot(snapshottableDir, "s1");
assertNotMarkedAsDeleted(s0path, cluster);
assertMarkedAsDeleted(s1path, cluster);
assertMarkedAsDeleted(s2path, cluster);

assertMarkedAsDeleted(s1path, snapshottableDir, cluster);
assertMarkedAsDeleted(s2path, snapshottableDir, cluster);
final Path s1pathNew = new Path(s1path.getParent(),
getDeletedSnapshotName(hdfs, snapshottableDir, s1path.getName()));
Assert.assertFalse(exist(s1path, hdfs));
Assert.assertTrue(exist(s1pathNew, hdfs));
Assert.assertFalse(s1path.equals(s1pathNew));
// should not be gc'ed
Thread.sleep(10*GC_PERIOD);
assertNotMarkedAsDeleted(s0path, cluster);
assertMarkedAsDeleted(s1path, cluster);
assertMarkedAsDeleted(s2path, cluster);
assertMarkedAsDeleted(s1path, snapshottableDir, cluster);
assertMarkedAsDeleted(s2path, snapshottableDir, cluster);

hdfs.deleteSnapshot(snapshottableDir, "s0");
Assert.assertFalse(exist(s0path, hdfs));

waitForGc(Arrays.asList(s1path, s2path), hdfs);
waitForGc(Arrays.asList(s1pathNew, s2pathNew), hdfs);
// total no of edit log records created for delete snapshot will be equal
// to sum of no of user deleted snapshots and no of snapshots gc'ed with
// snapshotDeletion gc thread
// to sum of no of user deleted distinct snapshots
// and no of snapshots gc'ed with snapshotDeletion gc thread
doEditLogValidation(cluster, 5);
}

Expand Down