diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index fb616128d994..972f84e4a61f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -70,6 +70,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.yetus.audience.InterfaceStability; /** * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and @@ -2388,7 +2389,25 @@ default void cloneSnapshot(byte[] snapshotName, TableName tableName) */ default void cloneSnapshot(String snapshotName, TableName tableName) throws IOException, TableExistsException, RestoreSnapshotException { - cloneSnapshot(snapshotName, tableName, false); + cloneSnapshot(snapshotName, tableName, false, null); + } + + /** + * Create a new table by cloning the snapshot content. + * @param snapshotName name of the snapshot to be cloned + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @param customSFT specify the StoreFileTracker used for the table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists + * @throws RestoreSnapshotException if snapshot failed to be cloned + * @throws IllegalArgumentException if the specified table has not a valid name + */ + default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl, + String customSFT) + throws IOException, TableExistsException, RestoreSnapshotException { + get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT), getSyncWaitTimeout(), + TimeUnit.MILLISECONDS); } /** @@ -2435,8 +2454,25 @@ default Future cloneSnapshotAsync(String snapshotName, TableName tableName * @throws RestoreSnapshotException if snapshot failed to be cloned * @throws IllegalArgumentException if the specified table has not a valid name */ - Future cloneSnapshotAsync(String snapshotName, TableName tableName, boolean restoreAcl) - throws IOException, TableExistsException, RestoreSnapshotException; + default Future cloneSnapshotAsync(String snapshotName, TableName tableName, + boolean restoreAcl) + throws IOException, TableExistsException, RestoreSnapshotException { + return cloneSnapshotAsync(snapshotName, tableName, restoreAcl, null); + } + + /** + * Create a new table by cloning the snapshot content. + * @param snapshotName name of the snapshot to be cloned + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to clone acl into newly created table + * @param customSFT specify the StoreFileTracker used for the table + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists + * @throws RestoreSnapshotException if snapshot failed to be cloned + * @throws IllegalArgumentException if the specified table has not a valid name + */ + Future cloneSnapshotAsync(String snapshotName, TableName tableName, boolean restoreAcl, + String customSFT) throws IOException, TableExistsException, RestoreSnapshotException; /** * Execute a distributed procedure on a cluster.
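Reviewer note: a minimal, hypothetical usage sketch of the new synchronous Admin overload above (not part of the patch). The connection setup, snapshot name and table names are placeholders; "FILE" is the file-based tracker name also exercised by TestCloneSnapshotFromClientCustomSFT further down, and passing null keeps the tracker configuration carried by the snapshot.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CloneSnapshotWithCustomSftExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Admin admin = connection.getAdmin()) {
          // Clone the snapshot into a new table and pin the FILE based StoreFileTracker on it.
          admin.cloneSnapshot("mySnapshot", TableName.valueOf("clonedTable"), false, "FILE");
          // A null customSFT behaves like the pre-existing overloads: the tracker settings
          // recorded in the snapshot's table descriptor are kept as-is.
          admin.cloneSnapshot("mySnapshot", TableName.valueOf("clonedTable2"), false, null);
        }
      }
    }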
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 6b8fda79f07b..43425de6a4ba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -872,8 +872,20 @@ default CompletableFuture cloneSnapshot(String snapshotName, TableName tab * @param tableName name of the table where the snapshot will be restored * @param restoreAcl true to restore acl of snapshot */ + default CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, + boolean restoreAcl) { + return cloneSnapshot(snapshotName, tableName, restoreAcl, null); + } + + /** + * Create a new table by cloning the snapshot content. + * @param snapshotName name of the snapshot to be cloned + * @param tableName name of the table where the snapshot will be restored + * @param restoreAcl true to restore acl of snapshot + * @param customSFT specify the StoreFileTracker used for the table + */ CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl); + boolean restoreAcl, String customSFT); /** * List completed snapshots. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index c7b9897f501f..d1b817d9f5c4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -483,14 +483,14 @@ public CompletableFuture restoreSnapshot(String snapshotName) { @Override public CompletableFuture restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, - boolean restoreAcl) { + boolean restoreAcl) { return wrap(rawAdmin.restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl)); } @Override public CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl) { - return wrap(rawAdmin.cloneSnapshot(snapshotName, tableName, restoreAcl)); + boolean restoreAcl, String customSFT) { + return wrap(rawAdmin.cloneSnapshot(snapshotName, tableName, restoreAcl, customSFT)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java index 2d9e91c4284e..72e953c5cd24 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java @@ -183,6 +183,11 @@ public interface ColumnFamilyDescriptor { * @return A clone value. Null if no mapping for the key */ Bytes getValue(Bytes key); + /** + * @param key The key. + * @return A clone value. Null if no mapping for the key + */ + String getValue(String key); /** * @param key The key. * @return A clone value.
Null if no mapping for the key diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index e9e32908e0fe..8fa03bea07df 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -652,6 +652,12 @@ public byte[] getValue(byte[] key) { return value == null ? null : value.get(); } + @Override + public String getValue(String key) { + Bytes rval = values.get(new Bytes(Bytes.toBytes(key))); + return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength()); + } + @Override public Map getValues() { return Collections.unmodifiableMap(values); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index b7ed2d03ad97..ce64d885e41d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -92,6 +92,27 @@ import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil; import org.apache.hadoop.hbase.security.access.UserPermission; +import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; +import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; +import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; @@ -2641,7 +2662,7 @@ public void restoreSnapshot(final String snapshotName, final boolean takeFailSaf try { // Restore snapshot get( - internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl), + internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl, null), syncWaitTimeout, TimeUnit.MILLISECONDS); } catch (IOException e) { @@ -2650,7 +2671,7 @@ public void restoreSnapshot(final String snapshotName, final boolean takeFailSaf if (takeFailSafeSnapshot) { try { get( - internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, restoreAcl), + internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, restoreAcl, null), syncWaitTimeout, TimeUnit.MILLISECONDS); String msg = "Restore snapshot=" + snapshotName + @@ -2693,16 +2714,17 @@ public Future restoreSnapshotAsync(final 
String snapshotName) throw new TableNotDisabledException(tableName); } - return internalRestoreSnapshotAsync(snapshotName, tableName, false); + return internalRestoreSnapshotAsync(snapshotName, tableName, false, null); } @Override public Future cloneSnapshotAsync(String snapshotName, TableName tableName, - boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException { + boolean restoreAcl, String customSFT) + throws IOException, TableExistsException, RestoreSnapshotException { if (tableExists(tableName)) { throw new TableExistsException(tableName); } - return internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl); + return internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT); } @Override @@ -2791,7 +2813,7 @@ protected IsProcedureDoneResponse rpcCall() throws Exception { * @throws IllegalArgumentException if the restore request is formatted incorrectly */ private Future internalRestoreSnapshotAsync(final String snapshotName, - final TableName tableName, final boolean restoreAcl) + final TableName tableName, final boolean restoreAcl, String customSFT) throws IOException, RestoreSnapshotException { final SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder() @@ -2806,13 +2828,15 @@ private Future internalRestoreSnapshotAsync(final String snapshotName, Long nonce = ng.newNonce(); @Override protected RestoreSnapshotResponse rpcCall() throws Exception { - final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder() + final RestoreSnapshotRequest.Builder builder = RestoreSnapshotRequest.newBuilder() .setSnapshot(snapshot) .setNonceGroup(nonceGroup) .setNonce(nonce) - .setRestoreACL(restoreAcl) - .build(); - return master.restoreSnapshot(getRpcController(), request); + .setRestoreACL(restoreAcl); + if (customSFT != null) { + builder.setCustomSFT(customSFT); + } + return master.restoreSnapshot(getRpcController(), builder.build()); } }); @@ -4481,5 +4505,4 @@ protected Boolean rpcCall(int callTimeout) throws Exception { } }); } - } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 57d4bac5a3bc..971061013c12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -1937,7 +1937,7 @@ public CompletableFuture restoreSnapshot(String snapshotName, boolean take } else if (!exists) { // if table does not exist, then just clone snapshot into new table. completeConditionalOnFuture(future, - internalRestoreSnapshot(snapshotName, finalTableName, restoreAcl)); + internalRestoreSnapshot(snapshotName, finalTableName, restoreAcl, null)); } else { addListener(isTableDisabled(finalTableName), (disabled, err4) -> { if (err4 != null) { @@ -1973,12 +1973,13 @@ private CompletableFuture restoreSnapshot(String snapshotName, TableName t future.completeExceptionally(err); } else { // Step.2 Restore snapshot - addListener(internalRestoreSnapshot(snapshotName, tableName, restoreAcl), + addListener(internalRestoreSnapshot(snapshotName, tableName, restoreAcl, null), (void2, err2) -> { if (err2 != null) { // Step.3.a Something went wrong during the restore and try to rollback. 
addListener( - internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl), + internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl, + null), (void3, err3) -> { if (err3 != null) { future.completeExceptionally(err3); @@ -2008,7 +2009,7 @@ private CompletableFuture restoreSnapshot(String snapshotName, TableName t }); return future; } else { - return internalRestoreSnapshot(snapshotName, tableName, restoreAcl); + return internalRestoreSnapshot(snapshotName, tableName, restoreAcl, null); } } @@ -2025,7 +2026,7 @@ private void completeConditionalOnFuture(CompletableFuture dependentFutur @Override public CompletableFuture cloneSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl) { + boolean restoreAcl, String customSFT) { CompletableFuture future = new CompletableFuture<>(); addListener(tableExists(tableName), (exists, err) -> { if (err != null) { @@ -2034,14 +2035,14 @@ public CompletableFuture cloneSnapshot(String snapshotName, TableName tabl future.completeExceptionally(new TableExistsException(tableName)); } else { completeConditionalOnFuture(future, - internalRestoreSnapshot(snapshotName, tableName, restoreAcl)); + internalRestoreSnapshot(snapshotName, tableName, restoreAcl, customSFT)); } }); return future; } private CompletableFuture internalRestoreSnapshot(String snapshotName, TableName tableName, - boolean restoreAcl) { + boolean restoreAcl, String customSFT) { SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder() .setName(snapshotName).setTable(tableName.getNameAsString()).build(); try { @@ -2049,10 +2050,15 @@ private CompletableFuture internalRestoreSnapshot(String snapshotName, Tab } catch (IllegalArgumentException e) { return failedFuture(e); } + RestoreSnapshotRequest.Builder builder = + RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) + .setNonce(ng.newNonce()).setRestoreACL(restoreAcl); + if(customSFT != null){ + builder.setCustomSFT(customSFT); + } return waitProcedureResult(this. newMasterCaller().action((controller, stub) -> this . 
call(controller, stub, - RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) - .setNonce(ng.newNonce()).setRestoreACL(restoreAcl).build(), + builder.build(), (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())) .call()); } diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto index 5e2e6cecef9b..17f6028528c2 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto @@ -458,6 +458,7 @@ message RestoreSnapshotRequest { optional uint64 nonce_group = 2 [default = 0]; optional uint64 nonce = 3 [default = 0]; optional bool restoreACL = 4 [default = false]; + optional string customSFT = 5; } message RestoreSnapshotResponse { diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto index ea1535c4bec3..04801bf988cb 100644 --- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto @@ -208,6 +208,7 @@ message CloneSnapshotStateData { repeated RegionInfo region_info = 4; repeated RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; optional bool restore_acl = 6; + optional string customSFT = 7; } enum RestoreSnapshotState { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 7af4e7da3738..5b999b8f8a8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -2497,8 +2497,8 @@ public TableDescriptor get() throws IOException { } - public long restoreSnapshot(final SnapshotDescription snapshotDesc, - final long nonceGroup, final long nonce, final boolean restoreAcl) throws IOException { + public long restoreSnapshot(final SnapshotDescription snapshotDesc, final long nonceGroup, + final long nonce, final boolean restoreAcl, final String customSFT) throws IOException { checkInitialized(); getSnapshotManager().checkSnapshotSupport(); @@ -2507,18 +2507,19 @@ public long restoreSnapshot(final SnapshotDescription snapshotDesc, getClusterSchema().getNamespace(dstTable.getNamespaceAsString()); return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { + new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { setProcId( - getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), restoreAcl)); - } + getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), restoreAcl, + customSFT)); + } - @Override - protected String getDescription() { - return "RestoreSnapshotProcedure"; - } - }); + @Override + protected String getDescription() { + return "RestoreSnapshotProcedure"; + } + }); } private void checkTableExists(final TableName tableName) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 1a4d387c9e98..c813a1e661c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1531,7 +1531,7 @@ public RestoreSnapshotResponse restoreSnapshot(RpcController controller, RestoreSnapshotRequest request) throws ServiceException { try { long procId = master.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(), - request.getNonce(), request.getRestoreACL()); + request.getNonce(), request.getRestoreACL(), request.getCustomSFT()); return RestoreSnapshotResponse.newBuilder().setProcId(procId).build(); } catch (ForeignException e) { throw new ServiceException(e.getCause()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java index 7157fbf04d40..f6185d156037 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java @@ -30,6 +30,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; @@ -44,6 +46,8 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; @@ -72,6 +76,7 @@ public class CloneSnapshotProcedure private TableDescriptor tableDescriptor; private SnapshotDescription snapshot; private boolean restoreAcl; + private String customSFT; private List newRegions = null; private Map > parentsToChildrenPairMap = new HashMap<>(); @@ -95,13 +100,20 @@ public CloneSnapshotProcedure(final MasterProcedureEnv env, * @param tableDescriptor the table to operate on * @param snapshot snapshot to clone from */ + public CloneSnapshotProcedure(final MasterProcedureEnv env, + final TableDescriptor tableDescriptor, final SnapshotDescription snapshot, + final boolean restoreAcl) { + this(env, tableDescriptor, snapshot, restoreAcl, null); + } + public CloneSnapshotProcedure(final MasterProcedureEnv env, final TableDescriptor tableDescriptor, final SnapshotDescription snapshot, - final boolean restoreAcl) { + final boolean restoreAcl, final String customSFT) { super(env); this.tableDescriptor = tableDescriptor; this.snapshot = snapshot; this.restoreAcl = restoreAcl; + this.customSFT = customSFT; getMonitorStatus(); } @@ -139,6 +151,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CloneSnapsho setNextState(CloneSnapshotState.CLONE_SNAPSHOT_WRITE_FS_LAYOUT); break; case CLONE_SNAPSHOT_WRITE_FS_LAYOUT: + updateTableDescriptorWithSFT(); newRegions = createFilesystemLayout(env, tableDescriptor, newRegions); env.getMasterServices().getTableDescriptors().update(tableDescriptor, true); setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META); 
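Reviewer note: the hunk below adds the tracker-name validation and descriptor rewrite that back the new customSFT argument. A standalone sketch of that check (hypothetical class, not part of the patch), relying on StoreFileTrackerFactory.getTrackerClass, which this patch widens to public; an unknown implementation name surfaces as a RuntimeException, which the client eventually sees as the IOException asserted in the new test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

    public class CustomSftValidationSketch {
      // Mirrors CloneSnapshotProcedure#validateSFT(): resolve the tracker class from a throwaway
      // Configuration that only carries the requested implementation name.
      static void checkTrackerName(String customSFT) {
        Configuration sftConfig = new Configuration();
        sftConfig.set(StoreFileTrackerFactory.TRACKER_IMPL, customSFT);
        StoreFileTrackerFactory.getTrackerClass(sftConfig);
      }

      public static void main(String[] args) {
        checkTrackerName("FILE"); // built-in tracker name, resolves fine
        try {
          checkTrackerName("IncorrectSFT"); // unknown name, rejected with a RuntimeException
        } catch (RuntimeException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }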
@@ -203,6 +216,37 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CloneSnapsho return Flow.HAS_MORE_STATE; } + /** + * If a StoreFileTracker is specified we strip the TableDescriptor from previous SFT config + * and set the specified SFT on the table level + */ + private void updateTableDescriptorWithSFT() { + if (StringUtils.isEmpty(customSFT)){ + return; + } + + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor); + builder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, customSFT); + for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()){ + ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family); + cfBuilder.setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, null); + cfBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, null); + builder.modifyColumnFamily(cfBuilder.build()); + } + tableDescriptor = builder.build(); + } + + private void validateSFT() { + if (StringUtils.isEmpty(customSFT)){ + return; + } + + //if customSFT is invalid getTrackerClass will throw a RuntimeException + Configuration sftConfig = new Configuration(); + sftConfig.set(StoreFileTrackerFactory.TRACKER_IMPL, customSFT); + StoreFileTrackerFactory.getTrackerClass(sftConfig); + } + @Override protected void rollbackState(final MasterProcedureEnv env, final CloneSnapshotState state) throws IOException { @@ -292,6 +336,9 @@ protected void serializeStateData(ProcedureStateSerializer serializer) cloneSnapshotMsg.addParentToChildRegionsPairList(parentToChildrenPair); } } + if (!StringUtils.isEmpty(customSFT)){ + cloneSnapshotMsg.setCustomSFT(customSFT); + } serializer.serialize(cloneSnapshotMsg.build()); } @@ -327,6 +374,9 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) parentToChildrenPair.getChild2RegionName())); } } + if (!StringUtils.isEmpty(cloneSnapshotMsg.getCustomSFT())){ + customSFT = cloneSnapshotMsg.getCustomSFT(); + } // Make sure that the monitor status is set up getMonitorStatus(); } @@ -340,6 +390,8 @@ private void prepareClone(final MasterProcedureEnv env) throws IOException { if (env.getMasterServices().getTableDescriptors().exists(tableName)) { throw new TableExistsException(tableName); } + + validateSFT(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java index e9440621e44c..6b28173d4e4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java @@ -25,6 +25,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -89,6 +90,7 @@ public RestoreSnapshotProcedure(final MasterProcedureEnv env, throws HBaseIOException { this(env, tableDescriptor, snapshot, false); } + /** * Constructor * @param env MasterProcedureEnv @@ -386,14 +388,15 @@ private void restoreSnapshot(final MasterProcedureEnv env) throws IOException { FileSystem fs = fileSystemManager.getFileSystem(); Path rootDir = fileSystemManager.getRootDir(); final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher(); + final Configuration conf = new 
Configuration(env.getMasterConfiguration()); LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)); try { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); SnapshotManifest manifest = SnapshotManifest.open( - env.getMasterServices().getConfiguration(), fs, snapshotDir, snapshot); + conf, fs, snapshotDir, snapshot); RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper( - env.getMasterServices().getConfiguration(), + conf, fs, manifest, modifiedTableDescriptor, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 3e8741f2281b..b35a3df5e959 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessChecker; @@ -751,8 +752,8 @@ private boolean isSnapshotCompleted(SnapshotDescription snapshot) throws IOExcep * @throws IOException */ private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName, - final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc, - final NonceKey nonceKey, final boolean restoreAcl) throws IOException { + final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc, + final NonceKey nonceKey, final boolean restoreAcl, final String customSFT) throws IOException { MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost(); TableDescriptor htd = TableDescriptorBuilder.copy(tableName, snapshotTableDesc); org.apache.hadoop.hbase.client.SnapshotDescription snapshotPOJO = null; @@ -762,7 +763,7 @@ private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableNam } long procId; try { - procId = cloneSnapshot(snapshot, htd, nonceKey, restoreAcl); + procId = cloneSnapshot(snapshot, htd, nonceKey, restoreAcl, customSFT); } catch (IOException e) { LOG.error("Exception occurred while cloning the snapshot " + snapshot.getName() + " as table " + tableName.getNameAsString(), e); @@ -786,7 +787,8 @@ private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableNam * @return procId the ID of the clone snapshot procedure */ synchronized long cloneSnapshot(final SnapshotDescription snapshot, - final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl) + final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl, + final String customSFT) throws HBaseSnapshotException { TableName tableName = tableDescriptor.getTableName(); @@ -803,7 +805,7 @@ synchronized long cloneSnapshot(final SnapshotDescription snapshot, try { long procId = master.getMasterProcedureExecutor().submitProcedure( new CloneSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(), - tableDescriptor, snapshot, restoreAcl), + tableDescriptor, snapshot, restoreAcl, customSFT), nonceKey); this.restoreTableToProcIdMap.put(tableName, procId); return 
procId; @@ -822,7 +824,7 @@ synchronized long cloneSnapshot(final SnapshotDescription snapshot, * @throws IOException */ public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey nonceKey, - final boolean restoreAcl) throws IOException { + final boolean restoreAcl, String customSFT) throws IOException { FileSystem fs = master.getMasterFileSystem().getFileSystem(); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir); @@ -854,11 +856,12 @@ public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final // Execute the restore/clone operation long procId; if (master.getTableDescriptors().exists(tableName)) { - procId = restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, - restoreAcl); + procId = + restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl); } else { procId = - cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl); + cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl, + customSFT); } return procId; } @@ -880,6 +883,10 @@ private long restoreSnapshot(final SnapshotDescription reqSnapshot, final TableN final NonceKey nonceKey, final boolean restoreAcl) throws IOException { MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost(); + //have to check first if restoring the snapshot would break current SFT setup + StoreFileTrackerFactory.validatePreRestoreSnapshot(master.getTableDescriptors().get(tableName), + snapshotTableDesc, master.getConfiguration()); + if (master.getTableStateManager().isTableState( TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) { throw new UnsupportedOperationException("Table '" + @@ -921,7 +928,7 @@ private long restoreSnapshot(final SnapshotDescription reqSnapshot, final TableN * @return procId the ID of the restore snapshot procedure */ private synchronized long restoreSnapshot(final SnapshotDescription snapshot, - final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl) + final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl) throws HBaseSnapshotException { final TableName tableName = tableDescriptor.getTableName(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java index a08ea724949e..24b9e13a1a20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreUtils; +import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -93,7 +94,7 @@ static String getStoreFileTrackerName(Class clazz) { return name != null ? 
name.name() : clazz.getName(); } - private static Class getTrackerClass(Configuration conf) { + public static Class getTrackerClass(Configuration conf) { try { Trackers tracker = Trackers.valueOf(getStoreFileTrackerName(conf).toUpperCase()); return tracker.clazz; @@ -312,4 +313,40 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa } } } + + /** + * Makes sure restoring a snapshot does not break the current SFT setup + * follows StoreUtils.createStoreConfiguration + * @param currentTableDesc Existing Table's TableDescriptor + * @param snapshotTableDesc Snapshot's TableDescriptor + * @param baseConf Current global configuration + * @throws RestoreSnapshotException if restore would break the current SFT setup + */ + public static void validatePreRestoreSnapshot(TableDescriptor currentTableDesc, + TableDescriptor snapshotTableDesc, Configuration baseConf) throws RestoreSnapshotException { + + for (ColumnFamilyDescriptor cfDesc : currentTableDesc.getColumnFamilies()) { + ColumnFamilyDescriptor snapCFDesc = snapshotTableDesc.getColumnFamily(cfDesc.getName()); + // if there is no counterpart in the snapshot it will be just deleted so the config does + // not matter + if (snapCFDesc != null) { + Configuration currentCompositeConf = + StoreUtils.createStoreConfiguration(baseConf, currentTableDesc, cfDesc); + Configuration snapCompositeConf = + StoreUtils.createStoreConfiguration(baseConf, snapshotTableDesc, snapCFDesc); + Class currentSFT = + StoreFileTrackerFactory.getTrackerClass(currentCompositeConf); + Class snapSFT = + StoreFileTrackerFactory.getTrackerClass(snapCompositeConf); + + //restoration is not possible if there is an SFT mismatch + if (currentSFT != snapSFT) { + throw new RestoreSnapshotException( + "Restoring Snapshot is not possible because " + " the config for column family " + + cfDesc.getNameAsString() + " has incompatible configuration. 
Current SFT: " + + currentSFT + " SFT from snapshot: " + snapSFT); + } + } + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 29d513a99876..a5e58ee2bb0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.security.access.AccessControlClient; @@ -72,9 +73,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; - import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; @@ -200,8 +199,8 @@ private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) thr List tableRegions = getTableRegions(); - RegionInfo mobRegion = MobUtils.getMobRegionInfo(snapshotManifest.getTableDescriptor() - .getTableName()); + RegionInfo mobRegion = + MobUtils.getMobRegionInfo(snapshotManifest.getTableDescriptor().getTableName()); if (tableRegions != null) { // restore the mob region in case if (regionNames.contains(mobRegion.getEncodedName())) { @@ -707,7 +706,9 @@ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo); - StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, + Configuration sftConf = StoreUtils.createStoreConfiguration(conf, tableDesc, + tableDesc.getColumnFamily(familyFiles.getFamilyName().toByteArray())); + StoreFileTracker tracker = StoreFileTrackerFactory.create(sftConf, true, StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). withRegionFileSystem(regionFS).build()); tracker.set(clonedFiles); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCustomSFT.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCustomSFT.java new file mode 100644 index 000000000000..9497967ab3ce --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCustomSFT.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import java.io.IOException; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ LargeTests.class, ClientTests.class }) +public class TestCloneSnapshotFromClientCustomSFT extends CloneSnapshotFromClientTestBase{ + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCloneSnapshotFromClientCustomSFT.class); + + public static final String CLONE_SFT = "FILE"; + + @Test + public void testCloneSnapshotWithCustomSFT() throws IOException, InterruptedException { + TableName clonedTableName = + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + + admin.cloneSnapshot(Bytes.toString(snapshotName1), clonedTableName, false, CLONE_SFT); + verifyRowCount(TEST_UTIL, clonedTableName, snapshot1Rows); + + TableDescriptor td = admin.getDescriptor(clonedTableName); + assertEquals(CLONE_SFT, td.getValue(StoreFileTrackerFactory.TRACKER_IMPL)); + + TEST_UTIL.deleteTable(clonedTableName); + } + + @Test + public void testCloneSnapshotWithIncorrectCustomSFT() throws IOException, InterruptedException { + TableName clonedTableName = + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + + IOException ioException = assertThrows(IOException.class, () -> { + admin.cloneSnapshot(Bytes.toString(snapshotName1), clonedTableName, false, "IncorrectSFT"); + }); + + assertEquals( + "java.lang.RuntimeException: java.lang.RuntimeException: " + + "java.lang.ClassNotFoundException: Class IncorrectSFT not found", + ioException.getMessage()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTracker.java index 98189729ac75..fc54eb057537 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTracker.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -28,7 +27,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; -import 
org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerFactory.java index 41f2afdfa421..91038e9fe176 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerFactory.java @@ -22,9 +22,16 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.StoreContext; +import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -55,4 +62,49 @@ public void testCreateForMigration() { assertThrows(IllegalArgumentException.class, () -> StoreFileTrackerFactory .createForMigration(conf, configName, false, StoreContext.getBuilder().build())); } + + @Test + public void testCheckSFTCompatibility() throws Exception { + //checking default value change on different configuration levels + Configuration conf = new Configuration(); + conf.set(StoreFileTrackerFactory.TRACKER_IMPL, "DEFAULT"); + + //creating a TD with only TableDescriptor level config + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf("TableX")); + builder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE"); + ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.of("cf"); + builder.setColumnFamily(cf); + TableDescriptor td = builder.build(); + + //creating a TD with matching ColumnFamilyDescriptor level setting + TableDescriptorBuilder snapBuilder = + TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY")); + snapBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE"); + ColumnFamilyDescriptorBuilder snapCFBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); + snapCFBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE"); + snapBuilder.setColumnFamily(snapCFBuilder.build()); + TableDescriptor snapTd = snapBuilder.build(); + + // adding a cf config that matches the td config is fine even when it does not match the default + StoreFileTrackerFactory.validatePreRestoreSnapshot(td, snapTd, conf); + // removing cf level config is fine when it matches the td config + StoreFileTrackerFactory.validatePreRestoreSnapshot(snapTd, td, conf); + + TableDescriptorBuilder defaultBuilder = + TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY")); + defaultBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE"); + ColumnFamilyDescriptorBuilder defaultCFBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); + 
defaultCFBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "DEFAULT"); + defaultBuilder.setColumnFamily(defaultCFBuilder.build()); + TableDescriptor defaultTd = defaultBuilder.build(); + + assertThrows(RestoreSnapshotException.class, () -> { + StoreFileTrackerFactory.validatePreRestoreSnapshot(td, defaultTd, conf); + }); + assertThrows(RestoreSnapshotException.class, () -> { + StoreFileTrackerFactory.validatePreRestoreSnapshot(snapTd, defaultTd, conf); + }); + } } diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 80b75393a011..1f166494769d 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1184,8 +1184,8 @@ def restore_snapshot(snapshot_name, restore_acl = false) #---------------------------------------------------------------------------------------------- # Create a new table by cloning the snapshot content - def clone_snapshot(snapshot_name, table, restore_acl = false) - @admin.cloneSnapshot(snapshot_name, TableName.valueOf(table), restore_acl) + def clone_snapshot(snapshot_name, table, restore_acl = false, clone_sft = nil) + @admin.cloneSnapshot(snapshot_name, TableName.valueOf(table), restore_acl, clone_sft) end #---------------------------------------------------------------------------------------------- diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb index 3c637b88029d..76631c91cf9f 100644 --- a/hbase-shell/src/main/ruby/hbase_constants.rb +++ b/hbase-shell/src/main/ruby/hbase_constants.rb @@ -40,6 +40,7 @@ module HBaseConstants CACHE = 'CACHE'.freeze CACHE_BLOCKS = 'CACHE_BLOCKS'.freeze CLASSNAME = 'CLASSNAME'.freeze + CLONE_SFT = 'CLONE_SFT'.freeze CLUSTER_KEY = 'CLUSTER_KEY'.freeze COLUMN = 'COLUMN'.freeze COLUMNS = 'COLUMNS'.freeze diff --git a/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb index abc975919d92..3edd16d326bf 100644 --- a/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb +++ b/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb @@ -33,13 +33,17 @@ def help newly created table. hbase> clone_snapshot 'snapshotName', 'namespace:tableName', {RESTORE_ACL=>true} + +StoreFileTracker implementation used after restore can be set by the following command. 
+ hbase> clone_snapshot 'snapshotName', 'namespace:tableName', {CLONE_SFT=>'FILE'} EOF end def command(snapshot_name, table, args = {}) raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash) restore_acl = args.delete(::HBaseConstants::RESTORE_ACL) || false - admin.clone_snapshot(snapshot_name, table, restore_acl) + clone_sft = args.delete(::HBaseConstants::CLONE_SFT) || nil + admin.clone_snapshot(snapshot_name, table, restore_acl, clone_sft) end def handle_exceptions(cause, *args) diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index fd551b720ffe..8f4a826789fc 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1058,8 +1058,8 @@ public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, } @Override - public Future cloneSnapshotAsync(String snapshotName, TableName tableName, boolean cloneAcl) - throws IOException, TableExistsException, RestoreSnapshotException { + public Future cloneSnapshotAsync(String snapshotName, TableName tableName, boolean cloneAcl, + String customSFT) throws IOException, TableExistsException, RestoreSnapshotException { throw new NotImplementedException("cloneSnapshotAsync not supported in ThriftAdmin"); }
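Reviewer note: the same knob is available through the asynchronous client, while ThriftAdmin above keeps rejecting the call. A hypothetical end-to-end sketch (names are placeholders, not part of the patch):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncCloneSnapshotWithCustomSftExample {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
            ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          AsyncAdmin admin = conn.getAdmin();
          // Same semantics as the blocking Admin overload: clone the snapshot and force the
          // FILE based StoreFileTracker onto the newly created table.
          admin.cloneSnapshot("mySnapshot", TableName.valueOf("clonedTable"), false, "FILE")
              .join();
        }
      }
    }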