From 91be2cf943b85238ab7afbc33d6caec0c18b04ff Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Tue, 19 Dec 2023 14:54:19 -0500 Subject: [PATCH 1/5] HBASE-28216 HDFS erasure coding support for table data dirs (#5579) Signed-off-by: Nihal Jain Signed-off-by: Wei-Chiu Chuang --- .../apache/hadoop/hbase/HTableDescriptor.java | 23 ++ .../hadoop/hbase/client/TableDescriptor.java | 9 + .../hbase/client/TableDescriptorBuilder.java | 36 +++ .../client/TestTableDescriptorBuilder.java | 4 +- .../src/main/protobuf/MasterProcedure.proto | 5 + .../hadoop/hbase/fs/ErasureCodingUtils.java | 185 +++++++++++ .../procedure/CloneSnapshotProcedure.java | 11 + .../procedure/CreateTableProcedure.java | 11 + .../procedure/ModifyTableProcedure.java | 29 +- .../procedure/RestoreSnapshotProcedure.java | 37 ++- .../master/snapshot/SnapshotManager.java | 3 +- .../CompactedHFilesDischarger.java | 5 +- .../hbase/util/TableDescriptorChecker.java | 79 +++-- .../TestManageTableErasureCodingPolicy.java | 302 ++++++++++++++++++ hbase-shell/src/main/ruby/hbase/admin.rb | 2 + .../hbase/client/AbstractTestShell.java | 8 +- hbase-shell/src/test/ruby/hbase/admin_test.rb | 26 +- 17 files changed, 716 insertions(+), 59 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 808cb5a40606..186108ea40ed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -86,6 +86,8 @@ public class HTableDescriptor implements TableDescriptor, Comparable getCoprocessors() { */ boolean isReadOnly(); + /** + * The HDFS erasure coding policy for a table. This will be set on the data dir of the table, and + * is an alternative to normal replication which takes less space at the cost of locality. + * @return the current policy, or null if undefined + */ + default String getErasureCodingPolicy() { + return null; + } + /** * Returns Name of this table and then a map of all of the column family descriptors (with only * the non-default column family attributes) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index 20511e446f0b..c01f1027b093 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -148,6 +148,14 @@ public class TableDescriptorBuilder { private static final Bytes REGION_MEMSTORE_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); + /** + * If non-null, the HDFS erasure coding policy to set on the data dir of the table + */ + public static final String ERASURE_CODING_POLICY = "ERASURE_CODING_POLICY"; + private static final Bytes ERASURE_CODING_POLICY_KEY = + new Bytes(Bytes.toBytes(ERASURE_CODING_POLICY)); + + private static final String DEFAULT_ERASURE_CODING_POLICY = null; /** * Used by shell/rest interface to access this metadata attribute which denotes if the table * should be treated by region normalizer. 
@@ -234,6 +242,7 @@ public class TableDescriptorBuilder { DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); // use the enum name DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY)); + DEFAULT_VALUES.put(ERASURE_CODING_POLICY, String.valueOf(DEFAULT_ERASURE_CODING_POLICY)); DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s))) .forEach(RESERVED_KEYWORDS::add); RESERVED_KEYWORDS.add(IS_META_KEY); @@ -532,6 +541,11 @@ public TableDescriptorBuilder setReadOnly(final boolean readOnly) { return this; } + public TableDescriptorBuilder setErasureCodingPolicy(String policy) { + desc.setErasureCodingPolicy(policy); + return this; + } + public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) { desc.setRegionMemStoreReplication(memstoreReplication); return this; @@ -802,6 +816,28 @@ public ModifyableTableDescriptor setReadOnly(final boolean readOnly) { return setValue(READONLY_KEY, Boolean.toString(readOnly)); } + /** + * The HDFS erasure coding policy for a table. This will be set on the data dir of the table, + * and is an alternative to normal replication which takes less space at the cost of locality. + * @return the current policy, or null if undefined + */ + @Override + public String getErasureCodingPolicy() { + return getValue(ERASURE_CODING_POLICY); + } + + /** + * Sets the HDFS erasure coding policy for the table. This will be propagated to HDFS for the + * data dir of the table. Erasure coding is an alternative to normal replication which takes + * less space at the cost of locality. The policy must be available and enabled on the hdfs + * cluster before being set. + * @param policy the policy to set, or null to disable erasure coding + * @return the modifyable TD + */ + public ModifyableTableDescriptor setErasureCodingPolicy(String policy) { + return setValue(ERASURE_CODING_POLICY_KEY, policy); + } + /** * Check if the compaction enable flag of the table is true. If flag is false then no * minor/major compactions will be done in real. 
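The ERASURE_CODING_POLICY attribute above is exposed to clients through TableDescriptorBuilder.setErasureCodingPolicy. Below is a minimal usage sketch, not part of this patch: the class, table, and column family names are illustrative assumptions, and the chosen policy must already be enabled on the backing HDFS cluster or the master's sanity checks will reject the descriptor.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class ErasureCodedTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // Create a table whose data dir uses the RS-6-3-1024k policy. The policy
      // must already be enabled on the underlying HDFS cluster. Names here are
      // illustrative only.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("usertable"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setErasureCodingPolicy("RS-6-3-1024k")
        .build();
      admin.createTable(desc);

      // Setting the policy back to null unsets erasure coding on the table dir;
      // existing HFiles keep their previous layout until rewritten by compaction.
      admin.modifyTable(
        TableDescriptorBuilder.newBuilder(desc).setErasureCodingPolicy(null).build());
    }
  }
}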
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index 053b15d477ca..8a653ddd8ab7 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -365,10 +365,10 @@ public void testStringCustomizedValues() throws HBaseException { htd.toStringCustomizedValues()); htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize("10737942528") - .setMemStoreFlushSize("256MB").build(); + .setMemStoreFlushSize("256MB").setErasureCodingPolicy("RS-6-3-1024k").build(); assertEquals( "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " - + "MAX_FILESIZE => '10737942528 B (10GB 512KB)', " + + "ERASURE_CODING_POLICY => 'RS-6-3-1024k', MAX_FILESIZE => '10737942528 B (10GB 512KB)', " + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, " + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", htd.toStringCustomizedValues()); diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto index 856cf668f183..03f8bde723b9 100644 --- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto @@ -56,6 +56,7 @@ enum CreateTableState { CREATE_TABLE_ASSIGN_REGIONS = 4; CREATE_TABLE_UPDATE_DESC_CACHE = 5; CREATE_TABLE_POST_OPERATION = 6; + CREATE_TABLE_SET_ERASURE_CODING_POLICY = 7; } message CreateTableStateData { @@ -74,6 +75,7 @@ enum ModifyTableState { MODIFY_TABLE_REOPEN_ALL_REGIONS = 7; MODIFY_TABLE_CLOSE_EXCESS_REPLICAS = 8; MODIFY_TABLE_ASSIGN_NEW_REPLICAS = 9; + MODIFY_TABLE_SYNC_ERASURE_CODING_POLICY = 10; } message ModifyTableStateData { @@ -267,6 +269,7 @@ enum CloneSnapshotState { CLONE_SNAPSHOT_UPDATE_DESC_CACHE = 5; CLONE_SNAPSHOT_POST_OPERATION = 6; CLONE_SNAPHOST_RESTORE_ACL = 7; + CLONE_SNAPSHOT_SET_ERASURE_CODING_POLICY = 8; } message CloneSnapshotStateData { @@ -285,6 +288,7 @@ enum RestoreSnapshotState { RESTORE_SNAPSHOT_WRITE_FS_LAYOUT = 3; RESTORE_SNAPSHOT_UPDATE_META = 4; RESTORE_SNAPSHOT_RESTORE_ACL = 5; + RESTORE_SNAPSHOT_SYNC_ERASURE_CODING_POLICY = 6; } message RestoreSnapshotStateData { @@ -296,6 +300,7 @@ message RestoreSnapshotStateData { repeated RegionInfo region_info_for_add = 6; repeated RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; optional bool restore_acl = 8; + required TableSchema old_table_schema = 9; } enum DispatchMergingRegionsState { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java new file mode 100644 index 000000000000..6e3c1e9a7887 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.fs; + +import java.io.IOException; +import java.util.Collection; +import java.util.Objects; +import java.util.stream.Collectors; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public final class ErasureCodingUtils { + + private ErasureCodingUtils() { + } + + private static final Logger LOG = LoggerFactory.getLogger(ErasureCodingUtils.class); + + /** + * Runs checks against the FileSystem, verifying that HDFS is supported and the policy is + * available, enabled, and works with a simple write. + */ + public static void verifySupport(Configuration conf, String policy) throws HBaseIOException { + DistributedFileSystem dfs = getDfs(conf); + checkAvailable(dfs, policy); + + // Enable the policy on a test directory. Try writing to it to ensure that HDFS allows it + // This acts as a safeguard against topology issues (not enough nodes for policy, etc) and + // anything else. This is otherwise hard to validate more directly. + Path globalTempDir = new Path(conf.get(HConstants.HBASE_DIR), HConstants.HBASE_TEMP_DIRECTORY); + Path currentTempDir = createTempDir(dfs, globalTempDir); + try { + setPolicy(dfs, currentTempDir, policy); + try (FSDataOutputStream out = dfs.create(new Path(currentTempDir, "test.out"))) { + out.writeUTF("Testing " + policy); + } + } catch (IOException e) { + throw new DoNotRetryIOException("Failed write test for EC policy. 
Check cause or logs", e); + } finally { + try { + dfs.delete(currentTempDir, true); + } catch (IOException e) { + LOG.warn("Failed to delete temp path for ec test", e); + } + } + } + + private static Path createTempDir(FileSystem fs, Path tempDir) throws HBaseIOException { + Path currentTempDir = new Path(tempDir, "ec-test-" + System.currentTimeMillis()); + try { + fs.mkdirs(currentTempDir); + fs.deleteOnExit(currentTempDir); + } catch (IOException e) { + throw new HBaseIOException("Failed to create test dir for EC write test", e); + } + return currentTempDir; + } + + private static void checkAvailable(DistributedFileSystem dfs, String policy) + throws HBaseIOException { + Collection policies; + try { + policies = dfs.getAllErasureCodingPolicies(); + } catch (IOException e) { + throw new HBaseIOException("Failed to check for Erasure Coding policy: " + policy, e); + } + for (ErasureCodingPolicyInfo policyInfo : policies) { + if (policyInfo.getPolicy().getName().equals(policy)) { + if (!policyInfo.isEnabled()) { + throw new DoNotRetryIOException("Cannot set Erasure Coding policy: " + policy + + ". The policy must be enabled, but has state " + policyInfo.getState()); + } + return; + } + } + throw new DoNotRetryIOException( + "Cannot set Erasure Coding policy: " + policy + ". Policy not found. Available policies are: " + + policies.stream().map(p -> p.getPolicy().getName()).collect(Collectors.joining(", "))); + } + + /** + * Check if EC policy is different between two descriptors + * @return true if a sync is necessary + */ + public static boolean needsSync(TableDescriptor oldDescriptor, TableDescriptor newDescriptor) { + String newPolicy = oldDescriptor.getErasureCodingPolicy(); + String oldPolicy = newDescriptor.getErasureCodingPolicy(); + return !Objects.equals(oldPolicy, newPolicy); + } + + /** + * Sync the EC policy state from the newDescriptor onto the FS for the table dir of the provided + * table descriptor. If the policy is null, we will remove erasure coding from the FS for the + * table dir. If it's non-null, we'll set it to that policy. + * @param newDescriptor descriptor containing the policy and table name + */ + public static void sync(FileSystem fs, Path rootDir, TableDescriptor newDescriptor) + throws IOException { + String newPolicy = newDescriptor.getErasureCodingPolicy(); + if (newPolicy == null) { + unsetPolicy(fs, rootDir, newDescriptor.getTableName()); + } else { + setPolicy(fs, rootDir, newDescriptor.getTableName(), newPolicy); + } + } + + /** + * Sets the EC policy on the table directory for the specified table + */ + public static void setPolicy(FileSystem fs, Path rootDir, TableName tableName, String policy) + throws IOException { + Path path = CommonFSUtils.getTableDir(rootDir, tableName); + setPolicy(fs, path, policy); + } + + /** + * Sets the EC policy on the path + */ + public static void setPolicy(FileSystem fs, Path path, String policy) throws IOException { + getDfs(fs).setErasureCodingPolicy(path, policy); + } + + /** + * Unsets any EC policy specified on the path. 
+ */ + public static void unsetPolicy(FileSystem fs, Path rootDir, TableName tableName) + throws IOException { + DistributedFileSystem dfs = getDfs(fs); + Path path = CommonFSUtils.getTableDir(rootDir, tableName); + if (dfs.getErasureCodingPolicy(path) == null) { + LOG.warn("No EC policy set for path {}, nothing to unset", path); + return; + } + dfs.unsetErasureCodingPolicy(path); + } + + private static DistributedFileSystem getDfs(Configuration conf) throws HBaseIOException { + try { + return getDfs(FileSystem.get(conf)); + } catch (DoNotRetryIOException e) { + throw e; + } catch (IOException e) { + throw new HBaseIOException("Failed to get FileSystem from conf", e); + } + + } + + private static DistributedFileSystem getDfs(FileSystem fs) throws DoNotRetryIOException { + if (!(fs instanceof DistributedFileSystem)) { + throw new DoNotRetryIOException( + "Cannot manage Erasure Coding policy. Erasure Coding is only available on HDFS, but fs is " + + fs.getClass().getSimpleName()); + } + return (DistributedFileSystem) fs; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java index 1a9c93b82c03..a2c4cd40c9c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.fs.ErasureCodingUtils; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MetricsSnapshot; @@ -155,6 +156,16 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CloneSnapsho updateTableDescriptorWithSFT(); newRegions = createFilesystemLayout(env, tableDescriptor, newRegions); env.getMasterServices().getTableDescriptors().update(tableDescriptor, true); + if (tableDescriptor.getErasureCodingPolicy() != null) { + setNextState(CloneSnapshotState.CLONE_SNAPSHOT_SET_ERASURE_CODING_POLICY); + } else { + setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META); + } + break; + case CLONE_SNAPSHOT_SET_ERASURE_CODING_POLICY: + ErasureCodingUtils.setPolicy(env.getMasterFileSystem().getFileSystem(), + env.getMasterFileSystem().getRootDir(), getTableName(), + tableDescriptor.getErasureCodingPolicy()); setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META); break; case CLONE_SNAPSHOT_ADD_TO_META: diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index 37aea5e2f7db..47122962502f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.fs.ErasureCodingUtils; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import 
org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; @@ -98,6 +99,16 @@ protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableS DeleteTableProcedure.deleteFromFs(env, getTableName(), newRegions, true); newRegions = createFsLayout(env, tableDescriptor, newRegions); env.getMasterServices().getTableDescriptors().update(tableDescriptor, true); + if (tableDescriptor.getErasureCodingPolicy() != null) { + setNextState(CreateTableState.CREATE_TABLE_SET_ERASURE_CODING_POLICY); + } else { + setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META); + } + break; + case CREATE_TABLE_SET_ERASURE_CODING_POLICY: + ErasureCodingUtils.setPolicy(env.getMasterFileSystem().getFileSystem(), + env.getMasterFileSystem().getRootDir(), getTableName(), + tableDescriptor.getErasureCodingPolicy()); setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META); break; case CREATE_TABLE_ADD_TO_META: diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index bc907154e7e4..30f7ef6ac7d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.fs.ErasureCodingUtils; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; @@ -115,6 +116,7 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H } } } + if (!reopenRegions) { if (this.unmodifiedTableDescriptor == null) { throw new HBaseIOException( @@ -220,9 +222,12 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS postModify(env, state); if (reopenRegions) { setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS); - } else { - return Flow.NO_MORE_STATE; - } + } else + if (ErasureCodingUtils.needsSync(unmodifiedTableDescriptor, modifiedTableDescriptor)) { + setNextState(ModifyTableState.MODIFY_TABLE_SYNC_ERASURE_CODING_POLICY); + } else { + return Flow.NO_MORE_STATE; + } break; case MODIFY_TABLE_REOPEN_ALL_REGIONS: if (isTableEnabled(env)) { @@ -246,12 +251,24 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS } if (deleteColumnFamilyInModify) { setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT); - } else { - return Flow.NO_MORE_STATE; - } + } else + if (ErasureCodingUtils.needsSync(unmodifiedTableDescriptor, modifiedTableDescriptor)) { + setNextState(ModifyTableState.MODIFY_TABLE_SYNC_ERASURE_CODING_POLICY); + } else { + return Flow.NO_MORE_STATE; + } break; case MODIFY_TABLE_DELETE_FS_LAYOUT: deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor); + if (ErasureCodingUtils.needsSync(unmodifiedTableDescriptor, modifiedTableDescriptor)) { + setNextState(ModifyTableState.MODIFY_TABLE_SYNC_ERASURE_CODING_POLICY); + break; + } else { + return Flow.NO_MORE_STATE; + } + case MODIFY_TABLE_SYNC_ERASURE_CODING_POLICY: + ErasureCodingUtils.sync(env.getMasterFileSystem().getFileSystem(), + env.getMasterFileSystem().getRootDir(), 
modifiedTableDescriptor); return Flow.NO_MORE_STATE; default: throw new UnsupportedOperationException("unhandled state=" + state); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java index cdc5f5ab3a27..623dacdd3314 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.favored.FavoredNodesManager; +import org.apache.hadoop.hbase.fs.ErasureCodingUtils; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MetricsSnapshot; import org.apache.hadoop.hbase.master.RegionState; @@ -69,6 +70,7 @@ public class RestoreSnapshotProcedure extends AbstractStateMachineTableProcedure { private static final Logger LOG = LoggerFactory.getLogger(RestoreSnapshotProcedure.class); + private TableDescriptor oldTableDescriptor; private TableDescriptor modifiedTableDescriptor; private List regionsToRestore = null; private List regionsToRemove = null; @@ -93,18 +95,25 @@ public RestoreSnapshotProcedure(final MasterProcedureEnv env, this(env, tableDescriptor, snapshot, false); } + public RestoreSnapshotProcedure(final MasterProcedureEnv env, + final TableDescriptor tableDescriptor, final SnapshotDescription snapshot, + final boolean restoreAcl) throws HBaseIOException { + this(env, tableDescriptor, tableDescriptor, snapshot, restoreAcl); + } + /** * Constructor - * @param env MasterProcedureEnv - * @param tableDescriptor the table to operate on - * @param snapshot snapshot to restore from + * @param env MasterProcedureEnv + * @param modifiedTableDescriptor the table to operate on + * @param snapshot snapshot to restore from */ public RestoreSnapshotProcedure(final MasterProcedureEnv env, - final TableDescriptor tableDescriptor, final SnapshotDescription snapshot, - final boolean restoreAcl) throws HBaseIOException { + final TableDescriptor oldTableDescriptor, final TableDescriptor modifiedTableDescriptor, + final SnapshotDescription snapshot, final boolean restoreAcl) throws HBaseIOException { super(env); + this.oldTableDescriptor = oldTableDescriptor; // This is the new schema we are going to write out as this modification. - this.modifiedTableDescriptor = tableDescriptor; + this.modifiedTableDescriptor = modifiedTableDescriptor; preflightChecks(env, null/* Table can be online when restore is called? */); // Snapshot information this.snapshot = snapshot; @@ -142,6 +151,18 @@ protected Flow executeFromState(final MasterProcedureEnv env, final RestoreSnaps break; case RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR: updateTableDescriptor(env); + // for restore, table dir already exists. sync EC if necessary before doing the real + // restore. this may be useful in certain restore scenarios where a user is explicitly + // trying to disable EC for some reason as part of the restore. 
+ if (ErasureCodingUtils.needsSync(oldTableDescriptor, modifiedTableDescriptor)) { + setNextState(RestoreSnapshotState.RESTORE_SNAPSHOT_SYNC_ERASURE_CODING_POLICY); + } else { + setNextState(RestoreSnapshotState.RESTORE_SNAPSHOT_WRITE_FS_LAYOUT); + } + break; + case RESTORE_SNAPSHOT_SYNC_ERASURE_CODING_POLICY: + ErasureCodingUtils.sync(env.getMasterFileSystem().getFileSystem(), + env.getMasterFileSystem().getRootDir(), modifiedTableDescriptor); setNextState(RestoreSnapshotState.RESTORE_SNAPSHOT_WRITE_FS_LAYOUT); break; case RESTORE_SNAPSHOT_WRITE_FS_LAYOUT: @@ -238,7 +259,8 @@ protected void serializeStateData(ProcedureStateSerializer serializer) throws IO RestoreSnapshotStateData.Builder restoreSnapshotMsg = RestoreSnapshotStateData.newBuilder() .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())).setSnapshot(this.snapshot) - .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor)); + .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor)) + .setOldTableSchema(ProtobufUtil.toTableSchema(oldTableDescriptor)); if (regionsToRestore != null) { for (RegionInfo hri : regionsToRestore) { @@ -280,6 +302,7 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws serializer.deserialize(RestoreSnapshotStateData.class); setUser(MasterProcedureUtil.toUserInfo(restoreSnapshotMsg.getUserInfo())); snapshot = restoreSnapshotMsg.getSnapshot(); + oldTableDescriptor = ProtobufUtil.toTableDescriptor(restoreSnapshotMsg.getOldTableSchema()); modifiedTableDescriptor = ProtobufUtil.toTableDescriptor(restoreSnapshotMsg.getModifiedTableSchema()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index ac8db8a59259..11c74054dedf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -1082,9 +1082,10 @@ private synchronized long restoreSnapshot(final SnapshotDescription snapshot, } try { + TableDescriptor oldDescriptor = master.getTableDescriptors().get(tableName); long procId = master.getMasterProcedureExecutor().submitProcedure( new RestoreSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(), - tableDescriptor, snapshot, restoreAcl), + oldDescriptor, tableDescriptor, snapshot, restoreAcl), nonceKey); this.restoreTableToProcIdMap.put(tableName, procId); return procId; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java index 984b9e09a5af..20c0d93b4708 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.regionserver; +import com.google.errorprone.annotations.RestrictedApi; import java.util.List; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Server; @@ -70,7 +71,9 @@ public CompactedHFilesDischarger(final int period, final Stoppable stopper, * no-executor before you call run. 
* @return The old setting for useExecutor */ - boolean setUseExecutor(final boolean useExecutor) { + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public boolean setUseExecutor(final boolean useExecutor) { boolean oldSetting = this.useExecutor; this.useExecutor = useExecutor; return oldSetting; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java index e7080fc9323f..94e2e4bbfa08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.fs.ErasureCodingUtils; import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; @@ -145,6 +146,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check bloom filter type checkBloomFilterType(conf, td); + if (td.getErasureCodingPolicy() != null) { + warnOrThrowExceptionForFailure(logWarn, + () -> ErasureCodingUtils.verifySupport(conf, td.getErasureCodingPolicy())); + } + for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) { if (hcd.getTimeToLive() <= 0) { String message = "TTL for column family " + hcd.getNameAsString() + " must be positive."; @@ -184,19 +190,13 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) } // check in-memory compaction - try { - hcd.getInMemoryCompaction(); - } catch (IllegalArgumentException e) { - warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e); - } + warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction); } } private static void checkReplicationScope(final Configuration conf, final TableDescriptor td) throws IOException { - // Setting logs to warning instead of throwing exception if sanityChecks are disabled - boolean logWarn = !shouldSanityCheck(conf); - try { + warnOrThrowExceptionForFailure(conf, () -> { for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { // check replication scope WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(cfd.getScope()); @@ -207,15 +207,12 @@ private static void checkReplicationScope(final Configuration conf, final TableD throw new DoNotRetryIOException(message); } } - } catch (IOException e) { - warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e); - } - + }); } private static void checkCompactionPolicy(final Configuration conf, final TableDescriptor td) throws IOException { - try { + warnOrThrowExceptionForFailure(false, () -> { // FIFO compaction has some requirements // Actually FCP ignores periodic major compactions String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY); @@ -268,16 +265,12 @@ private static void checkCompactionPolicy(final Configuration conf, final TableD throw new IOException(message); } } - } catch (IOException e) { - warnOrThrowExceptionForFailure(false, e.getMessage(), e); - } + }); } private static void checkBloomFilterType(final Configuration conf, final TableDescriptor td) throws IOException { - // Setting logs to warning instead of throwing exception 
if sanityChecks are disabled - boolean logWarn = !shouldSanityCheck(conf); - try { + warnOrThrowExceptionForFailure(conf, () -> { for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { Configuration cfdConf = new CompoundConfiguration().addStringMap(cfd.getConfiguration()); try { @@ -286,50 +279,36 @@ private static void checkBloomFilterType(final Configuration conf, final TableDe throw new DoNotRetryIOException("Failed to get bloom filter param", e); } } - } catch (IOException e) { - warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e); - } + }); } public static void checkCompression(final Configuration conf, final TableDescriptor td) throws IOException { - // Setting logs to warning instead of throwing exception if sanityChecks are disabled - boolean logWarn = !shouldSanityCheck(conf); - try { + warnOrThrowExceptionForFailure(conf, () -> { for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { CompressionTest.testCompression(cfd.getCompressionType()); CompressionTest.testCompression(cfd.getCompactionCompressionType()); CompressionTest.testCompression(cfd.getMajorCompactionCompressionType()); CompressionTest.testCompression(cfd.getMinorCompactionCompressionType()); } - } catch (IOException e) { - warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e); - } + }); } public static void checkEncryption(final Configuration conf, final TableDescriptor td) throws IOException { - // Setting logs to warning instead of throwing exception if sanityChecks are disabled - boolean logWarn = !shouldSanityCheck(conf); - try { + warnOrThrowExceptionForFailure(conf, () -> { for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey()); } - } catch (IOException e) { - warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e); - } + }); } public static void checkClassLoading(final Configuration conf, final TableDescriptor td) throws IOException { - // Setting logs to warning instead of throwing exception if sanityChecks are disabled - boolean logWarn = !shouldSanityCheck(conf); - try { + warnOrThrowExceptionForFailure(conf, () -> { RegionSplitPolicy.getSplitPolicyClass(td, conf); RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td); - } catch (Exception e) { - warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e); - } + }); } // HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled. 
@@ -341,4 +320,24 @@ private static void warnOrThrowExceptionForFailure(boolean logWarn, String messa } LOG.warn(message); } + + private static void warnOrThrowExceptionForFailure(Configuration conf, ThrowingRunnable runnable) + throws IOException { + boolean logWarn = !shouldSanityCheck(conf); + warnOrThrowExceptionForFailure(logWarn, runnable); + } + + private static void warnOrThrowExceptionForFailure(boolean logWarn, ThrowingRunnable runnable) + throws IOException { + try { + runnable.run(); + } catch (Exception e) { + warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e); + } + } + + @FunctionalInterface + interface ThrowingRunnable { + void run() throws Exception; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java new file mode 100644 index 000000000000..e6d1c43e7909 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java @@ -0,0 +1,302 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertThrows; + +import java.io.IOException; +import java.util.function.Function; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.apache.hadoop.hbase.util.TableDescriptorChecker; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestManageTableErasureCodingPolicy { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestManageTableErasureCodingPolicy.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestManageTableErasureCodingPolicy.class); + + private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + private static final byte[] FAMILY = Bytes.toBytes("a"); + private static final TableName NON_EC_TABLE = TableName.valueOf("foo"); + private static final TableDescriptor NON_EC_TABLE_DESC = TableDescriptorBuilder + .newBuilder(NON_EC_TABLE).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + private static final TableName EC_TABLE = TableName.valueOf("bar"); + private static final TableDescriptor EC_TABLE_DESC = + TableDescriptorBuilder.newBuilder(EC_TABLE).setErasureCodingPolicy("XOR-2-1-1024k") + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + + @BeforeClass + public static void beforeClass() throws Exception { + // enable because we are testing the checks below + UTIL.getConfiguration().setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, true); + UTIL.startMiniDFSCluster(3); // 3 necessary for XOR-2-1-1024k + UTIL.startMiniCluster(1); + DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(UTIL.getConfiguration()); + fs.enableErasureCodingPolicy("XOR-2-1-1024k"); + fs.enableErasureCodingPolicy("RS-6-3-1024k"); + Table table = UTIL.createTable(NON_EC_TABLE_DESC, null); + 
UTIL.loadTable(table, FAMILY); + UTIL.flush(); + } + + @AfterClass + public static void afterClass() throws Exception { + UTIL.shutdownMiniCluster(); + UTIL.shutdownMiniDFSCluster(); + } + + @Test + public void itValidatesPolicyNameForCreate() { + runValidatePolicyNameTest(unused -> EC_TABLE_DESC, Admin::createTable); + } + + @Test + public void itValidatesPolicyNameForAlter() { + runValidatePolicyNameTest(admin -> { + try { + return admin.getDescriptor(NON_EC_TABLE); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, Admin::modifyTable); + } + + @FunctionalInterface + interface ThrowingTableDescriptorConsumer { + void accept(Admin admin, TableDescriptor desc) throws IOException; + } + + private void runValidatePolicyNameTest(Function descriptorSupplier, + ThrowingTableDescriptorConsumer consumer) { + HBaseIOException thrown = assertThrows(HBaseIOException.class, () -> { + try (Admin admin = UTIL.getAdmin()) { + TableDescriptor desc = descriptorSupplier.apply(admin); + consumer.accept(admin, + TableDescriptorBuilder.newBuilder(desc).setErasureCodingPolicy("foo").build()); + } + }); + assertThat(thrown.getMessage(), + containsString("Cannot set Erasure Coding policy: foo. Policy not found")); + + thrown = assertThrows(HBaseIOException.class, () -> { + try (Admin admin = UTIL.getAdmin()) { + TableDescriptor desc = descriptorSupplier.apply(admin); + consumer.accept(admin, + TableDescriptorBuilder.newBuilder(desc).setErasureCodingPolicy("RS-10-4-1024k").build()); + } + }); + assertThat(thrown.getMessage(), containsString( + "Cannot set Erasure Coding policy: RS-10-4-1024k. The policy must be enabled")); + + // RS-6-3-1024k requires at least 6 datanodes, so should fail write test + thrown = assertThrows(HBaseIOException.class, () -> { + try (Admin admin = UTIL.getAdmin()) { + TableDescriptor desc = descriptorSupplier.apply(admin); + consumer.accept(admin, + TableDescriptorBuilder.newBuilder(desc).setErasureCodingPolicy("RS-6-3-1024k").build()); + } + }); + assertThat(thrown.getMessage(), containsString("Failed write test for EC policy")); + } + + @Test + public void testCreateTableErasureCodingSync() throws IOException { + try (Admin admin = UTIL.getAdmin()) { + recreateTable(admin, EC_TABLE_DESC); + UTIL.flush(EC_TABLE); + Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); + DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(UTIL.getConfiguration()); + checkRegionDirAndFilePolicies(dfs, rootDir, EC_TABLE, "XOR-2-1-1024k", "XOR-2-1-1024k"); + } + } + + private void recreateTable(Admin admin, TableDescriptor desc) throws IOException { + if (admin.tableExists(desc.getTableName())) { + admin.disableTable(desc.getTableName()); + admin.deleteTable(desc.getTableName()); + } + admin.createTable(desc); + try (Table table = UTIL.getConnection().getTable(desc.getTableName())) { + UTIL.loadTable(table, FAMILY); + } + } + + @Test + public void testModifyTableErasureCodingSync() throws IOException, InterruptedException { + try (Admin admin = UTIL.getAdmin()) { + Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); + DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(UTIL.getConfiguration()); + + // start off without EC + checkRegionDirAndFilePolicies(dfs, rootDir, NON_EC_TABLE, null, null); + + // add EC + TableDescriptor desc = UTIL.getAdmin().getDescriptor(NON_EC_TABLE); + TableDescriptor newDesc = + TableDescriptorBuilder.newBuilder(desc).setErasureCodingPolicy("XOR-2-1-1024k").build(); + admin.modifyTable(newDesc); + + // 
check dirs, but files should not be changed yet + checkRegionDirAndFilePolicies(dfs, rootDir, NON_EC_TABLE, "XOR-2-1-1024k", null); + + compactAwayOldFiles(NON_EC_TABLE); + + // expect both dirs and files to be EC now + checkRegionDirAndFilePolicies(dfs, rootDir, NON_EC_TABLE, "XOR-2-1-1024k", "XOR-2-1-1024k"); + + newDesc = TableDescriptorBuilder.newBuilder(newDesc).setErasureCodingPolicy(null).build(); + // remove EC now + admin.modifyTable(newDesc); + + // dirs should no longer be EC, but old EC files remain + checkRegionDirAndFilePolicies(dfs, rootDir, NON_EC_TABLE, null, "XOR-2-1-1024k"); + + // compact to rewrite EC files without EC, then run discharger to get rid of the old EC files + UTIL.compact(NON_EC_TABLE, true); + for (JVMClusterUtil.RegionServerThread regionserver : UTIL.getHBaseCluster() + .getLiveRegionServerThreads()) { + CompactedHFilesDischarger chore = + regionserver.getRegionServer().getCompactedHFilesDischarger(); + chore.setUseExecutor(false); + chore.chore(); + } + + checkRegionDirAndFilePolicies(dfs, rootDir, NON_EC_TABLE, null, null); + } + } + + private void compactAwayOldFiles(TableName tableName) throws IOException { + LOG.info("Compacting and discharging files for {}", tableName); + // compact to rewrit files, then run discharger to get rid of the old files + UTIL.compact(tableName, true); + for (JVMClusterUtil.RegionServerThread regionserver : UTIL.getHBaseCluster() + .getLiveRegionServerThreads()) { + CompactedHFilesDischarger chore = + regionserver.getRegionServer().getCompactedHFilesDischarger(); + chore.setUseExecutor(false); + chore.chore(); + } + } + + @Test + public void testRestoreSnapshot() throws IOException { + String snapshotName = "testRestoreSnapshot_snap"; + TableName tableName = TableName.valueOf("testRestoreSnapshot_tbl"); + try (Admin admin = UTIL.getAdmin()) { + Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); + DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(UTIL.getConfiguration()); + + // recreate EC test table and load it + recreateTable(admin, EC_TABLE_DESC); + + // Take a snapshot, then clone it into a new table + admin.snapshot(snapshotName, EC_TABLE); + admin.cloneSnapshot(snapshotName, tableName); + compactAwayOldFiles(tableName); + + // Verify the new table has the right EC policy + checkRegionDirAndFilePolicies(dfs, rootDir, tableName, "XOR-2-1-1024k", "XOR-2-1-1024k"); + + // Remove the EC policy from the EC test table, and verify that worked + admin.modifyTable( + TableDescriptorBuilder.newBuilder(EC_TABLE_DESC).setErasureCodingPolicy(null).build()); + compactAwayOldFiles(EC_TABLE); + checkRegionDirAndFilePolicies(dfs, rootDir, EC_TABLE, null, null); + + // Restore snapshot, and then verify it has the policy again + admin.disableTable(EC_TABLE); + admin.restoreSnapshot(snapshotName); + admin.enableTable(EC_TABLE); + compactAwayOldFiles(EC_TABLE); + checkRegionDirAndFilePolicies(dfs, rootDir, EC_TABLE, "XOR-2-1-1024k", "XOR-2-1-1024k"); + } + } + + private void checkRegionDirAndFilePolicies(DistributedFileSystem dfs, Path rootDir, + TableName testTable, String expectedDirPolicy, String expectedFilePolicy) throws IOException { + Path tableDir = CommonFSUtils.getTableDir(rootDir, testTable); + checkPolicy(dfs, tableDir, expectedDirPolicy); + + int filesMatched = 0; + for (HRegion region : UTIL.getHBaseCluster().getRegions(testTable)) { + Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName()); + checkPolicy(dfs, regionDir, expectedDirPolicy); + RemoteIterator itr = 
dfs.listFiles(regionDir, true); + while (itr.hasNext()) { + LocatedFileStatus fileStatus = itr.next(); + Path path = fileStatus.getPath(); + if (!HFile.isHFileFormat(dfs, path)) { + LOG.info("{} is not an hfile", path); + continue; + } + filesMatched++; + checkPolicy(dfs, path, expectedFilePolicy); + } + } + assertThat(filesMatched, greaterThan(0)); + } + + private void checkPolicy(DistributedFileSystem dfs, Path path, String expectedPolicy) + throws IOException { + ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(path); + if (expectedPolicy == null) { + assertThat("policy for " + path, policy, nullValue()); + } else { + assertThat("policy for " + path, policy, notNullValue()); + assertThat("policy for " + path, policy.getName(), equalTo(expectedPolicy)); + } + } +} diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 1f2e684dce91..a037aa480b26 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1516,6 +1516,8 @@ def list_locks # Parse arguments and update HTableDescriptor accordingly def update_htd_from_arg(htd, arg) + htd.setErasureCodingPolicy(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::ERASURE_CODING_POLICY)) \ + if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::ERASURE_CODING_POLICY) htd.setOwnerString(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::OWNER)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::OWNER) htd.setMaxFileSize(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE) htd.setReadOnly(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::READONLY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::READONLY) diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java index 9dbea245edf4..65a9088589ee 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java @@ -21,11 +21,13 @@ import java.util.ArrayList; import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.security.access.SecureTestUtil; import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.jruby.embed.PathType; import org.jruby.embed.ScriptingContainer; import org.junit.AfterClass; @@ -99,7 +101,11 @@ public static void setUpBeforeClass() throws Exception { setUpConfig(); // Start mini cluster - TEST_UTIL.startMiniCluster(1); + // 3 datanodes needed for erasure coding checks + TEST_UTIL.startMiniCluster(3); + DistributedFileSystem dfs = + (DistributedFileSystem) FileSystem.get(TEST_UTIL.getConfiguration()); + dfs.enableErasureCodingPolicy("XOR-2-1-1024k"); setUpJRubyRuntime(); } diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index dcd3d850343e..e593c32f4521 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -412,7 +412,7 @@ def teardown define_test 'clear slowlog responses should work' do output = 
capture_stdout { command(:clear_slowlog_responses, nil) } - assert(output.include?('Cleared Slowlog responses from 0/1 RegionServers')) + assert(output.include?('Cleared Slowlog responses from 0/3 RegionServers')) end #------------------------------------------------------------------------------- @@ -468,6 +468,7 @@ def teardown drop_test_table(@create_test_name) command(:create, @create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678, OWNER => '987654321', + ERASURE_CODING_POLICY => 'XOR-2-1-1024k', PRIORITY => '77', FLUSH_POLICY => 'org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy', REGION_MEMSTORE_REPLICATION => 'TRUE', @@ -478,6 +479,7 @@ def teardown assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) assert_match(/12345678/, admin.describe(@create_test_name)) assert_match(/987654321/, admin.describe(@create_test_name)) + assert_match(/XOR-2-1-1024k/, admin.describe(@create_test_name)) assert_match(/77/, admin.describe(@create_test_name)) assert_match(/'COMPACTION_ENABLED' => 'false'/, admin.describe(@create_test_name)) assert_match(/'SPLIT_ENABLED' => 'false'/, admin.describe(@create_test_name)) @@ -957,6 +959,28 @@ def teardown assert_match(/12345678/, admin.describe(@test_name)) end + define_test 'alter should be able to change EC policy' do + command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + assert_match(/XOR-2-1-1024k/, admin.describe(@test_name)) + end + + define_test 'alter should be able to remove EC policy' do + command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + command(:alter, @test_name, METHOD => 'table_att_unset', NAME => 'ERASURE_CODING_POLICY') + assert_not_match(/ERASURE_CODING_POLICY/, admin.describe(@test_name)) + end + + define_test 'alter should be able to change EC POLICY w/o table_att' do + command(:alter, @test_name, 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + assert_match(/XOR-2-1-1024k/, admin.describe(@test_name)) + end + + define_test 'alter should be able to remove EC POLICY w/o table_att' do + command(:alter, @test_name, 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + command(:alter, @test_name, 'ERASURE_CODING_POLICY' => nil) + assert_not_match(/ERASURE_CODING_POLICY/, admin.describe(@test_name)) + end + define_test "alter should be able to change coprocessor attributes" do drop_test_table(@test_name) create_test_table(@test_name) From 6fd0cd4624c70da909f73ef3d7a7a753a4340baa Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Tue, 9 Jan 2024 17:14:07 -0500 Subject: [PATCH 2/5] more --- .../hadoop/hbase/fs/ErasureCodingUtils.java | 109 +++++++++++++++--- .../TestManageTableErasureCodingPolicy.java | 32 +++-- .../hbase/client/AbstractTestShell.java | 12 +- 3 files changed, 126 insertions(+), 27 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java index 6e3c1e9a7887..1cbd74d308b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.fs; import java.io.IOException; +import java.lang.reflect.InvocationTargetException; import java.util.Collection; import java.util.Objects; import java.util.stream.Collectors; @@ -31,8 +32,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableDescriptor; import 
org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,26 +86,59 @@ private static Path createTempDir(FileSystem fs, Path tempDir) throws HBaseIOExc return currentTempDir; } - private static void checkAvailable(DistributedFileSystem dfs, String policy) + private static void checkAvailable(DistributedFileSystem dfs, String requestedPolicy) throws HBaseIOException { - Collection policies; + Collection policies; + try { - policies = dfs.getAllErasureCodingPolicies(); + policies = callDfsMethod(dfs, "getAllErasureCodingPolicies"); } catch (IOException e) { - throw new HBaseIOException("Failed to check for Erasure Coding policy: " + policy, e); + throw new HBaseIOException("Failed to check for Erasure Coding policy: " + requestedPolicy, e); } - for (ErasureCodingPolicyInfo policyInfo : policies) { - if (policyInfo.getPolicy().getName().equals(policy)) { - if (!policyInfo.isEnabled()) { - throw new DoNotRetryIOException("Cannot set Erasure Coding policy: " + policy - + ". The policy must be enabled, but has state " + policyInfo.getState()); - } + for (Object policyInfo : policies) { + if (checkPolicyMatch(policyInfo, requestedPolicy)) { return; } } throw new DoNotRetryIOException( - "Cannot set Erasure Coding policy: " + policy + ". Policy not found. Available policies are: " - + policies.stream().map(p -> p.getPolicy().getName()).collect(Collectors.joining(", "))); + "Cannot set Erasure Coding policy: " + requestedPolicy + ". Policy not found. Available policies are: " + + getPolicyNames(policies)); + } + + private static boolean checkPolicyMatch(Object policyInfo, String requestedPolicy) + throws DoNotRetryIOException { + try { + String policyName = getPolicyNameFromInfo(policyInfo); + if (requestedPolicy.equals(policyName)) { + boolean isEnabled = callObjectMethod(policyInfo, "isEnabled"); + if (!isEnabled) { + throw new DoNotRetryIOException("Cannot set Erasure Coding policy: " + requestedPolicy + ". 
The policy must be enabled, but has state " + callObjectMethod(policyInfo, + "getState")); + } + return true; + } + } catch (DoNotRetryIOException e) { + throw e; + } catch (IOException e) { + throw new DoNotRetryIOException("Unable to check for match of Erasure Coding Policy " + policyInfo, e); + } + return false; + } + + private static String getPolicyNameFromInfo(Object policyInfo) throws IOException { + Object policy = callObjectMethod(policyInfo, "getPolicy"); + return callObjectMethod(policy, "getName"); + } + + private static String getPolicyNames(Collection policyInfos) { + return policyInfos.stream().map(p -> { + try { + return getPolicyNameFromInfo(p); + } catch (IOException e) { + LOG.warn("Could not extract policy name from {}", p, e); + return "unknown"; + } + }).collect(Collectors.joining(", ")); } /** @@ -146,7 +180,7 @@ public static void setPolicy(FileSystem fs, Path rootDir, TableName tableName, S * Sets the EC policy on the path */ public static void setPolicy(FileSystem fs, Path path, String policy) throws IOException { - getDfs(fs).setErasureCodingPolicy(path, policy); + callDfsMethod(getDfs(fs), "setErasureCodingPolicy", path, policy); } /** @@ -156,11 +190,15 @@ public static void unsetPolicy(FileSystem fs, Path rootDir, TableName tableName) throws IOException { DistributedFileSystem dfs = getDfs(fs); Path path = CommonFSUtils.getTableDir(rootDir, tableName); - if (dfs.getErasureCodingPolicy(path) == null) { + if (getPolicyNameForPath(dfs, path) == null) { LOG.warn("No EC policy set for path {}, nothing to unset", path); return; } - dfs.unsetErasureCodingPolicy(path); + callDfsMethod(dfs, "unsetErasureCodingPolicy", path); + } + + public static void enablePolicy(FileSystem fs, String policy) throws IOException { + callDfsMethod(getDfs(fs), "enableErasureCodingPolicy", policy); } private static DistributedFileSystem getDfs(Configuration conf) throws HBaseIOException { @@ -182,4 +220,43 @@ private static DistributedFileSystem getDfs(FileSystem fs) throws DoNotRetryIOEx } return (DistributedFileSystem) fs; } + + public static String getPolicyNameForPath(DistributedFileSystem dfs, Path path) + throws IOException { + Object policy = callDfsMethod(dfs, "getErasureCodingPolicy", path); + if (policy == null) { + return null; + } + return callObjectMethod(policy, "getName"); + } + + private interface ThrowingObjectSupplier { + Object run() throws IOException; + } + + private static T callDfsMethod(DistributedFileSystem dfs, String name, Object... params) + throws IOException { + return callObjectMethod(dfs, name, params); + } + + private static T callObjectMethod(Object object, String name, Object... 
params) + throws IOException { + return unwrapInvocationException(() -> ReflectionUtils.invokeMethod(object, name, params)); + } + + private static T unwrapInvocationException(ThrowingObjectSupplier runnable) throws IOException { + try { + return (T) runnable.run(); + } catch (UnsupportedOperationException e) { + if (e.getCause() instanceof InvocationTargetException) { + Throwable cause = e.getCause().getCause(); + if (cause instanceof IOException) { + throw (IOException) cause; + } else if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } + } + throw e; + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java index e6d1c43e7909..a875d088a5d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java @@ -21,9 +21,9 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.junit.Assert.assertThrows; +import static org.junit.Assume.*; import java.io.IOException; import java.util.function.Function; @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.fs.ErasureCodingUtils; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -50,8 +51,8 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.TableDescriptorChecker; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.junit.AfterClass; +import org.junit.Assume; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -77,6 +78,7 @@ public class TestManageTableErasureCodingPolicy { private static final TableDescriptor EC_TABLE_DESC = TableDescriptorBuilder.newBuilder(EC_TABLE).setErasureCodingPolicy("XOR-2-1-1024k") .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + private static boolean erasureCodingSupported; @BeforeClass public static void beforeClass() throws Exception { @@ -85,13 +87,25 @@ public static void beforeClass() throws Exception { UTIL.startMiniDFSCluster(3); // 3 necessary for XOR-2-1-1024k UTIL.startMiniCluster(1); DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(UTIL.getConfiguration()); - fs.enableErasureCodingPolicy("XOR-2-1-1024k"); - fs.enableErasureCodingPolicy("RS-6-3-1024k"); + + erasureCodingSupported = enableErasureCoding(fs); + Table table = UTIL.createTable(NON_EC_TABLE_DESC, null); UTIL.loadTable(table, FAMILY); UTIL.flush(); } + private static boolean enableErasureCoding(DistributedFileSystem fs) throws IOException { + try { + ErasureCodingUtils.enablePolicy(fs, "XOR-2-1-1024k"); + ErasureCodingUtils.enablePolicy(fs, "RS-6-3-1024k"); + return true; + } catch (UnsupportedOperationException e) { + LOG.info("Current hadoop version does not support erasure coding, only validation tests will 
run."); + return false; + } + } + @AfterClass public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); @@ -154,6 +168,7 @@ private void runValidatePolicyNameTest(Function descript @Test public void testCreateTableErasureCodingSync() throws IOException { + assumeTrue(erasureCodingSupported); try (Admin admin = UTIL.getAdmin()) { recreateTable(admin, EC_TABLE_DESC); UTIL.flush(EC_TABLE); @@ -175,7 +190,8 @@ private void recreateTable(Admin admin, TableDescriptor desc) throws IOException } @Test - public void testModifyTableErasureCodingSync() throws IOException, InterruptedException { + public void testModifyTableErasureCodingSync() throws IOException { + assumeTrue(erasureCodingSupported); try (Admin admin = UTIL.getAdmin()) { Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(UTIL.getConfiguration()); @@ -233,6 +249,7 @@ private void compactAwayOldFiles(TableName tableName) throws IOException { @Test public void testRestoreSnapshot() throws IOException { + assumeTrue(erasureCodingSupported); String snapshotName = "testRestoreSnapshot_snap"; TableName tableName = TableName.valueOf("testRestoreSnapshot_tbl"); try (Admin admin = UTIL.getAdmin()) { @@ -291,12 +308,11 @@ private void checkRegionDirAndFilePolicies(DistributedFileSystem dfs, Path rootD private void checkPolicy(DistributedFileSystem dfs, Path path, String expectedPolicy) throws IOException { - ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(path); + String policy = ErasureCodingUtils.getPolicyNameForPath(dfs, path); if (expectedPolicy == null) { assertThat("policy for " + path, policy, nullValue()); } else { - assertThat("policy for " + path, policy, notNullValue()); - assertThat("policy for " + path, policy.getName(), equalTo(expectedPolicy)); + assertThat("policy for " + path, policy, equalTo(expectedPolicy)); } } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java index 65a9088589ee..d59ab5bb691f 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.fs.ErasureCodingUtils; import org.apache.hadoop.hbase.security.access.SecureTestUtil; import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -43,6 +44,8 @@ public abstract class AbstractTestShell { protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); protected final static ScriptingContainer jruby = new ScriptingContainer(); + protected static boolean erasureCodingSupported = false; + protected static void setUpConfig() throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt("hbase.regionserver.msginterval", 100); @@ -103,9 +106,12 @@ public static void setUpBeforeClass() throws Exception { // Start mini cluster // 3 datanodes needed for erasure coding checks TEST_UTIL.startMiniCluster(3); - DistributedFileSystem dfs = - (DistributedFileSystem) FileSystem.get(TEST_UTIL.getConfiguration()); - dfs.enableErasureCodingPolicy("XOR-2-1-1024k"); + try { + 
ErasureCodingUtils.enablePolicy(FileSystem.get(TEST_UTIL.getConfiguration()), "XOR-2-1-1024k"); + erasureCodingSupported = true; + } catch (UnsupportedOperationException e) { + LOG.info("Current hadoop version does not support erasure coding, only validation tests will run."); + } setUpJRubyRuntime(); } From cb2022ab5a291f7f3ead7bed7e303d2cb27923e0 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Tue, 23 Jan 2024 18:51:46 -0500 Subject: [PATCH 3/5] fixes --- .../apache/hadoop/hbase/HTableDescriptor.java | 3 +- .../hadoop/hbase/fs/ErasureCodingUtils.java | 21 +++++---- .../TestManageTableErasureCodingPolicy.java | 22 ++++++--- .../hbase/client/AbstractTestShell.java | 23 +++++----- .../hadoop/hbase/client/TestAdminShell.java | 29 ++++++++++++ hbase-shell/src/test/ruby/hbase/admin_test.rb | 45 +++++++++++-------- 6 files changed, 94 insertions(+), 49 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 186108ea40ed..7515bcdf3250 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -269,8 +269,9 @@ public HTableDescriptor setCompactionEnabled(final boolean isEnable) { * being set. * @param policy the policy to set, or null to disable erasure coding */ - public void setErasureCodingPolicy(final String policy) { + public HTableDescriptor setErasureCodingPolicy(final String policy) { getDelegateeForModification().setErasureCodingPolicy(policy); + return this; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java index 1cbd74d308b2..304980c10216 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/ErasureCodingUtils.java @@ -93,16 +93,16 @@ private static void checkAvailable(DistributedFileSystem dfs, String requestedPo try { policies = callDfsMethod(dfs, "getAllErasureCodingPolicies"); } catch (IOException e) { - throw new HBaseIOException("Failed to check for Erasure Coding policy: " + requestedPolicy, e); + throw new HBaseIOException("Failed to check for Erasure Coding policy: " + requestedPolicy, + e); } for (Object policyInfo : policies) { if (checkPolicyMatch(policyInfo, requestedPolicy)) { return; } } - throw new DoNotRetryIOException( - "Cannot set Erasure Coding policy: " + requestedPolicy + ". Policy not found. Available policies are: " - + getPolicyNames(policies)); + throw new DoNotRetryIOException("Cannot set Erasure Coding policy: " + requestedPolicy + + ". Policy not found. Available policies are: " + getPolicyNames(policies)); } private static boolean checkPolicyMatch(Object policyInfo, String requestedPolicy) @@ -112,15 +112,17 @@ private static boolean checkPolicyMatch(Object policyInfo, String requestedPolic if (requestedPolicy.equals(policyName)) { boolean isEnabled = callObjectMethod(policyInfo, "isEnabled"); if (!isEnabled) { - throw new DoNotRetryIOException("Cannot set Erasure Coding policy: " + requestedPolicy + ". The policy must be enabled, but has state " + callObjectMethod(policyInfo, - "getState")); + throw new DoNotRetryIOException("Cannot set Erasure Coding policy: " + requestedPolicy + + ". 
The policy must be enabled, but has state " + + callObjectMethod(policyInfo, "getState")); } return true; } } catch (DoNotRetryIOException e) { throw e; } catch (IOException e) { - throw new DoNotRetryIOException("Unable to check for match of Erasure Coding Policy " + policyInfo, e); + throw new DoNotRetryIOException( + "Unable to check for match of Erasure Coding Policy " + policyInfo, e); } return false; } @@ -227,7 +229,7 @@ public static String getPolicyNameForPath(DistributedFileSystem dfs, Path path) if (policy == null) { return null; } - return callObjectMethod(policy, "getName"); + return callObjectMethod(policy, "getName"); } private interface ThrowingObjectSupplier { @@ -244,7 +246,8 @@ private static T callObjectMethod(Object object, String name, Object... para return unwrapInvocationException(() -> ReflectionUtils.invokeMethod(object, name, params)); } - private static T unwrapInvocationException(ThrowingObjectSupplier runnable) throws IOException { + private static T unwrapInvocationException(ThrowingObjectSupplier runnable) + throws IOException { try { return (T) runnable.run(); } catch (UnsupportedOperationException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java index a875d088a5d8..c778544d8781 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java @@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.util.TableDescriptorChecker; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.junit.AfterClass; -import org.junit.Assume; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -101,7 +100,8 @@ private static boolean enableErasureCoding(DistributedFileSystem fs) throws IOEx ErasureCodingUtils.enablePolicy(fs, "RS-6-3-1024k"); return true; } catch (UnsupportedOperationException e) { - LOG.info("Current hadoop version does not support erasure coding, only validation tests will run."); + LOG.info( + "Current hadoop version does not support erasure coding, only validation tests will run."); return false; } } @@ -142,8 +142,8 @@ private void runValidatePolicyNameTest(Function descript TableDescriptorBuilder.newBuilder(desc).setErasureCodingPolicy("foo").build()); } }); - assertThat(thrown.getMessage(), - containsString("Cannot set Erasure Coding policy: foo. Policy not found")); + assertPolicyValidationException(thrown.getMessage(), + "Cannot set Erasure Coding policy: foo. Policy not found"); thrown = assertThrows(HBaseIOException.class, () -> { try (Admin admin = UTIL.getAdmin()) { @@ -152,8 +152,8 @@ private void runValidatePolicyNameTest(Function descript TableDescriptorBuilder.newBuilder(desc).setErasureCodingPolicy("RS-10-4-1024k").build()); } }); - assertThat(thrown.getMessage(), containsString( - "Cannot set Erasure Coding policy: RS-10-4-1024k. The policy must be enabled")); + assertPolicyValidationException(thrown.getMessage(), + "Cannot set Erasure Coding policy: RS-10-4-1024k. 
The policy must be enabled"); // RS-6-3-1024k requires at least 6 datanodes, so should fail write test thrown = assertThrows(HBaseIOException.class, () -> { @@ -163,7 +163,15 @@ private void runValidatePolicyNameTest(Function descript TableDescriptorBuilder.newBuilder(desc).setErasureCodingPolicy("RS-6-3-1024k").build()); } }); - assertThat(thrown.getMessage(), containsString("Failed write test for EC policy")); + assertPolicyValidationException(thrown.getMessage(), "Failed write test for EC policy"); + } + + private void assertPolicyValidationException(String message, String expected) { + if (erasureCodingSupported) { + assertThat(message, containsString(expected)); + } else { + assertThat(message, containsString("Cannot find specified method")); + } } @Test diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java index d59ab5bb691f..63f651ddf432 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java @@ -19,16 +19,15 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.fs.ErasureCodingUtils; import org.apache.hadoop.hbase.security.access.SecureTestUtil; import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil; -import org.apache.hadoop.hdfs.DistributedFileSystem; import org.jruby.embed.PathType; import org.jruby.embed.ScriptingContainer; import org.junit.AfterClass; @@ -44,8 +43,6 @@ public abstract class AbstractTestShell { protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); protected final static ScriptingContainer jruby = new ScriptingContainer(); - protected static boolean erasureCodingSupported = false; - protected static void setUpConfig() throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt("hbase.regionserver.msginterval", 100); @@ -65,11 +62,18 @@ protected static void setUpConfig() throws IOException { } protected static void setUpJRubyRuntime() { + setUpJRubyRuntime(Collections.emptyMap()); + } + + protected static void setUpJRubyRuntime(Map extraVars) { LOG.debug("Configure jruby runtime, cluster set to {}", TEST_UTIL); List loadPaths = new ArrayList<>(2); loadPaths.add("src/test/ruby"); jruby.setLoadPaths(loadPaths); jruby.put("$TEST_CLUSTER", TEST_UTIL); + for (Map.Entry entry : extraVars.entrySet()) { + jruby.put(entry.getKey(), entry.getValue()); + } System.setProperty("jruby.jit.logging.verbose", "true"); System.setProperty("jruby.jit.logging", "true"); System.setProperty("jruby.native.verbose", "true"); @@ -104,14 +108,7 @@ public static void setUpBeforeClass() throws Exception { setUpConfig(); // Start mini cluster - // 3 datanodes needed for erasure coding checks - TEST_UTIL.startMiniCluster(3); - try { - ErasureCodingUtils.enablePolicy(FileSystem.get(TEST_UTIL.getConfiguration()), "XOR-2-1-1024k"); - erasureCodingSupported = true; - } catch (UnsupportedOperationException e) { - LOG.info("Current hadoop version does not support erasure coding, only validation tests will run."); - } + 
TEST_UTIL.startMiniCluster(1); setUpJRubyRuntime(); } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java index 983d43f9ef8d..432e669cad4f 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java @@ -17,14 +17,21 @@ */ package org.apache.hadoop.hbase.client; +import java.util.Collections; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.fs.ErasureCodingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Category({ ClientTests.class, LargeTests.class }) public class TestAdminShell extends AbstractTestShell { + private static final Logger LOG = LoggerFactory.getLogger(TestAdminShell.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -34,4 +41,26 @@ public class TestAdminShell extends AbstractTestShell { protected String getIncludeList() { return "admin_test.rb"; } + + protected static boolean erasureCodingSupported = false; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + setUpConfig(); + + // Start mini cluster + // 3 datanodes needed for erasure coding checks + TEST_UTIL.startMiniCluster(3); + try { + ErasureCodingUtils.enablePolicy(FileSystem.get(TEST_UTIL.getConfiguration()), + "XOR-2-1-1024k"); + erasureCodingSupported = true; + } catch (UnsupportedOperationException e) { + LOG.info( + "Current hadoop version does not support erasure coding, only validation tests will run."); + } + + // we'll use this extra variable to trigger some differences in the tests + setUpJRubyRuntime(Collections.singletonMap("@erasureCodingSupported", erasureCodingSupported)); + } } diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index e593c32f4521..e372004ae7c7 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -468,7 +468,6 @@ def teardown drop_test_table(@create_test_name) command(:create, @create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678, OWNER => '987654321', - ERASURE_CODING_POLICY => 'XOR-2-1-1024k', PRIORITY => '77', FLUSH_POLICY => 'org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy', REGION_MEMSTORE_REPLICATION => 'TRUE', @@ -479,7 +478,6 @@ def teardown assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) assert_match(/12345678/, admin.describe(@create_test_name)) assert_match(/987654321/, admin.describe(@create_test_name)) - assert_match(/XOR-2-1-1024k/, admin.describe(@create_test_name)) assert_match(/77/, admin.describe(@create_test_name)) assert_match(/'COMPACTION_ENABLED' => 'false'/, admin.describe(@create_test_name)) assert_match(/'SPLIT_ENABLED' => 'false'/, admin.describe(@create_test_name)) @@ -959,26 +957,35 @@ def teardown assert_match(/12345678/, admin.describe(@test_name)) end - define_test 'alter should be able to change EC policy' do - command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') - assert_match(/XOR-2-1-1024k/, admin.describe(@test_name)) - end + if @erasureCodingSupported + 
define_test "create should be able to set table EC options" do + drop_test_table(@create_test_name) + command(:create, @create_test_name, 'a', 'b',ERASURE_CODING_POLICY => 'XOR-2-1-1024k') + assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) + assert_match(/XOR-2-1-1024k/, admin.describe(@create_test_name)) + end - define_test 'alter should be able to remove EC policy' do - command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') - command(:alter, @test_name, METHOD => 'table_att_unset', NAME => 'ERASURE_CODING_POLICY') - assert_not_match(/ERASURE_CODING_POLICY/, admin.describe(@test_name)) - end + define_test 'alter should be able to change EC policy' do + command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + assert_match(/XOR-2-1-1024k/, admin.describe(@test_name)) + end - define_test 'alter should be able to change EC POLICY w/o table_att' do - command(:alter, @test_name, 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') - assert_match(/XOR-2-1-1024k/, admin.describe(@test_name)) - end + define_test 'alter should be able to remove EC policy' do + command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + command(:alter, @test_name, METHOD => 'table_att_unset', NAME => 'ERASURE_CODING_POLICY') + assert_not_match(/ERASURE_CODING_POLICY/, admin.describe(@test_name)) + end + + define_test 'alter should be able to change EC POLICY w/o table_att' do + command(:alter, @test_name, 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + assert_match(/XOR-2-1-1024k/, admin.describe(@test_name)) + end - define_test 'alter should be able to remove EC POLICY w/o table_att' do - command(:alter, @test_name, 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') - command(:alter, @test_name, 'ERASURE_CODING_POLICY' => nil) - assert_not_match(/ERASURE_CODING_POLICY/, admin.describe(@test_name)) + define_test 'alter should be able to remove EC POLICY w/o table_att' do + command(:alter, @test_name, 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + command(:alter, @test_name, 'ERASURE_CODING_POLICY' => nil) + assert_not_match(/ERASURE_CODING_POLICY/, admin.describe(@test_name)) + end end define_test "alter should be able to change coprocessor attributes" do From f2c25bfbeef5b969fecc23e76bf571362136e564 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Wed, 24 Jan 2024 14:35:20 -0500 Subject: [PATCH 4/5] add tests even with EC is not supported --- .../hadoop/hbase/client/TestAdminShell.java | 3 +- hbase-shell/src/test/ruby/hbase/admin_test.rb | 37 +++++++++++++++---- hbase-shell/src/test/ruby/test_helper.rb | 7 ++++ 3 files changed, 38 insertions(+), 9 deletions(-) diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java index 432e669cad4f..597ae903d1ce 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java @@ -61,6 +61,7 @@ public static void setUpBeforeClass() throws Exception { } // we'll use this extra variable to trigger some differences in the tests - setUpJRubyRuntime(Collections.singletonMap("@erasureCodingSupported", erasureCodingSupported)); + setUpJRubyRuntime( + Collections.singletonMap("$ERASURE_CODING_SUPPORTED", erasureCodingSupported)); } } diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index 
e372004ae7c7..cb488bd1caa8 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -516,6 +516,22 @@ def teardown assert_match(/'MERGE_ENABLED' => 'false'/, admin.describe(@create_test_name)) end + if $ERASURE_CODING_SUPPORTED + define_test "create should be able to set table EC options" do + drop_test_table(@create_test_name) + command(:create, @create_test_name, 'a', 'b', ERASURE_CODING_POLICY => 'XOR-2-1-1024k') + assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) + assert_match(/XOR-2-1-1024k/, admin.describe(@create_test_name)) + end + else + define_test "should raise error creating EC table" do + drop_test_table(@create_test_name) + assert_raise_message_contains "Cannot find specified method" do + command(:create, @create_test_name, 'a', 'b',ERASURE_CODING_POLICY => 'XOR-2-1-1024k') + end + end + end + #------------------------------------------------------------------------------- define_test "describe should fail for non-existent tables" do @@ -957,14 +973,7 @@ def teardown assert_match(/12345678/, admin.describe(@test_name)) end - if @erasureCodingSupported - define_test "create should be able to set table EC options" do - drop_test_table(@create_test_name) - command(:create, @create_test_name, 'a', 'b',ERASURE_CODING_POLICY => 'XOR-2-1-1024k') - assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) - assert_match(/XOR-2-1-1024k/, admin.describe(@create_test_name)) - end - + if $ERASURE_CODING_SUPPORTED define_test 'alter should be able to change EC policy' do command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') assert_match(/XOR-2-1-1024k/, admin.describe(@test_name)) @@ -986,6 +995,18 @@ def teardown command(:alter, @test_name, 'ERASURE_CODING_POLICY' => nil) assert_not_match(/ERASURE_CODING_POLICY/, admin.describe(@test_name)) end + else + define_test "should raise error changing EC policy" do + assert_raise_message_contains "Cannot find specified method" do + command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + end + end + + define_test "should raise error changing EC policy w/o table_att" do + assert_raise_message_contains "Cannot find specified method" do + command(:alter, @test_name, 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + end + end end define_test "alter should be able to change coprocessor attributes" do diff --git a/hbase-shell/src/test/ruby/test_helper.rb b/hbase-shell/src/test/ruby/test_helper.rb index db014f502787..741a994f3e1d 100644 --- a/hbase-shell/src/test/ruby/test_helper.rb +++ b/hbase-shell/src/test/ruby/test_helper.rb @@ -164,6 +164,13 @@ def capture_stdout $stdout = old_stdout end end + + def assert_raise_message_contains(message, &block) + error = assert_raise do + block.call + end + assert_match(eval("/" + message + "/"), error.message) + end end end From 7d797a7f4f6f6058778bf6df14a40ff56763673b Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Wed, 24 Jan 2024 15:53:10 -0500 Subject: [PATCH 5/5] checkstyle --- .../TestManageTableErasureCodingPolicy.java | 2 +- hbase-shell/src/test/ruby/hbase/admin_test.rb | 26 +++++++++++-------- hbase-shell/src/test/ruby/test_helper.rb | 2 +- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java index 
c778544d8781..d984764374fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestManageTableErasureCodingPolicy.java @@ -23,7 +23,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.nullValue; import static org.junit.Assert.assertThrows; -import static org.junit.Assume.*; +import static org.junit.Assume.assumeTrue; import java.io.IOException; import java.util.function.Function; diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index cb488bd1caa8..9e19de3dbb0d 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -517,16 +517,16 @@ def teardown end if $ERASURE_CODING_SUPPORTED - define_test "create should be able to set table EC options" do + define_test 'create should be able to set table EC options' do drop_test_table(@create_test_name) command(:create, @create_test_name, 'a', 'b', ERASURE_CODING_POLICY => 'XOR-2-1-1024k') assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) assert_match(/XOR-2-1-1024k/, admin.describe(@create_test_name)) end else - define_test "should raise error creating EC table" do + define_test 'should raise error creating EC table' do drop_test_table(@create_test_name) - assert_raise_message_contains "Cannot find specified method" do + assert_raise_message_contains 'Cannot find specified method' do command(:create, @create_test_name, 'a', 'b',ERASURE_CODING_POLICY => 'XOR-2-1-1024k') end end @@ -975,13 +975,16 @@ def teardown if $ERASURE_CODING_SUPPORTED define_test 'alter should be able to change EC policy' do - command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + command(:alter, @test_name, + METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') assert_match(/XOR-2-1-1024k/, admin.describe(@test_name)) end define_test 'alter should be able to remove EC policy' do - command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') - command(:alter, @test_name, METHOD => 'table_att_unset', NAME => 'ERASURE_CODING_POLICY') + command(:alter, @test_name, + METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + command(:alter, @test_name, + METHOD => 'table_att_unset', NAME => 'ERASURE_CODING_POLICY') assert_not_match(/ERASURE_CODING_POLICY/, admin.describe(@test_name)) end @@ -996,14 +999,15 @@ def teardown assert_not_match(/ERASURE_CODING_POLICY/, admin.describe(@test_name)) end else - define_test "should raise error changing EC policy" do - assert_raise_message_contains "Cannot find specified method" do - command(:alter, @test_name, METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') + define_test 'should raise error changing EC policy' do + assert_raise_message_contains 'Cannot find specified method' do + command(:alter, @test_name, + METHOD => 'table_att', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') end end - define_test "should raise error changing EC policy w/o table_att" do - assert_raise_message_contains "Cannot find specified method" do + define_test 'should raise error changing EC policy w/o table_att' do + assert_raise_message_contains 'Cannot find specified method' do command(:alter, @test_name, 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k') end end diff --git a/hbase-shell/src/test/ruby/test_helper.rb 
b/hbase-shell/src/test/ruby/test_helper.rb index 741a994f3e1d..359ab2ee8ea5 100644 --- a/hbase-shell/src/test/ruby/test_helper.rb +++ b/hbase-shell/src/test/ruby/test_helper.rb @@ -169,7 +169,7 @@ def assert_raise_message_contains(message, &block) error = assert_raise do block.call end - assert_match(eval("/" + message + "/"), error.message) + assert_match(eval('/' + message + '/'), error.message) end end end
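
For reference only (not part of the patches above), a minimal Java client sketch of the table-level erasure coding setting introduced by this series. The table name, family name, and connection setup here are illustrative assumptions; the XOR-2-1-1024k policy must already be available and enabled on the HDFS cluster (for example via `hdfs ec -enablePolicy -policy XOR-2-1-1024k`), as the validation in ErasureCodingUtils rejects unknown or disabled policies. The shell equivalents exercised in admin_test.rb are `create 't', 'f', ERASURE_CODING_POLICY => 'XOR-2-1-1024k'` and `alter 't', 'ERASURE_CODING_POLICY' => 'XOR-2-1-1024k'`.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class EcTableUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Illustrative names; the policy is applied to the table's data dir on HDFS
          // when the create/modify procedure runs, as implemented in this patch series.
          TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("ec_demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setErasureCodingPolicy("XOR-2-1-1024k")
            .build();
          admin.createTable(desc);
        }
      }
    }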